823 files changed, 35104 insertions, 15376 deletions
diff --git a/Documentation/00-INDEX b/Documentation/00-INDEX
index 1b777b960492..1f89424c36a6 100644
--- a/Documentation/00-INDEX
+++ b/Documentation/00-INDEX
@@ -192,10 +192,6 @@ kernel-docs.txt
 	- listing of various WWW + books that document kernel internals.
 kernel-parameters.txt
 	- summary listing of command line / boot prompt args for the kernel.
-keys-request-key.txt
-	- description of the kernel key request service.
-keys.txt
-	- description of the kernel key retention service.
 kobject.txt
 	- info of the kobject infrastructure of the Linux kernel.
 kprobes.txt
@@ -294,6 +290,8 @@ scheduler/
 	- directory with info on the scheduler.
 scsi/
 	- directory with info on Linux scsi support.
+security/
+	- directory that contains security-related info
 serial/
 	- directory with info on the low level serial API.
 serial-console.txt
diff --git a/Documentation/ABI/obsolete/o2cb b/Documentation/ABI/removed/o2cb
index 9c49d8e6c0cc..7f5daa465093 100644
--- a/Documentation/ABI/obsolete/o2cb
+++ b/Documentation/ABI/removed/o2cb
@@ -1,11 +1,10 @@
 What:		/sys/o2cb symlink
-Date:		Dec 2005
-KernelVersion:	2.6.16
+Date:		May 2011
+KernelVersion:	2.6.40
 Contact:	ocfs2-devel@oss.oracle.com
-Description:	This is a symlink: /sys/o2cb to /sys/fs/o2cb. The symlink will
-		be removed when new versions of ocfs2-tools which know to look
+Description:	This is a symlink: /sys/o2cb to /sys/fs/o2cb. The symlink is
+		removed when new versions of ocfs2-tools which know to look
 		in /sys/fs/o2cb are sufficiently prevalent. Don't code new
 		software to look here, it should try /sys/fs/o2cb instead.
-		See Documentation/ABI/stable/o2cb for more information on usage.
 Users:		ocfs2-tools. It's sufficient to mail proposed changes to
 		ocfs2-devel@oss.oracle.com.
diff --git a/Documentation/ABI/testing/sysfs-kernel-mm-cleancache b/Documentation/ABI/testing/sysfs-kernel-mm-cleancache
new file mode 100644
index 000000000000..662ae646ea12
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-kernel-mm-cleancache
@@ -0,0 +1,11 @@
+What:		/sys/kernel/mm/cleancache/
+Date:		April 2011
+Contact:	Dan Magenheimer <dan.magenheimer@oracle.com>
+Description:
+		/sys/kernel/mm/cleancache/ contains a number of files which
+		record a count of various cleancache operations
+		(sum across all filesystems):
+		succ_gets
+		failed_gets
+		puts
+		flushes
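The four counters above are plain decimal sysfs files, so they can be sampled from userspace with a few lines of C. This is only a minimal sketch, assuming the path layout is exactly as documented in the new ABI file:

#include <stdio.h>

/* Sketch: read each cleancache counter and print it.
 * Assumes the sysfs files documented above exist and are readable. */
int main(void)
{
	static const char *names[] = { "succ_gets", "failed_gets", "puts", "flushes" };
	char path[128];
	unsigned long long val;

	for (int i = 0; i < 4; i++) {
		snprintf(path, sizeof(path), "/sys/kernel/mm/cleancache/%s", names[i]);
		FILE *f = fopen(path, "r");

		if (!f || fscanf(f, "%llu", &val) != 1) {
			perror(path);
			if (f)
				fclose(f);
			continue;
		}
		printf("%-12s %llu\n", names[i], val);
		fclose(f);
	}
	return 0;
}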
diff --git a/Documentation/DocBook/dvb/dvbproperty.xml b/Documentation/DocBook/dvb/dvbproperty.xml
index 52d5e3c7cf6c..b5365f61d69b 100644
--- a/Documentation/DocBook/dvb/dvbproperty.xml
+++ b/Documentation/DocBook/dvb/dvbproperty.xml
@@ -141,13 +141,15 @@ struct dtv_properties {
 </row></tbody></tgroup></informaltable>
 </section>
 
+<section>
+<title>Property types</title>
 <para>
 On <link linkend="FE_GET_PROPERTY">FE_GET_PROPERTY</link>/<link linkend="FE_SET_PROPERTY">FE_SET_PROPERTY</link>,
 the actual action is determined by the dtv_property cmd/data pairs. With one single ioctl, is possible to
 get/set up to 64 properties. The actual meaning of each property is described on the next sections.
 </para>
 
-<para>The Available frontend property types are:</para>
+<para>The available frontend property types are:</para>
 <programlisting>
 #define DTV_UNDEFINED 0
 #define DTV_TUNE 1
@@ -193,6 +195,7 @@ get/set up to 64 properties. The actual meaning of each property is described on
 #define DTV_ISDBT_LAYER_ENABLED 41
 #define DTV_ISDBS_TS_ID 42
 </programlisting>
+</section>
 
 <section id="fe_property_common">
 <title>Parameters that are common to all Digital TV standards</title>
diff --git a/Documentation/DocBook/media-entities.tmpl b/Documentation/DocBook/media-entities.tmpl
index c8abb23ef1e7..e5fe09430fd9 100644
--- a/Documentation/DocBook/media-entities.tmpl
+++ b/Documentation/DocBook/media-entities.tmpl
@@ -293,6 +293,7 @@
 <!ENTITY sub-yuyv SYSTEM "v4l/pixfmt-yuyv.xml">
 <!ENTITY sub-yvyu SYSTEM "v4l/pixfmt-yvyu.xml">
 <!ENTITY sub-srggb10 SYSTEM "v4l/pixfmt-srggb10.xml">
+<!ENTITY sub-srggb12 SYSTEM "v4l/pixfmt-srggb12.xml">
 <!ENTITY sub-srggb8 SYSTEM "v4l/pixfmt-srggb8.xml">
 <!ENTITY sub-y10 SYSTEM "v4l/pixfmt-y10.xml">
 <!ENTITY sub-y12 SYSTEM "v4l/pixfmt-y12.xml">
@@ -373,9 +374,9 @@
 <!ENTITY sub-media-indices SYSTEM "media-indices.tmpl">
 
 <!ENTITY sub-media-controller SYSTEM "v4l/media-controller.xml">
-<!ENTITY sub-media-open SYSTEM "v4l/media-func-open.xml">
-<!ENTITY sub-media-close SYSTEM "v4l/media-func-close.xml">
-<!ENTITY sub-media-ioctl SYSTEM "v4l/media-func-ioctl.xml">
+<!ENTITY sub-media-func-open SYSTEM "v4l/media-func-open.xml">
+<!ENTITY sub-media-func-close SYSTEM "v4l/media-func-close.xml">
+<!ENTITY sub-media-func-ioctl SYSTEM "v4l/media-func-ioctl.xml">
 <!ENTITY sub-media-ioc-device-info SYSTEM "v4l/media-ioc-device-info.xml">
 <!ENTITY sub-media-ioc-enum-entities SYSTEM "v4l/media-ioc-enum-entities.xml">
 <!ENTITY sub-media-ioc-enum-links SYSTEM "v4l/media-ioc-enum-links.xml">
diff --git a/Documentation/DocBook/v4l/media-controller.xml b/Documentation/DocBook/v4l/media-controller.xml
index 2dc25e1d4089..873ac3a621f0 100644
--- a/Documentation/DocBook/v4l/media-controller.xml
+++ b/Documentation/DocBook/v4l/media-controller.xml
@@ -78,9 +78,9 @@
 <appendix id="media-user-func">
 <title>Function Reference</title>
 <!-- Keep this alphabetically sorted. -->
-&sub-media-open;
-&sub-media-close;
-&sub-media-ioctl;
+&sub-media-func-open;
+&sub-media-func-close;
+&sub-media-func-ioctl;
 <!-- All ioctls go here. -->
 &sub-media-ioc-device-info;
 &sub-media-ioc-enum-entities;
diff --git a/Documentation/DocBook/v4l/pixfmt.xml b/Documentation/DocBook/v4l/pixfmt.xml
index dbfe3b08435f..deb660207f94 100644
--- a/Documentation/DocBook/v4l/pixfmt.xml
+++ b/Documentation/DocBook/v4l/pixfmt.xml
@@ -673,6 +673,7 @@ access the palette, this must be done with ioctls of the Linux framebuffer API.<
 &sub-srggb8;
 &sub-sbggr16;
 &sub-srggb10;
+&sub-srggb12;
 </section>
 
 <section id="yuv-formats">
diff --git a/Documentation/DocBook/v4l/subdev-formats.xml b/Documentation/DocBook/v4l/subdev-formats.xml
index a26b10c07857..8d3409d2c632 100644
--- a/Documentation/DocBook/v4l/subdev-formats.xml
+++ b/Documentation/DocBook/v4l/subdev-formats.xml
@@ -2531,13 +2531,13 @@
 <constant>_JPEG</constant> prefix the format code is made of
 the following information.
 <itemizedlist>
-<listitem>The number of bus samples per entropy encoded byte.</listitem>
-<listitem>The bus width.</listitem>
+<listitem><para>The number of bus samples per entropy encoded byte.</para></listitem>
+<listitem><para>The bus width.</para></listitem>
 </itemizedlist>
+</para>
 
 <para>For instance, for a JPEG baseline process and an 8-bit bus width
 the format will be named <constant>V4L2_MBUS_FMT_JPEG_1X8</constant>.
-</para>
 </para>
 
 <para>The following table lists existing JPEG compressed formats.</para>
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index e9c77788a39d..f6318f6d7baf 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -177,6 +177,8 @@ static int get_family_id(int sd)
 	rc = send_cmd(sd, GENL_ID_CTRL, getpid(), CTRL_CMD_GETFAMILY,
 			CTRL_ATTR_FAMILY_NAME, (void *)name,
 			strlen(TASKSTATS_GENL_NAME)+1);
+	if (rc < 0)
+		return 0;	/* sendto() failure? */
 
 	rep_len = recv(sd, &ans, sizeof(ans), 0);
 	if (ans.n.nlmsg_type == NLMSG_ERROR ||
@@ -191,30 +193,37 @@ static int get_family_id(int sd)
 	return id;
 }
 
+#define average_ms(t, c) (t / 1000000ULL / (c ? c : 1))
+
 static void print_delayacct(struct taskstats *t)
 {
-	printf("\n\nCPU   %15s%15s%15s%15s\n"
-	       "      %15llu%15llu%15llu%15llu\n"
-	       "IO    %15s%15s\n"
-	       "      %15llu%15llu\n"
-	       "SWAP  %15s%15s\n"
-	       "      %15llu%15llu\n"
-	       "RECLAIM  %12s%15s\n"
-	       "      %15llu%15llu\n",
-	       "count", "real total", "virtual total", "delay total",
+	printf("\n\nCPU   %15s%15s%15s%15s%15s\n"
+	       "      %15llu%15llu%15llu%15llu%15.3fms\n"
+	       "IO    %15s%15s%15s\n"
+	       "      %15llu%15llu%15llums\n"
+	       "SWAP  %15s%15s%15s\n"
+	       "      %15llu%15llu%15llums\n"
+	       "RECLAIM  %12s%15s%15s\n"
+	       "      %15llu%15llu%15llums\n",
+	       "count", "real total", "virtual total",
+	       "delay total", "delay average",
 	       (unsigned long long)t->cpu_count,
 	       (unsigned long long)t->cpu_run_real_total,
 	       (unsigned long long)t->cpu_run_virtual_total,
 	       (unsigned long long)t->cpu_delay_total,
-	       "count", "delay total",
+	       average_ms((double)t->cpu_delay_total, t->cpu_count),
+	       "count", "delay total", "delay average",
 	       (unsigned long long)t->blkio_count,
 	       (unsigned long long)t->blkio_delay_total,
-	       "count", "delay total",
+	       average_ms(t->blkio_delay_total, t->blkio_count),
+	       "count", "delay total", "delay average",
 	       (unsigned long long)t->swapin_count,
 	       (unsigned long long)t->swapin_delay_total,
-	       "count", "delay total",
+	       average_ms(t->swapin_delay_total, t->swapin_count),
+	       "count", "delay total", "delay average",
 	       (unsigned long long)t->freepages_count,
-	       (unsigned long long)t->freepages_delay_total);
+	       (unsigned long long)t->freepages_delay_total,
+	       average_ms(t->freepages_delay_total, t->freepages_count));
 }
 
 static void task_context_switch_counts(struct taskstats *t)
@@ -433,8 +442,6 @@ int main(int argc, char *argv[])
 	}
 
 	do {
-		int i;
-
 		rep_len = recv(nl_sd, &msg, sizeof(msg), 0);
 		PRINTF("received %d bytes\n", rep_len);
 
@@ -459,7 +466,6 @@ int main(int argc, char *argv[])
 
 		na = (struct nlattr *) GENLMSG_DATA(&msg);
 		len = 0;
-		i = 0;
 		while (len < rep_len) {
 			len += NLA_ALIGN(na->nla_len);
 			switch (na->nla_type) {
diff --git a/Documentation/atomic_ops.txt b/Documentation/atomic_ops.txt
index ac4d47187122..3bd585b44927 100644
--- a/Documentation/atomic_ops.txt
+++ b/Documentation/atomic_ops.txt
@@ -12,7 +12,7 @@ Also, it should be made opaque such that any kind of cast to a normal
 C integer type will fail. Something like the following should
 suffice:
 
-	typedef struct { volatile int counter; } atomic_t;
+	typedef struct { int counter; } atomic_t;
 
 Historically, counter has been declared volatile. This is now discouraged.
 See Documentation/volatile-considered-harmful.txt for the complete rationale.
diff --git a/Documentation/cgroups/cgroups.txt b/Documentation/cgroups/cgroups.txt
index aedf1bd02fdd..0ed99f08f1f3 100644
--- a/Documentation/cgroups/cgroups.txt
+++ b/Documentation/cgroups/cgroups.txt
@@ -236,7 +236,8 @@ containing the following files describing that cgroup:
  - cgroup.procs: list of tgids in the cgroup. This list is not
    guaranteed to be sorted or free of duplicate tgids, and userspace
    should sort/uniquify the list if this property is required.
-   This is a read-only file, for now.
+   Writing a thread group id into this file moves all threads in that
+   group into this cgroup.
  - notify_on_release flag: run the release agent on exit?
  - release_agent: the path to use for release notifications (this file
    exists in the top cgroup only)
@@ -430,6 +431,12 @@ You can attach the current shell task by echoing 0:
 
 # echo 0 > tasks
 
+You can use the cgroup.procs file instead of the tasks file to move all
+threads in a threadgroup at once. Echoing the pid of any task in a
+threadgroup to cgroup.procs causes all tasks in that threadgroup to be
+attached to the cgroup. Writing 0 to cgroup.procs moves all tasks
+in the writing task's threadgroup.
+
 Note: Since every task is always a member of exactly one cgroup in each
 mounted hierarchy, to remove a task from its current cgroup you must
 move it into a new cgroup (possibly the root cgroup) by writing to the
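The cgroup.procs behaviour added in the hunk above can be driven from userspace in a few lines of C. A minimal sketch follows; the hierarchy mount point and cgroup name below are made-up examples:

#include <stdio.h>

/* Sketch: move the calling task's whole threadgroup into a cgroup by
 * writing "0" to that cgroup's cgroup.procs file, per the semantics
 * described above.  The path is a hypothetical example. */
int main(void)
{
	FILE *f = fopen("/dev/cgroup/foo/cgroup.procs", "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fprintf(f, "0\n") < 0)
		perror("fprintf");
	fclose(f);
	return 0;
}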
@@ -575,7 +582,7 @@ rmdir() will fail with it. From this behavior, pre_destroy() can be
 called multiple times against a cgroup.
 
 int can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-	       struct task_struct *task, bool threadgroup)
+	       struct task_struct *task)
 (cgroup_mutex held by caller)
 
 Called prior to moving a task into a cgroup; if the subsystem
@@ -584,9 +591,14 @@ task is passed, then a successful result indicates that *any*
 unspecified task can be moved into the cgroup. Note that this isn't
 called on a fork. If this method returns 0 (success) then this should
 remain valid while the caller holds cgroup_mutex and it is ensured that either
-attach() or cancel_attach() will be called in future. If threadgroup is
-true, then a successful result indicates that all threads in the given
-thread's threadgroup can be moved together.
+attach() or cancel_attach() will be called in future.
+
+int can_attach_task(struct cgroup *cgrp, struct task_struct *tsk);
+(cgroup_mutex held by caller)
+
+As can_attach, but for operations that must be run once per task to be
+attached (possibly many when using cgroup_attach_proc). Called after
+can_attach.
 
 void cancel_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
 		   struct task_struct *task, bool threadgroup)
@@ -598,15 +610,24 @@ function, so that the subsystem can implement a rollback. If not, not necessary.
 This will be called only about subsystems whose can_attach() operation have
 succeeded.
 
+void pre_attach(struct cgroup *cgrp);
+(cgroup_mutex held by caller)
+
+For any non-per-thread attachment work that needs to happen before
+attach_task. Needed by cpuset.
+
 void attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
-	    struct cgroup *old_cgrp, struct task_struct *task,
-	    bool threadgroup)
+	    struct cgroup *old_cgrp, struct task_struct *task)
 (cgroup_mutex held by caller)
 
 Called after the task has been attached to the cgroup, to allow any
 post-attachment activity that requires memory allocations or blocking.
-If threadgroup is true, the subsystem should take care of all threads
-in the specified thread's threadgroup. Currently does not support any
+
+void attach_task(struct cgroup *cgrp, struct task_struct *tsk);
+(cgroup_mutex held by caller)
+
+As attach, but for operations that must be run once per task to be attached,
+like can_attach_task. Called before attach. Currently does not support any
 subsystem that might need the old_cgrp for every thread in the group.
 
 void fork(struct cgroup_subsy *ss, struct task_struct *task)
@@ -630,7 +651,7 @@ always handled well.
 void post_clone(struct cgroup_subsys *ss, struct cgroup *cgrp)
 (cgroup_mutex held by caller)
 
-Called at the end of cgroup_clone() to do any parameter
+Called during cgroup_create() to do any parameter
 initialization which might be required before a task could attach. For
 example in cpusets, no task may attach before 'cpus' and 'mems' are set
 up.
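Taken together, the per-process and per-thread callbacks documented in the hunks above suggest the following skeleton for a subsystem. This is only a hedged sketch built from the signatures quoted in this file: the "example" subsystem is invented, and the cgroup_subsys field names are assumed to mirror the callback names.

#include <linux/cgroup.h>

/* Sketch of a subsystem wiring up the per-thread attach callbacks. */
static int example_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			      struct task_struct *task)
{
	return 0;	/* any task may join */
}

static int example_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	return 0;	/* per-thread check, called after can_attach */
}

static void example_pre_attach(struct cgroup *cgrp)
{
	/* non-per-thread setup, run before the first attach_task */
}

static void example_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
	/* per-thread work, called before attach */
}

static void example_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
			   struct cgroup *old_cgrp, struct task_struct *task)
{
	/* post-attachment work that may allocate memory or block */
}

struct cgroup_subsys example_subsys = {
	.name		 = "example",
	.can_attach	 = example_can_attach,
	.can_attach_task = example_can_attach_task,
	.pre_attach	 = example_pre_attach,
	.attach_task	 = example_attach_task,
	.attach		 = example_attach,
};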
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 95788ad2506c..ff31b1cc50aa 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -262,16 +262,6 @@ Who: Michael Buesch <mb@bu3sch.de>
 
 ---------------------------
 
-What:	/sys/o2cb symlink
-When:	January 2010
-Why:	/sys/fs/o2cb is the proper location for this information - /sys/o2cb
-	exists as a symlink for backwards compatibility for old versions of
-	ocfs2-tools. 2 years should be sufficient time to phase in new versions
-	which know to look in /sys/fs/o2cb.
-Who:	ocfs2-devel@oss.oracle.com
-
----------------------------
-
 What:	Ability for non root users to shm_get hugetlb pages based on mlock
 	resource limits
 When:	2.6.31
diff --git a/Documentation/filesystems/configfs/configfs_example_explicit.c b/Documentation/filesystems/configfs/configfs_example_explicit.c
index fd53869f5633..1420233dfa55 100644
--- a/Documentation/filesystems/configfs/configfs_example_explicit.c
+++ b/Documentation/filesystems/configfs/configfs_example_explicit.c
@@ -464,9 +464,8 @@ static int __init configfs_example_init(void)
 	return 0;
 
 out_unregister:
-	for (; i >= 0; i--) {
+	for (i--; i >= 0; i--)
 		configfs_unregister_subsystem(example_subsys[i]);
-	}
 
 	return ret;
 }
@@ -475,9 +474,8 @@ static void __exit configfs_example_exit(void)
 {
 	int i;
 
-	for (i = 0; example_subsys[i]; i++) {
+	for (i = 0; example_subsys[i]; i++)
 		configfs_unregister_subsystem(example_subsys[i]);
-	}
 }
 
 module_init(configfs_example_init);
diff --git a/Documentation/filesystems/configfs/configfs_example_macros.c b/Documentation/filesystems/configfs/configfs_example_macros.c
index d8e30a0378aa..327dfbc640a9 100644
--- a/Documentation/filesystems/configfs/configfs_example_macros.c
+++ b/Documentation/filesystems/configfs/configfs_example_macros.c
@@ -427,9 +427,8 @@ static int __init configfs_example_init(void)
 	return 0;
 
 out_unregister:
-	for (; i >= 0; i--) {
+	for (i--; i >= 0; i--)
 		configfs_unregister_subsystem(example_subsys[i]);
-	}
 
 	return ret;
 }
@@ -438,9 +437,8 @@ static void __exit configfs_example_exit(void)
 {
 	int i;
 
-	for (i = 0; example_subsys[i]; i++) {
+	for (i = 0; example_subsys[i]; i++)
 		configfs_unregister_subsystem(example_subsys[i]);
-	}
 }
 
 module_init(configfs_example_init);
diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
index c79ec58fd7f6..3ae9bc94352a 100644
--- a/Documentation/filesystems/ext4.txt
+++ b/Documentation/filesystems/ext4.txt
@@ -226,10 +226,6 @@ acl		Enables POSIX Access Control Lists support.
 noacl		This option disables POSIX Access Control List
 		support.
 
-reservation
-
-noreservation
-
 bsddf	(*)	Make 'df' act like BSD.
 minixdf		Make 'df' act like Minix.
 
diff --git a/Documentation/filesystems/nfs/idmapper.txt b/Documentation/filesystems/nfs/idmapper.txt
index b9b4192ea8b5..9c8fd6148656 100644
--- a/Documentation/filesystems/nfs/idmapper.txt
+++ b/Documentation/filesystems/nfs/idmapper.txt
@@ -47,8 +47,8 @@ request-key will find the first matching line and corresponding program. In
 this case, /some/other/program will handle all uid lookups and
 /usr/sbin/nfs.idmap will handle gid, user, and group lookups.
 
-See <file:Documentation/keys-request-keys.txt> for more information about the
-request-key function.
+See <file:Documentation/security/keys-request-keys.txt> for more information
+about the request-key function.
 
 
 =========
diff --git a/Documentation/filesystems/ocfs2.txt b/Documentation/filesystems/ocfs2.txt
index 9ed920a8cd79..7618a287aa41 100644
--- a/Documentation/filesystems/ocfs2.txt
+++ b/Documentation/filesystems/ocfs2.txt
@@ -46,9 +46,15 @@ errors=panic		Panic and halt the machine if an error occurs.
 intr		(*)	Allow signals to interrupt cluster operations.
 nointr			Do not allow signals to interrupt cluster
 			operations.
+noatime			Do not update access time.
+relatime(*)		Update atime if the previous atime is older than
+			mtime or ctime
+strictatime		Always update atime, but the minimum update interval
+			is specified by atime_quantum.
 atime_quantum=60(*)	OCFS2 will not update atime unless this number
 			of seconds has passed since the last update.
-			Set to zero to always update atime.
+			Set to zero to always update atime. This option needs
+			to be used together with strictatime.
 data=ordered	(*)	All data are forced directly out to the main file
 			system prior to its metadata being committed to the
 			journal.
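The new atime options above combine VFS-level flags (strictatime) with the filesystem-specific atime_quantum string. A minimal userspace sketch of such a mount follows; the device and mount point are made-up examples, and the MS_STRICTATIME value is an assumption for older C library headers:

#include <stdio.h>
#include <sys/mount.h>

#ifndef MS_STRICTATIME
#define MS_STRICTATIME	(1 << 24)	/* assumed value; older libc headers may lack it */
#endif

/* Sketch: mount an OCFS2 volume with strictatime and make atime update
 * on every access (atime_quantum=0).  Paths are hypothetical. */
int main(void)
{
	if (mount("/dev/sdb1", "/mnt/ocfs2", "ocfs2",
		  MS_STRICTATIME, "atime_quantum=0") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}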
diff --git a/Documentation/filesystems/xfs.txt b/Documentation/filesystems/xfs.txt
index 7bff3e4f35df..3fc0c31a6f5d 100644
--- a/Documentation/filesystems/xfs.txt
+++ b/Documentation/filesystems/xfs.txt
@@ -39,6 +39,12 @@ When mounting an XFS filesystem, the following options are accepted.
 	drive level write caching to be enabled, for devices that
 	support write barriers.
 
+  discard
+	Issue command to let the block device reclaim space freed by the
+	filesystem. This is useful for SSD devices, thinly provisioned
+	LUNs and virtual machine images, but may have a performance
+	impact. This option is incompatible with the nodelaylog option.
+
   dmapi
 	Enable the DMAPI (Data Management API) event callouts.
 	Use with the "mtpt" option.
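The new discard option is passed like any other fs-specific mount option. A minimal sketch using the mount(2) data string follows; the device and mount point are made-up examples:

#include <stdio.h>
#include <sys/mount.h>

/* Sketch: mount an XFS filesystem with "discard" so the block device is
 * told about freed extents.  Paths are hypothetical examples. */
int main(void)
{
	if (mount("/dev/sdc1", "/mnt/scratch", "xfs", 0, "discard") != 0) {
		perror("mount");
		return 1;
	}
	return 0;
}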
diff --git a/Documentation/networking/dns_resolver.txt b/Documentation/networking/dns_resolver.txt
index 04ca06325b08..7f531ad83285 100644
--- a/Documentation/networking/dns_resolver.txt
+++ b/Documentation/networking/dns_resolver.txt
@@ -139,8 +139,8 @@ the key will be discarded and recreated when the data it holds has expired.
 dns_query() returns a copy of the value attached to the key, or an error if
 that is indicated instead.
 
-See <file:Documentation/keys-request-key.txt> for further information about
-request-key function.
+See <file:Documentation/security/keys-request-key.txt> for further
+information about request-key function.
 
 
 =========
diff --git a/Documentation/power/regulator/machine.txt b/Documentation/power/regulator/machine.txt
index bdec39b9bd75..b42419b52e44 100644
--- a/Documentation/power/regulator/machine.txt
+++ b/Documentation/power/regulator/machine.txt
@@ -53,11 +53,11 @@ static struct regulator_init_data regulator1_data = {
 
 Regulator-1 supplies power to Regulator-2. This relationship must be registered
 with the core so that Regulator-1 is also enabled when Consumer A enables its
-supply (Regulator-2). The supply regulator is set by the supply_regulator_dev
+supply (Regulator-2). The supply regulator is set by the supply_regulator
 field below:-
 
 static struct regulator_init_data regulator2_data = {
-	.supply_regulator_dev = &platform_regulator1_device.dev,
+	.supply_regulator = "regulator_name",
 	.constraints = {
 		.min_uV = 1800000,
 		.max_uV = 2000000,
diff --git a/Documentation/security/00-INDEX b/Documentation/security/00-INDEX
new file mode 100644
index 000000000000..19bc49439cac
--- /dev/null
+++ b/Documentation/security/00-INDEX
@@ -0,0 +1,18 @@
+00-INDEX
+	- this file.
+SELinux.txt
+	- how to get started with the SELinux security enhancement.
+Smack.txt
+	- documentation on the Smack Linux Security Module.
+apparmor.txt
+	- documentation on the AppArmor security extension.
+credentials.txt
+	- documentation about credentials in Linux.
+keys-request-key.txt
+	- description of the kernel key request service.
+keys-trusted-encrypted.txt
+	- info on the Trusted and Encrypted keys in the kernel key ring service.
+keys.txt
+	- description of the kernel key retention service.
+tomoyo.txt
+	- documentation on the TOMOYO Linux Security Module.
diff --git a/Documentation/SELinux.txt b/Documentation/security/SELinux.txt
index 07eae00f3314..07eae00f3314 100644
--- a/Documentation/SELinux.txt
+++ b/Documentation/security/SELinux.txt
diff --git a/Documentation/Smack.txt b/Documentation/security/Smack.txt
index e9dab41c0fe0..e9dab41c0fe0 100644
--- a/Documentation/Smack.txt
+++ b/Documentation/security/Smack.txt
diff --git a/Documentation/apparmor.txt b/Documentation/security/apparmor.txt
index 93c1fd7d0635..93c1fd7d0635 100644
--- a/Documentation/apparmor.txt
+++ b/Documentation/security/apparmor.txt
diff --git a/Documentation/credentials.txt b/Documentation/security/credentials.txt
index 995baf379c07..fc0366cbd7ce 100644
--- a/Documentation/credentials.txt
+++ b/Documentation/security/credentials.txt
@@ -216,7 +216,7 @@ The Linux kernel supports the following types of credentials:
     When a process accesses a key, if not already present, it will normally be
     cached on one of these keyrings for future accesses to find.
 
-    For more information on using keys, see Documentation/keys.txt.
+    For more information on using keys, see Documentation/security/keys.txt.
 
 (5) LSM
 
diff --git a/Documentation/keys-request-key.txt b/Documentation/security/keys-request-key.txt
index 69686ad12c66..51987bfecfed 100644
--- a/Documentation/keys-request-key.txt
+++ b/Documentation/security/keys-request-key.txt
@@ -3,8 +3,8 @@
 ===================
 
 The key request service is part of the key retention service (refer to
-Documentation/keys.txt). This document explains more fully how the requesting
-algorithm works.
+Documentation/security/keys.txt). This document explains more fully how
+the requesting algorithm works.
 
 The process starts by either the kernel requesting a service by calling
 request_key*():
diff --git a/Documentation/keys-trusted-encrypted.txt b/Documentation/security/keys-trusted-encrypted.txt
index 8fb79bc1ac4b..8fb79bc1ac4b 100644
--- a/Documentation/keys-trusted-encrypted.txt
+++ b/Documentation/security/keys-trusted-encrypted.txt
diff --git a/Documentation/keys.txt b/Documentation/security/keys.txt
index 6523a9e6f293..4d75931d2d79 100644
--- a/Documentation/keys.txt
+++ b/Documentation/security/keys.txt
@@ -434,7 +434,7 @@ The main syscalls are:
      /sbin/request-key will be invoked in an attempt to obtain a key. The
      callout_info string will be passed as an argument to the program.
 
-     See also Documentation/keys-request-key.txt.
+     See also Documentation/security/keys-request-key.txt.
 
 
 The keyctl syscall functions are:
@@ -864,7 +864,7 @@ payload contents" for more information.
     If successful, the key will have been attached to the default keyring for
    implicitly obtained request-key keys, as set by KEYCTL_SET_REQKEY_KEYRING.
 
-    See also Documentation/keys-request-key.txt.
+    See also Documentation/security/keys-request-key.txt.
 
 
 (*) To search for a key, passing auxiliary data to the upcaller, call:
diff --git a/Documentation/tomoyo.txt b/Documentation/security/tomoyo.txt
index 200a2d37cbc8..200a2d37cbc8 100644
--- a/Documentation/tomoyo.txt
+++ b/Documentation/security/tomoyo.txt
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 36f007514db3..5e7cb39ad195 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -161,7 +161,8 @@ core_pattern is used to specify a core dumpfile pattern name.
 	%s	signal number
 	%t	UNIX time of dump
 	%h	hostname
-	%e	executable filename
+	%e	executable filename (may be shortened)
+	%E	executable path
 	%<OTHER> both are dropped
 . If the first character of the pattern is a '|', the kernel will treat
   the rest of the pattern as a command to run. The core dump will be
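A pattern using the new %E specifier can be installed by writing to the sysctl file. The sketch below assumes /var/cores is an existing, writable directory; the path is a made-up example:

#include <stdio.h>

/* Sketch: name core dumps with the %E (executable path) and %t (dump time)
 * specifiers documented above.  /var/cores is a hypothetical directory. */
int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/core_pattern", "w");

	if (!f) {
		perror("core_pattern");
		return 1;
	}
	/* fputs avoids printf-style interpretation of the '%' specifiers */
	fputs("/var/cores/core.%E.%t", f);
	fclose(f);
	return 0;
}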
diff --git a/Documentation/vm/cleancache.txt b/Documentation/vm/cleancache.txt
new file mode 100644
index 000000000000..36c367c73084
--- /dev/null
+++ b/Documentation/vm/cleancache.txt
@@ -0,0 +1,278 @@
+MOTIVATION
+
+Cleancache is a new optional feature provided by the VFS layer that
+potentially dramatically increases page cache effectiveness for
+many workloads in many environments at a negligible cost.
+
+Cleancache can be thought of as a page-granularity victim cache for clean
+pages that the kernel's pageframe replacement algorithm (PFRA) would like
+to keep around, but can't since there isn't enough memory. So when the
+PFRA "evicts" a page, it first attempts to use cleancache code to
+put the data contained in that page into "transcendent memory", memory
+that is not directly accessible or addressable by the kernel and is
+of unknown and possibly time-varying size.
+
+Later, when a cleancache-enabled filesystem wishes to access a page
+in a file on disk, it first checks cleancache to see if it already
+contains it; if it does, the page of data is copied into the kernel
+and a disk access is avoided.
+
+Transcendent memory "drivers" for cleancache are currently implemented
+in Xen (using hypervisor memory) and zcache (using in-kernel compressed
+memory) and other implementations are in development.
+
+FAQs are included below.
+
+IMPLEMENTATION OVERVIEW
+
+A cleancache "backend" that provides transcendent memory registers itself
+to the kernel's cleancache "frontend" by calling cleancache_register_ops,
+passing a pointer to a cleancache_ops structure with funcs set appropriately.
+Note that cleancache_register_ops returns the previous settings so that
+chaining can be performed if desired. The functions provided must conform to
+certain semantics as follows:
+
+Most important, cleancache is "ephemeral". Pages which are copied into
+cleancache have an indefinite lifetime which is completely unknowable
+by the kernel and so may or may not still be in cleancache at any later time.
+Thus, as its name implies, cleancache is not suitable for dirty pages.
+Cleancache has complete discretion over what pages to preserve and what
+pages to discard and when.
+
+Mounting a cleancache-enabled filesystem should call "init_fs" to obtain a
+pool id which, if positive, must be saved in the filesystem's superblock;
+a negative return value indicates failure. A "put_page" will copy a
+(presumably about-to-be-evicted) page into cleancache and associate it with
+the pool id, a file key, and a page index into the file. (The combination
+of a pool id, a file key, and an index is sometimes called a "handle".)
+A "get_page" will copy the page, if found, from cleancache into kernel memory.
+A "flush_page" will ensure the page no longer is present in cleancache;
+a "flush_inode" will flush all pages associated with the specified file;
+and, when a filesystem is unmounted, a "flush_fs" will flush all pages in
+all files specified by the given pool id and also surrender the pool id.
+
+An "init_shared_fs", like init_fs, obtains a pool id but tells cleancache
+to treat the pool as shared using a 128-bit UUID as a key. On systems
+that may run multiple kernels (such as hard partitioned or virtualized
+systems) that may share a clustered filesystem, and where cleancache
+may be shared among those kernels, calls to init_shared_fs that specify the
+same UUID will receive the same pool id, thus allowing the pages to
+be shared. Note that any security requirements must be imposed outside
+of the kernel (e.g. by "tools" that control cleancache). Or a
+cleancache implementation can simply disable shared_init by always
+returning a negative value.
+
+If a get_page is successful on a non-shared pool, the page is flushed (thus
+making cleancache an "exclusive" cache). On a shared pool, the page
+is NOT flushed on a successful get_page so that it remains accessible to
+other sharers. The kernel is responsible for ensuring coherency between
+cleancache (shared or not), the page cache, and the filesystem, using
+cleancache flush operations as required.
+
+Note that cleancache must enforce put-put-get coherency and get-get
+coherency. For the former, if two puts are made to the same handle but
+with different data, say AAA by the first put and BBB by the second, a
+subsequent get can never return the stale data (AAA). For get-get coherency,
+if a get for a given handle fails, subsequent gets for that handle will
+never succeed unless preceded by a successful put with that handle.
+
+Last, cleancache provides no SMP serialization guarantees; if two
+different Linux threads are simultaneously putting and flushing a page
+with the same handle, the results are indeterminate. Callers must
+lock the page to ensure serial behavior.
+
+CLEANCACHE PERFORMANCE METRICS
+
+Cleancache monitoring is done by sysfs files in the
+/sys/kernel/mm/cleancache directory. The effectiveness of cleancache
+can be measured (across all filesystems) with:
+
+succ_gets	- number of gets that were successful
+failed_gets	- number of gets that failed
+puts		- number of puts attempted (all "succeed")
+flushes		- number of flushes attempted
+
+A backend implementation may provide additional metrics.
+
+FAQ
+
+1) Where's the value? (Andrew Morton)
+
+Cleancache provides a significant performance benefit to many workloads
+in many environments with negligible overhead by improving the
+effectiveness of the pagecache. Clean pagecache pages are
+saved in transcendent memory (RAM that is otherwise not directly
+addressable to the kernel); fetching those pages later avoids "refaults"
+and thus disk reads.
+
+Cleancache (and its sister code "frontswap") provide interfaces for
+this transcendent memory (aka "tmem"), which conceptually lies between
+fast kernel-directly-addressable RAM and slower DMA/asynchronous devices.
+Disallowing direct kernel or userland reads/writes to tmem
+is ideal when data is transformed to a different form and size (such
+as with compression) or secretly moved (as might be useful for write-
+balancing for some RAM-like devices). Evicted page-cache pages (and
+swap pages) are a great use for this kind of slower-than-RAM-but-much-
+faster-than-disk transcendent memory, and the cleancache (and frontswap)
+"page-object-oriented" specification provides a nice way to read and
+write -- and indirectly "name" -- the pages.
+
+In the virtual case, the whole point of virtualization is to statistically
+multiplex physical resources across the varying demands of multiple
+virtual machines. This is really hard to do with RAM and efforts to
+do it well with no kernel change have essentially failed (except in some
+well-publicized special-case workloads). Cleancache -- and frontswap --
+with a fairly small impact on the kernel, provide a huge amount
+of flexibility for more dynamic, flexible RAM multiplexing.
+Specifically, the Xen Transcendent Memory backend allows otherwise
+"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple
+virtual machines, but the pages can be compressed and deduplicated to
+optimize RAM utilization. And when guest OS's are induced to surrender
+underutilized RAM (e.g. with "self-ballooning"), page cache pages
+are the first to go, and cleancache allows those pages to be
+saved and reclaimed if overall host system memory conditions allow.
+
+And the identical interface used for cleancache can be used in
+physical systems as well. The zcache driver acts as a memory-hungry
+device that stores pages of data in a compressed state. And
+the proposed "RAMster" driver shares RAM across multiple physical
+systems.
+
+2) Why does cleancache have its sticky fingers so deep inside the
+filesystems and VFS? (Andrew Morton and Christoph Hellwig)
+
+The core hooks for cleancache in VFS are in most cases a single line
+and the minimum set are placed precisely where needed to maintain
+coherency (via cleancache_flush operations) between cleancache,
+the page cache, and disk. All hooks compile into nothingness if
+cleancache is config'ed off and turn into a function-pointer-
+compare-to-NULL if config'ed on but no backend claims the ops
+functions, or to a compare-struct-element-to-negative if a
+backend claims the ops functions but a filesystem doesn't enable
+cleancache.
+
+Some filesystems are built entirely on top of VFS and the hooks
+in VFS are sufficient, so don't require an "init_fs" hook; the
+initial implementation of cleancache didn't provide this hook.
+But for some filesystems (such as btrfs), the VFS hooks are
+incomplete and one or more hooks in fs-specific code are required.
+And for some other filesystems, such as tmpfs, cleancache may
+be counterproductive. So it seemed prudent to require a filesystem
+to "opt in" to use cleancache, which requires adding a hook in
+each filesystem. Not all filesystems are supported by cleancache
+only because they haven't been tested. The existing set should
+be sufficient to validate the concept, the opt-in approach means
+that untested filesystems are not affected, and the hooks in the
+existing filesystems should make it very easy to add more
+filesystems in the future.
+
+The total impact of the hooks to existing fs and mm files is only
+about 40 lines added (not counting comments and blank lines).
+
+3) Why not make cleancache asynchronous and batched so it can
+more easily interface with real devices with DMA instead
+of copying each individual page? (Minchan Kim)
+
+The one-page-at-a-time copy semantics simplifies the implementation
+on both the frontend and backend and also allows the backend to
+do fancy things on-the-fly like page compression and
+page deduplication. And since the data is "gone" (copied into/out
+of the pageframe) before the cleancache get/put call returns,
+a great deal of race conditions and potential coherency issues
+are avoided. While the interface seems odd for a "real device"
+or for real kernel-addressable RAM, it makes perfect sense for
+transcendent memory.
+
+4) Why is non-shared cleancache "exclusive"? And where is the
+page "flushed" after a "get"? (Minchan Kim)
+
+The main reason is to free up space in transcendent memory and
+to avoid unnecessary cleancache_flush calls. If you want inclusive,
+the page can be "put" immediately following the "get". If
+put-after-get for inclusive becomes common, the interface could
+be easily extended to add a "get_no_flush" call.
+
+The flush is done by the cleancache backend implementation.
+
+5) What's the performance impact?
+
+Performance analysis has been presented at OLS'09 and LCA'10.
+Briefly, performance gains can be significant on most workloads,
+especially when memory pressure is high (e.g. when RAM is
+overcommitted in a virtual workload); and because the hooks are
+invoked primarily in place of or in addition to a disk read/write,
+overhead is negligible even in worst case workloads. Basically
+cleancache replaces I/O with memory-copy-CPU-overhead; on older
+single-core systems with slow memory-copy speeds, cleancache
+has little value, but in newer multicore machines, especially
+consolidated/virtualized machines, it has great value.
+
+6) How do I add cleancache support for filesystem X? (Boaz Harrash)
+
+Filesystems that are well-behaved and conform to certain
+restrictions can utilize cleancache simply by making a call to
+cleancache_init_fs at mount time. Unusual, misbehaving, or
+poorly layered filesystems must either add additional hooks
+and/or undergo extensive additional testing... or should just
+not enable the optional cleancache.
+
+Some points for a filesystem to consider:
+
+- The FS should be block-device-based (e.g. a ram-based FS such
+  as tmpfs should not enable cleancache)
+- To ensure coherency/correctness, the FS must ensure that all
+  file removal or truncation operations either go through VFS or
+  add hooks to do the equivalent cleancache "flush" operations
+- To ensure coherency/correctness, either inode numbers must
+  be unique across the lifetime of the on-disk file OR the
+  FS must provide an "encode_fh" function.
+- The FS must call the VFS superblock alloc and deactivate routines
+  or add hooks to do the equivalent cleancache calls done there.
+- To maximize performance, all pages fetched from the FS should
+  go through the do_mpage_readpage routine or the FS should add
+  hooks to do the equivalent (cf. btrfs)
+- Currently, the FS blocksize must be the same as PAGESIZE. This
+  is not an architectural restriction, but no backends currently
+  support anything different.
+- A clustered FS should invoke the "shared_init_fs" cleancache
+  hook to get best performance for some backends.
+
+7) Why not use the KVA of the inode as the key? (Christoph Hellwig)
+
+If cleancache would use the inode virtual address instead of
+inode/filehandle, the pool id could be eliminated. But, this
+won't work because cleancache retains pagecache data pages
+persistently even when the inode has been pruned from the
+inode unused list, and only flushes the data page if the file
+gets removed/truncated. So if cleancache used the inode kva,
+there would be potential coherency issues if/when the inode
+kva is reused for a different file. Alternately, if cleancache
+flushed the pages when the inode kva was freed, much of the value
+of cleancache would be lost because the cache of pages in cleancache
+is potentially much larger than the kernel pagecache and is most
+useful if the pages survive inode cache removal.
+
+8) Why is a global variable required?
+
+The cleancache_enabled flag is checked in all of the frequently-used
+cleancache hooks. The alternative is a function call to check a static
+variable. Since cleancache is enabled dynamically at runtime, systems
+that don't enable cleancache would suffer thousands (possibly
+tens-of-thousands) of unnecessary function calls per second. So the
+global variable allows cleancache to be enabled by default at compile
+time, but have insignificant performance impact when cleancache remains
+disabled at runtime.
+
+9) Does cleancache work with KVM?
+
+The memory model of KVM is sufficiently different that a cleancache
+backend may have less value for KVM. This remains to be tested,
+especially in an overcommitted system.
+
+10) Does cleancache work in userspace? It sounds useful for
+memory hungry caches like web browsers. (Jamie Lokier)
+
+No plans yet, though we agree it sounds useful, at least for
+apps that bypass the page cache (e.g. O_DIRECT).
+
+Last updated: Dan Magenheimer, April 13 2011
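The backend registration flow described in the IMPLEMENTATION OVERVIEW above can be condensed into a short sketch. Only the operation names (init_fs, put_page, get_page, the flush calls) and the chaining behaviour of cleancache_register_ops come from the text; the exact member and callback signatures of struct cleancache_ops are assumptions, and the "mybackend" functions are placeholders:

#include <linux/module.h>
#include <linux/cleancache.h>

/* Hedged sketch of a cleancache backend registering with the frontend.
 * Callback signatures are assumed for illustration only. */
static int mybackend_init_fs(size_t pagesize)
{
	return 0;	/* return a pool id, or a negative value on failure */
}

static void mybackend_put_page(int pool_id, struct cleancache_filekey key,
			       pgoff_t index, struct page *page)
{
	/* copy the clean page into transcendent memory */
}

static int mybackend_get_page(int pool_id, struct cleancache_filekey key,
			      pgoff_t index, struct page *page)
{
	return -1;	/* not found */
}

static struct cleancache_ops mybackend_ops = {
	.init_fs	= mybackend_init_fs,
	.put_page	= mybackend_put_page,
	.get_page	= mybackend_get_page,
	/* init_shared_fs, flush_page, flush_inode, flush_fs omitted here */
};

static int __init mybackend_init(void)
{
	/* the previous ops are returned so backends can be chained if desired */
	struct cleancache_ops old_ops = cleancache_register_ops(&mybackend_ops);

	(void)old_ops;
	return 0;
}
module_init(mybackend_init);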
diff --git a/MAINTAINERS b/MAINTAINERS
index 1ab17de642e5..a33b11560d3f 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -931,6 +931,8 @@ F:	drivers/mmc/host/msm_sdcc.h
 F:	drivers/tty/serial/msm_serial.h
 F:	drivers/tty/serial/msm_serial.c
 F:	drivers/platform/msm/
+F:	drivers/*/pm8???-*
+F:	include/linux/mfd/pm8xxx/
 T:	git git://codeaurora.org/quic/kernel/davidb/linux-msm.git
 S:	Maintained
 
@@ -2302,7 +2304,7 @@ F:	net/bridge/netfilter/ebt*.c
 ECRYPT FILE SYSTEM
 M:	Tyler Hicks <tyhicks@linux.vnet.ibm.com>
 M:	Dustin Kirkland <kirkland@canonical.com>
-L:	ecryptfs-devel@lists.launchpad.net
+L:	ecryptfs@vger.kernel.org
 W:	https://launchpad.net/ecryptfs
 S:	Supported
 F:	Documentation/filesystems/ecryptfs.txt
@@ -2582,6 +2584,13 @@ S:	Maintained
 F:	drivers/hwmon/f75375s.c
 F:	include/linux/f75375s.h
 
+FIREWIRE AUDIO DRIVERS
+M:	Clemens Ladisch <clemens@ladisch.de>
+L:	alsa-devel@alsa-project.org (moderated for non-subscribers)
+T:	git git://git.alsa-project.org/alsa-kernel.git
+S:	Maintained
+F:	sound/firewire/
+
 FIREWIRE SUBSYSTEM
 M:	Stefan Richter <stefanr@s5r6.in-berlin.de>
 L:	linux1394-devel@lists.sourceforge.net
@@ -3572,9 +3581,16 @@ M:	Andrew Morton <akpm@linux-foundation.org>
 M:	Jan Kara <jack@suse.cz>
 L:	linux-ext4@vger.kernel.org
 S:	Maintained
-F:	fs/jbd*/
-F:	include/linux/ext*jbd*.h
-F:	include/linux/jbd*.h
+F:	fs/jbd/
+F:	include/linux/ext3_jbd.h
+F:	include/linux/jbd.h
+
+JOURNALLING LAYER FOR BLOCK DEVICES (JBD2)
+M:	"Theodore Ts'o" <tytso@mit.edu>
+L:	linux-ext4@vger.kernel.org
+S:	Maintained
+F:	fs/jbd2/
+F:	include/linux/jbd2.h
 
 JSM Neo PCI based serial card
 M:	Breno Leitao <leitao@linux.vnet.ibm.com>
@@ -3710,7 +3726,7 @@ KEYS/KEYRINGS:
 M:	David Howells <dhowells@redhat.com>
 L:	keyrings@linux-nfs.org
 S:	Maintained
-F:	Documentation/keys.txt
+F:	Documentation/security/keys.txt
 F:	include/linux/key.h
 F:	include/linux/key-type.h
 F:	include/keys/
@@ -3722,7 +3738,7 @@ M:	Mimi Zohar <zohar@us.ibm.com>
 L:	linux-security-module@vger.kernel.org
 L:	keyrings@linux-nfs.org
 S:	Supported
-F:	Documentation/keys-trusted-encrypted.txt
+F:	Documentation/security/keys-trusted-encrypted.txt
 F:	include/keys/trusted-type.h
3727 | F: security/keys/trusted.c | 3743 | F: security/keys/trusted.c |
3728 | F: security/keys/trusted.h | 3744 | F: security/keys/trusted.h |
@@ -3733,7 +3749,7 @@ M: David Safford <safford@watson.ibm.com> | |||
3733 | L: linux-security-module@vger.kernel.org | 3749 | L: linux-security-module@vger.kernel.org |
3734 | L: keyrings@linux-nfs.org | 3750 | L: keyrings@linux-nfs.org |
3735 | S: Supported | 3751 | S: Supported |
3736 | F: Documentation/keys-trusted-encrypted.txt | 3752 | F: Documentation/security/keys-trusted-encrypted.txt |
3737 | F: include/keys/encrypted-type.h | 3753 | F: include/keys/encrypted-type.h |
3738 | F: security/keys/encrypted.c | 3754 | F: security/keys/encrypted.c |
3739 | F: security/keys/encrypted.h | 3755 | F: security/keys/encrypted.h |
@@ -4138,6 +4154,7 @@ M: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> | |||
4138 | L: linux-mm@kvack.org | 4154 | L: linux-mm@kvack.org |
4139 | S: Maintained | 4155 | S: Maintained |
4140 | F: mm/memcontrol.c | 4156 | F: mm/memcontrol.c |
4157 | F: mm/page_cgroup.c | ||
4141 | 4158 | ||
4142 | MEMORY TECHNOLOGY DEVICES (MTD) | 4159 | MEMORY TECHNOLOGY DEVICES (MTD) |
4143 | M: David Woodhouse <dwmw2@infradead.org> | 4160 | M: David Woodhouse <dwmw2@infradead.org> |
@@ -5991,7 +6008,7 @@ F: Documentation/filesystems/spufs.txt | |||
5991 | F: arch/powerpc/platforms/cell/spufs/ | 6008 | F: arch/powerpc/platforms/cell/spufs/ |
5992 | 6009 | ||
5993 | SQUASHFS FILE SYSTEM | 6010 | SQUASHFS FILE SYSTEM |
5994 | M: Phillip Lougher <phillip@lougher.demon.co.uk> | 6011 | M: Phillip Lougher <phillip@squashfs.org.uk> |
5995 | L: squashfs-devel@lists.sourceforge.net (subscribers-only) | 6012 | L: squashfs-devel@lists.sourceforge.net (subscribers-only) |
5996 | W: http://squashfs.org.uk | 6013 | W: http://squashfs.org.uk |
5997 | S: Maintained | 6014 | S: Maintained |
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig index e3a82775f9da..60219bf94198 100644 --- a/arch/alpha/Kconfig +++ b/arch/alpha/Kconfig | |||
@@ -41,10 +41,6 @@ config ARCH_HAS_ILOG2_U64 | |||
41 | bool | 41 | bool |
42 | default n | 42 | default n |
43 | 43 | ||
44 | config GENERIC_FIND_NEXT_BIT | ||
45 | bool | ||
46 | default y | ||
47 | |||
48 | config GENERIC_CALIBRATE_DELAY | 44 | config GENERIC_CALIBRATE_DELAY |
49 | bool | 45 | bool |
50 | default y | 46 | default y |
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig index 076db52ff672..d5f00d7eb075 100644 --- a/arch/arm/configs/omap2plus_defconfig +++ b/arch/arm/configs/omap2plus_defconfig | |||
@@ -21,58 +21,22 @@ CONFIG_MODVERSIONS=y | |||
21 | CONFIG_MODULE_SRCVERSION_ALL=y | 21 | CONFIG_MODULE_SRCVERSION_ALL=y |
22 | # CONFIG_BLK_DEV_BSG is not set | 22 | # CONFIG_BLK_DEV_BSG is not set |
23 | CONFIG_ARCH_OMAP=y | 23 | CONFIG_ARCH_OMAP=y |
24 | CONFIG_ARCH_OMAP2=y | ||
25 | CONFIG_ARCH_OMAP3=y | ||
26 | CONFIG_ARCH_OMAP4=y | ||
27 | CONFIG_OMAP_RESET_CLOCKS=y | 24 | CONFIG_OMAP_RESET_CLOCKS=y |
28 | CONFIG_OMAP_MUX_DEBUG=y | 25 | CONFIG_OMAP_MUX_DEBUG=y |
29 | CONFIG_OMAP_32K_TIMER=y | ||
30 | CONFIG_MACH_OMAP_GENERIC=y | ||
31 | CONFIG_ARCH_OMAP2420=y | ||
32 | CONFIG_ARCH_OMAP2430=y | ||
33 | CONFIG_ARCH_OMAP3430=y | ||
34 | CONFIG_MACH_OMAP_H4=y | ||
35 | CONFIG_MACH_OMAP_APOLLON=y | ||
36 | CONFIG_MACH_OMAP_2430SDP=y | ||
37 | CONFIG_MACH_OMAP3_BEAGLE=y | ||
38 | CONFIG_MACH_DEVKIT8000=y | ||
39 | CONFIG_MACH_OMAP_LDP=y | ||
40 | CONFIG_MACH_OVERO=y | ||
41 | CONFIG_MACH_OMAP3EVM=y | ||
42 | CONFIG_MACH_OMAP3517EVM=y | ||
43 | CONFIG_MACH_OMAP3_PANDORA=y | ||
44 | CONFIG_MACH_OMAP3_TOUCHBOOK=y | ||
45 | CONFIG_MACH_OMAP_3430SDP=y | ||
46 | CONFIG_MACH_NOKIA_N8X0=y | ||
47 | CONFIG_MACH_NOKIA_RX51=y | ||
48 | CONFIG_MACH_OMAP_ZOOM2=y | ||
49 | CONFIG_MACH_OMAP_ZOOM3=y | ||
50 | CONFIG_MACH_CM_T35=y | ||
51 | CONFIG_MACH_IGEP0020=y | ||
52 | CONFIG_MACH_SBC3530=y | ||
53 | CONFIG_MACH_OMAP_3630SDP=y | ||
54 | CONFIG_MACH_OMAP_4430SDP=y | ||
55 | CONFIG_ARM_THUMBEE=y | 26 | CONFIG_ARM_THUMBEE=y |
56 | CONFIG_ARM_L1_CACHE_SHIFT=5 | ||
57 | CONFIG_ARM_ERRATA_411920=y | 27 | CONFIG_ARM_ERRATA_411920=y |
58 | CONFIG_NO_HZ=y | 28 | CONFIG_NO_HZ=y |
59 | CONFIG_HIGH_RES_TIMERS=y | 29 | CONFIG_HIGH_RES_TIMERS=y |
60 | CONFIG_SMP=y | 30 | CONFIG_SMP=y |
61 | CONFIG_NR_CPUS=2 | 31 | CONFIG_NR_CPUS=2 |
62 | # CONFIG_LOCAL_TIMERS is not set | ||
63 | CONFIG_AEABI=y | ||
64 | CONFIG_LEDS=y | 32 | CONFIG_LEDS=y |
65 | CONFIG_ZBOOT_ROM_TEXT=0x0 | 33 | CONFIG_ZBOOT_ROM_TEXT=0x0 |
66 | CONFIG_ZBOOT_ROM_BSS=0x0 | 34 | CONFIG_ZBOOT_ROM_BSS=0x0 |
67 | CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200" | 35 | CONFIG_CMDLINE="root=/dev/mmcblk0p2 rootwait console=ttyO2,115200" |
68 | CONFIG_KEXEC=y | 36 | CONFIG_KEXEC=y |
69 | CONFIG_FPE_NWFPE=y | 37 | CONFIG_FPE_NWFPE=y |
70 | CONFIG_VFP=y | ||
71 | CONFIG_NEON=y | ||
72 | CONFIG_BINFMT_MISC=y | 38 | CONFIG_BINFMT_MISC=y |
73 | CONFIG_PM=y | ||
74 | CONFIG_PM_DEBUG=y | 39 | CONFIG_PM_DEBUG=y |
75 | CONFIG_PM_RUNTIME=y | ||
76 | CONFIG_NET=y | 40 | CONFIG_NET=y |
77 | CONFIG_PACKET=y | 41 | CONFIG_PACKET=y |
78 | CONFIG_UNIX=y | 42 | CONFIG_UNIX=y |
@@ -89,14 +53,6 @@ CONFIG_IP_PNP_RARP=y | |||
89 | # CONFIG_IPV6 is not set | 53 | # CONFIG_IPV6 is not set |
90 | CONFIG_NETFILTER=y | 54 | CONFIG_NETFILTER=y |
91 | CONFIG_BT=m | 55 | CONFIG_BT=m |
92 | CONFIG_BT_L2CAP=m | ||
93 | CONFIG_BT_SCO=m | ||
94 | CONFIG_BT_RFCOMM=y | ||
95 | CONFIG_BT_RFCOMM_TTY=y | ||
96 | CONFIG_BT_BNEP=m | ||
97 | CONFIG_BT_BNEP_MC_FILTER=y | ||
98 | CONFIG_BT_BNEP_PROTO_FILTER=y | ||
99 | CONFIG_BT_HIDP=m | ||
100 | CONFIG_BT_HCIUART=m | 56 | CONFIG_BT_HCIUART=m |
101 | CONFIG_BT_HCIUART_H4=y | 57 | CONFIG_BT_HCIUART_H4=y |
102 | CONFIG_BT_HCIUART_BCSP=y | 58 | CONFIG_BT_HCIUART_BCSP=y |
@@ -107,11 +63,9 @@ CONFIG_CFG80211=m | |||
107 | CONFIG_MAC80211=m | 63 | CONFIG_MAC80211=m |
108 | CONFIG_MAC80211_RC_PID=y | 64 | CONFIG_MAC80211_RC_PID=y |
109 | CONFIG_MAC80211_RC_DEFAULT_PID=y | 65 | CONFIG_MAC80211_RC_DEFAULT_PID=y |
110 | CONFIG_MAC80211_LEDS=y | ||
111 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" | 66 | CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" |
112 | CONFIG_CONNECTOR=y | 67 | CONFIG_CONNECTOR=y |
113 | CONFIG_MTD=y | 68 | CONFIG_MTD=y |
114 | CONFIG_MTD_CONCAT=y | ||
115 | CONFIG_MTD_CMDLINE_PARTS=y | 69 | CONFIG_MTD_CMDLINE_PARTS=y |
116 | CONFIG_MTD_CHAR=y | 70 | CONFIG_MTD_CHAR=y |
117 | CONFIG_MTD_BLOCK=y | 71 | CONFIG_MTD_BLOCK=y |
@@ -127,7 +81,6 @@ CONFIG_MTD_UBI=y | |||
127 | CONFIG_BLK_DEV_LOOP=y | 81 | CONFIG_BLK_DEV_LOOP=y |
128 | CONFIG_BLK_DEV_RAM=y | 82 | CONFIG_BLK_DEV_RAM=y |
129 | CONFIG_BLK_DEV_RAM_SIZE=16384 | 83 | CONFIG_BLK_DEV_RAM_SIZE=16384 |
130 | CONFIG_EEPROM_LEGACY=y | ||
131 | CONFIG_SCSI=y | 84 | CONFIG_SCSI=y |
132 | CONFIG_BLK_DEV_SD=y | 85 | CONFIG_BLK_DEV_SD=y |
133 | CONFIG_SCSI_MULTI_LUN=y | 86 | CONFIG_SCSI_MULTI_LUN=y |
@@ -158,19 +111,15 @@ CONFIG_TOUCHSCREEN_ADS7846=y | |||
158 | CONFIG_INPUT_MISC=y | 111 | CONFIG_INPUT_MISC=y |
159 | CONFIG_INPUT_TWL4030_PWRBUTTON=y | 112 | CONFIG_INPUT_TWL4030_PWRBUTTON=y |
160 | CONFIG_VT_HW_CONSOLE_BINDING=y | 113 | CONFIG_VT_HW_CONSOLE_BINDING=y |
161 | CONFIG_SERIAL_8250=y | 114 | # CONFIG_LEGACY_PTYS is not set |
162 | CONFIG_SERIAL_8250_CONSOLE=y | ||
163 | CONFIG_SERIAL_8250_NR_UARTS=32 | 115 | CONFIG_SERIAL_8250_NR_UARTS=32 |
164 | CONFIG_SERIAL_8250_EXTENDED=y | 116 | CONFIG_SERIAL_8250_EXTENDED=y |
165 | CONFIG_SERIAL_8250_MANY_PORTS=y | 117 | CONFIG_SERIAL_8250_MANY_PORTS=y |
166 | CONFIG_SERIAL_8250_SHARE_IRQ=y | 118 | CONFIG_SERIAL_8250_SHARE_IRQ=y |
167 | CONFIG_SERIAL_8250_DETECT_IRQ=y | 119 | CONFIG_SERIAL_8250_DETECT_IRQ=y |
168 | CONFIG_SERIAL_8250_RSA=y | 120 | CONFIG_SERIAL_8250_RSA=y |
169 | # CONFIG_LEGACY_PTYS is not set | ||
170 | CONFIG_HW_RANDOM=y | 121 | CONFIG_HW_RANDOM=y |
171 | CONFIG_I2C=y | ||
172 | CONFIG_I2C_CHARDEV=y | 122 | CONFIG_I2C_CHARDEV=y |
173 | CONFIG_I2C_OMAP=y | ||
174 | CONFIG_SPI=y | 123 | CONFIG_SPI=y |
175 | CONFIG_SPI_OMAP24XX=y | 124 | CONFIG_SPI_OMAP24XX=y |
176 | CONFIG_DEBUG_GPIO=y | 125 | CONFIG_DEBUG_GPIO=y |
@@ -181,10 +130,6 @@ CONFIG_POWER_SUPPLY=y | |||
181 | CONFIG_WATCHDOG=y | 130 | CONFIG_WATCHDOG=y |
182 | CONFIG_OMAP_WATCHDOG=y | 131 | CONFIG_OMAP_WATCHDOG=y |
183 | CONFIG_TWL4030_WATCHDOG=y | 132 | CONFIG_TWL4030_WATCHDOG=y |
184 | CONFIG_MENELAUS=y | ||
185 | CONFIG_TWL4030_CORE=y | ||
186 | CONFIG_TWL4030_POWER=y | ||
187 | CONFIG_REGULATOR=y | ||
188 | CONFIG_REGULATOR_TWL4030=y | 133 | CONFIG_REGULATOR_TWL4030=y |
189 | CONFIG_REGULATOR_TPS65023=y | 134 | CONFIG_REGULATOR_TPS65023=y |
190 | CONFIG_REGULATOR_TPS6507X=y | 135 | CONFIG_REGULATOR_TPS6507X=y |
@@ -208,7 +153,6 @@ CONFIG_BACKLIGHT_LCD_SUPPORT=y | |||
208 | CONFIG_LCD_CLASS_DEVICE=y | 153 | CONFIG_LCD_CLASS_DEVICE=y |
209 | CONFIG_LCD_PLATFORM=y | 154 | CONFIG_LCD_PLATFORM=y |
210 | CONFIG_DISPLAY_SUPPORT=y | 155 | CONFIG_DISPLAY_SUPPORT=y |
211 | # CONFIG_VGA_CONSOLE is not set | ||
212 | CONFIG_FRAMEBUFFER_CONSOLE=y | 156 | CONFIG_FRAMEBUFFER_CONSOLE=y |
213 | CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y | 157 | CONFIG_FRAMEBUFFER_CONSOLE_ROTATION=y |
214 | CONFIG_FONTS=y | 158 | CONFIG_FONTS=y |
@@ -217,25 +161,20 @@ CONFIG_FONT_8x16=y | |||
217 | CONFIG_LOGO=y | 161 | CONFIG_LOGO=y |
218 | CONFIG_SOUND=m | 162 | CONFIG_SOUND=m |
219 | CONFIG_SND=m | 163 | CONFIG_SND=m |
220 | CONFIG_SND_MIXER_OSS=y | 164 | CONFIG_SND_MIXER_OSS=m |
221 | CONFIG_SND_PCM_OSS=y | 165 | CONFIG_SND_PCM_OSS=m |
222 | CONFIG_SND_VERBOSE_PRINTK=y | 166 | CONFIG_SND_VERBOSE_PRINTK=y |
223 | CONFIG_SND_DEBUG=y | 167 | CONFIG_SND_DEBUG=y |
224 | CONFIG_SND_USB_AUDIO=y | 168 | CONFIG_SND_USB_AUDIO=m |
225 | CONFIG_SND_SOC=y | 169 | CONFIG_SND_SOC=m |
226 | CONFIG_SND_OMAP_SOC=y | 170 | CONFIG_SND_OMAP_SOC=m |
227 | CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=y | 171 | CONFIG_SND_OMAP_SOC_OMAP3_PANDORA=m |
228 | CONFIG_USB=y | 172 | CONFIG_USB=y |
229 | CONFIG_USB_DEBUG=y | 173 | CONFIG_USB_DEBUG=y |
230 | CONFIG_USB_ANNOUNCE_NEW_DEVICES=y | 174 | CONFIG_USB_ANNOUNCE_NEW_DEVICES=y |
231 | CONFIG_USB_DEVICEFS=y | 175 | CONFIG_USB_DEVICEFS=y |
232 | CONFIG_USB_SUSPEND=y | 176 | CONFIG_USB_SUSPEND=y |
233 | # CONFIG_USB_OTG_WHITELIST is not set | ||
234 | CONFIG_USB_MON=y | 177 | CONFIG_USB_MON=y |
235 | # CONFIG_USB_MUSB_HDRC is not set | ||
236 | # CONFIG_USB_MUSB_OTG is not set | ||
237 | # CONFIG_USB_GADGET_MUSB_HDRC is not set | ||
238 | CONFIG_USB_MUSB_DEBUG=y | ||
239 | CONFIG_USB_WDM=y | 178 | CONFIG_USB_WDM=y |
240 | CONFIG_USB_STORAGE=y | 179 | CONFIG_USB_STORAGE=y |
241 | CONFIG_USB_LIBUSUAL=y | 180 | CONFIG_USB_LIBUSUAL=y |
@@ -250,18 +189,12 @@ CONFIG_MMC_UNSAFE_RESUME=y | |||
250 | CONFIG_SDIO_UART=y | 189 | CONFIG_SDIO_UART=y |
251 | CONFIG_MMC_OMAP=y | 190 | CONFIG_MMC_OMAP=y |
252 | CONFIG_MMC_OMAP_HS=y | 191 | CONFIG_MMC_OMAP_HS=y |
253 | CONFIG_LEDS_CLASS=y | ||
254 | CONFIG_LEDS_GPIO=y | ||
255 | CONFIG_LEDS_TRIGGER_TIMER=y | ||
256 | CONFIG_LEDS_TRIGGER_HEARTBEAT=y | ||
257 | CONFIG_LEDS_TRIGGER_DEFAULT_ON=y | ||
258 | CONFIG_RTC_CLASS=y | 192 | CONFIG_RTC_CLASS=y |
259 | CONFIG_RTC_DRV_TWL92330=y | 193 | CONFIG_RTC_DRV_TWL92330=y |
260 | CONFIG_RTC_DRV_TWL4030=y | 194 | CONFIG_RTC_DRV_TWL4030=y |
261 | CONFIG_EXT2_FS=y | 195 | CONFIG_EXT2_FS=y |
262 | CONFIG_EXT3_FS=y | 196 | CONFIG_EXT3_FS=y |
263 | # CONFIG_EXT3_FS_XATTR is not set | 197 | # CONFIG_EXT3_FS_XATTR is not set |
264 | CONFIG_INOTIFY=y | ||
265 | CONFIG_QUOTA=y | 198 | CONFIG_QUOTA=y |
266 | CONFIG_QFMT_V2=y | 199 | CONFIG_QFMT_V2=y |
267 | CONFIG_MSDOS_FS=y | 200 | CONFIG_MSDOS_FS=y |
@@ -285,12 +218,10 @@ CONFIG_NLS_CODEPAGE_437=y | |||
285 | CONFIG_NLS_ISO8859_1=y | 218 | CONFIG_NLS_ISO8859_1=y |
286 | CONFIG_PRINTK_TIME=y | 219 | CONFIG_PRINTK_TIME=y |
287 | CONFIG_MAGIC_SYSRQ=y | 220 | CONFIG_MAGIC_SYSRQ=y |
288 | CONFIG_DEBUG_FS=y | ||
289 | CONFIG_DEBUG_KERNEL=y | 221 | CONFIG_DEBUG_KERNEL=y |
290 | CONFIG_SCHEDSTATS=y | 222 | CONFIG_SCHEDSTATS=y |
291 | CONFIG_TIMER_STATS=y | 223 | CONFIG_TIMER_STATS=y |
292 | CONFIG_PROVE_LOCKING=y | 224 | CONFIG_PROVE_LOCKING=y |
293 | # CONFIG_LOCK_STAT is not set | ||
294 | CONFIG_DEBUG_SPINLOCK_SLEEP=y | 225 | CONFIG_DEBUG_SPINLOCK_SLEEP=y |
295 | # CONFIG_DEBUG_BUGVERBOSE is not set | 226 | # CONFIG_DEBUG_BUGVERBOSE is not set |
296 | CONFIG_DEBUG_INFO=y | 227 | CONFIG_DEBUG_INFO=y |
diff --git a/arch/arm/include/asm/bitops.h b/arch/arm/include/asm/bitops.h index 6b7403fd8f54..b4892a06442c 100644 --- a/arch/arm/include/asm/bitops.h +++ b/arch/arm/include/asm/bitops.h | |||
@@ -203,8 +203,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); | |||
203 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) | 203 | #define find_first_bit(p,sz) _find_first_bit_le(p,sz) |
204 | #define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) | 204 | #define find_next_bit(p,sz,off) _find_next_bit_le(p,sz,off) |
205 | 205 | ||
206 | #define WORD_BITOFF_TO_LE(x) ((x)) | ||
207 | |||
208 | #else | 206 | #else |
209 | /* | 207 | /* |
210 | * These are the big endian, atomic definitions. | 208 | * These are the big endian, atomic definitions. |
@@ -214,8 +212,6 @@ extern int _find_next_bit_be(const unsigned long *p, int size, int offset); | |||
214 | #define find_first_bit(p,sz) _find_first_bit_be(p,sz) | 212 | #define find_first_bit(p,sz) _find_first_bit_be(p,sz) |
215 | #define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) | 213 | #define find_next_bit(p,sz,off) _find_next_bit_be(p,sz,off) |
216 | 214 | ||
217 | #define WORD_BITOFF_TO_LE(x) ((x) ^ 0x18) | ||
218 | |||
219 | #endif | 215 | #endif |
220 | 216 | ||
221 | #if __LINUX_ARM_ARCH__ < 5 | 217 | #if __LINUX_ARM_ARCH__ < 5 |
@@ -287,55 +283,29 @@ static inline int fls(int x) | |||
287 | #include <asm-generic/bitops/hweight.h> | 283 | #include <asm-generic/bitops/hweight.h> |
288 | #include <asm-generic/bitops/lock.h> | 284 | #include <asm-generic/bitops/lock.h> |
289 | 285 | ||
290 | static inline void __set_bit_le(int nr, void *addr) | 286 | #ifdef __ARMEB__ |
291 | { | ||
292 | __set_bit(WORD_BITOFF_TO_LE(nr), addr); | ||
293 | } | ||
294 | |||
295 | static inline void __clear_bit_le(int nr, void *addr) | ||
296 | { | ||
297 | __clear_bit(WORD_BITOFF_TO_LE(nr), addr); | ||
298 | } | ||
299 | |||
300 | static inline int __test_and_set_bit_le(int nr, void *addr) | ||
301 | { | ||
302 | return __test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr); | ||
303 | } | ||
304 | |||
305 | static inline int test_and_set_bit_le(int nr, void *addr) | ||
306 | { | ||
307 | return test_and_set_bit(WORD_BITOFF_TO_LE(nr), addr); | ||
308 | } | ||
309 | |||
310 | static inline int __test_and_clear_bit_le(int nr, void *addr) | ||
311 | { | ||
312 | return __test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr); | ||
313 | } | ||
314 | |||
315 | static inline int test_and_clear_bit_le(int nr, void *addr) | ||
316 | { | ||
317 | return test_and_clear_bit(WORD_BITOFF_TO_LE(nr), addr); | ||
318 | } | ||
319 | |||
320 | static inline int test_bit_le(int nr, const void *addr) | ||
321 | { | ||
322 | return test_bit(WORD_BITOFF_TO_LE(nr), addr); | ||
323 | } | ||
324 | 287 | ||
325 | static inline int find_first_zero_bit_le(const void *p, unsigned size) | 288 | static inline int find_first_zero_bit_le(const void *p, unsigned size) |
326 | { | 289 | { |
327 | return _find_first_zero_bit_le(p, size); | 290 | return _find_first_zero_bit_le(p, size); |
328 | } | 291 | } |
292 | #define find_first_zero_bit_le find_first_zero_bit_le | ||
329 | 293 | ||
330 | static inline int find_next_zero_bit_le(const void *p, int size, int offset) | 294 | static inline int find_next_zero_bit_le(const void *p, int size, int offset) |
331 | { | 295 | { |
332 | return _find_next_zero_bit_le(p, size, offset); | 296 | return _find_next_zero_bit_le(p, size, offset); |
333 | } | 297 | } |
298 | #define find_next_zero_bit_le find_next_zero_bit_le | ||
334 | 299 | ||
335 | static inline int find_next_bit_le(const void *p, int size, int offset) | 300 | static inline int find_next_bit_le(const void *p, int size, int offset) |
336 | { | 301 | { |
337 | return _find_next_bit_le(p, size, offset); | 302 | return _find_next_bit_le(p, size, offset); |
338 | } | 303 | } |
304 | #define find_next_bit_le find_next_bit_le | ||
305 | |||
306 | #endif | ||
307 | |||
308 | #include <asm-generic/bitops/le.h> | ||
339 | 309 | ||
340 | /* | 310 | /* |
341 | * Ext2 is defined to use little-endian byte ordering. | 311 | * Ext2 is defined to use little-endian byte ordering. |
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig index b997a35830fc..19d5891c48e3 100644 --- a/arch/arm/mach-omap2/Kconfig +++ b/arch/arm/mach-omap2/Kconfig | |||
@@ -288,6 +288,7 @@ config MACH_IGEP0030 | |||
288 | depends on ARCH_OMAP3 | 288 | depends on ARCH_OMAP3 |
289 | default y | 289 | default y |
290 | select OMAP_PACKAGE_CBB | 290 | select OMAP_PACKAGE_CBB |
291 | select MACH_IGEP0020 | ||
291 | 292 | ||
292 | config MACH_SBC3530 | 293 | config MACH_SBC3530 |
293 | bool "OMAP3 SBC STALKER board" | 294 | bool "OMAP3 SBC STALKER board" |
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile index 66dfbccacd25..b14807794401 100644 --- a/arch/arm/mach-omap2/Makefile +++ b/arch/arm/mach-omap2/Makefile | |||
@@ -229,8 +229,6 @@ obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o \ | |||
229 | obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o | 229 | obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o |
230 | obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o \ | 230 | obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o \ |
231 | hsmmc.o | 231 | hsmmc.o |
232 | obj-$(CONFIG_MACH_IGEP0030) += board-igep0030.o \ | ||
233 | hsmmc.o | ||
234 | obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o \ | 232 | obj-$(CONFIG_MACH_OMAP3_TOUCHBOOK) += board-omap3touchbook.o \ |
235 | hsmmc.o | 233 | hsmmc.o |
236 | obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o \ | 234 | obj-$(CONFIG_MACH_OMAP_4430SDP) += board-4430sdp.o \ |
@@ -270,3 +268,5 @@ obj-$(CONFIG_ARCH_OMAP4) += hwspinlock.o | |||
270 | 268 | ||
271 | disp-$(CONFIG_OMAP2_DSS) := display.o | 269 | disp-$(CONFIG_OMAP2_DSS) := display.o |
272 | obj-y += $(disp-m) $(disp-y) | 270 | obj-y += $(disp-m) $(disp-y) |
271 | |||
272 | obj-y += common-board-devices.o | ||
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c index 1fa6bb896f41..d54969be0a54 100644 --- a/arch/arm/mach-omap2/board-2430sdp.c +++ b/arch/arm/mach-omap2/board-2430sdp.c | |||
@@ -41,6 +41,7 @@ | |||
41 | 41 | ||
42 | #include "mux.h" | 42 | #include "mux.h" |
43 | #include "hsmmc.h" | 43 | #include "hsmmc.h" |
44 | #include "common-board-devices.h" | ||
44 | 45 | ||
45 | #define SDP2430_CS0_BASE 0x04000000 | 46 | #define SDP2430_CS0_BASE 0x04000000 |
46 | #define SECONDARY_LCD_GPIO 147 | 47 | #define SECONDARY_LCD_GPIO 147 |
@@ -180,15 +181,6 @@ static struct twl4030_platform_data sdp2430_twldata = { | |||
180 | .vmmc1 = &sdp2430_vmmc1, | 181 | .vmmc1 = &sdp2430_vmmc1, |
181 | }; | 182 | }; |
182 | 183 | ||
183 | static struct i2c_board_info __initdata sdp2430_i2c_boardinfo[] = { | ||
184 | { | ||
185 | I2C_BOARD_INFO("twl4030", 0x48), | ||
186 | .flags = I2C_CLIENT_WAKE, | ||
187 | .irq = INT_24XX_SYS_NIRQ, | ||
188 | .platform_data = &sdp2430_twldata, | ||
189 | }, | ||
190 | }; | ||
191 | |||
192 | static struct i2c_board_info __initdata sdp2430_i2c1_boardinfo[] = { | 184 | static struct i2c_board_info __initdata sdp2430_i2c1_boardinfo[] = { |
193 | { | 185 | { |
194 | I2C_BOARD_INFO("isp1301_omap", 0x2D), | 186 | I2C_BOARD_INFO("isp1301_omap", 0x2D), |
@@ -201,8 +193,7 @@ static int __init omap2430_i2c_init(void) | |||
201 | { | 193 | { |
202 | omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo, | 194 | omap_register_i2c_bus(1, 100, sdp2430_i2c1_boardinfo, |
203 | ARRAY_SIZE(sdp2430_i2c1_boardinfo)); | 195 | ARRAY_SIZE(sdp2430_i2c1_boardinfo)); |
204 | omap_register_i2c_bus(2, 2600, sdp2430_i2c_boardinfo, | 196 | omap2_pmic_init("twl4030", &sdp2430_twldata); |
205 | ARRAY_SIZE(sdp2430_i2c_boardinfo)); | ||
206 | return 0; | 197 | return 0; |
207 | } | 198 | } |
208 | 199 | ||
@@ -217,11 +208,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = { | |||
217 | {} /* Terminator */ | 208 | {} /* Terminator */ |
218 | }; | 209 | }; |
219 | 210 | ||
220 | static struct omap_musb_board_data musb_board_data = { | ||
221 | .interface_type = MUSB_INTERFACE_ULPI, | ||
222 | .mode = MUSB_OTG, | ||
223 | .power = 100, | ||
224 | }; | ||
225 | static struct omap_usb_config sdp2430_usb_config __initdata = { | 211 | static struct omap_usb_config sdp2430_usb_config __initdata = { |
226 | .otg = 1, | 212 | .otg = 1, |
227 | #ifdef CONFIG_USB_GADGET_OMAP | 213 | #ifdef CONFIG_USB_GADGET_OMAP |
@@ -240,8 +226,6 @@ static struct omap_board_mux board_mux[] __initdata = { | |||
240 | 226 | ||
241 | static void __init omap_2430sdp_init(void) | 227 | static void __init omap_2430sdp_init(void) |
242 | { | 228 | { |
243 | int ret; | ||
244 | |||
245 | omap2430_mux_init(board_mux, OMAP_PACKAGE_ZAC); | 229 | omap2430_mux_init(board_mux, OMAP_PACKAGE_ZAC); |
246 | 230 | ||
247 | omap_board_config = sdp2430_config; | 231 | omap_board_config = sdp2430_config; |
@@ -255,14 +239,13 @@ static void __init omap_2430sdp_init(void) | |||
255 | omap2_usbfs_init(&sdp2430_usb_config); | 239 | omap2_usbfs_init(&sdp2430_usb_config); |
256 | 240 | ||
257 | omap_mux_init_signal("usb0hs_stp", OMAP_PULL_ENA | OMAP_PULL_UP); | 241 | omap_mux_init_signal("usb0hs_stp", OMAP_PULL_ENA | OMAP_PULL_UP); |
258 | usb_musb_init(&musb_board_data); | 242 | usb_musb_init(NULL); |
259 | 243 | ||
260 | board_smc91x_init(); | 244 | board_smc91x_init(); |
261 | 245 | ||
262 | /* Turn off secondary LCD backlight */ | 246 | /* Turn off secondary LCD backlight */ |
263 | ret = gpio_request(SECONDARY_LCD_GPIO, "Secondary LCD backlight"); | 247 | gpio_request_one(SECONDARY_LCD_GPIO, GPIOF_OUT_INIT_LOW, |
264 | if (ret == 0) | 248 | "Secondary LCD backlight"); |
265 | gpio_direction_output(SECONDARY_LCD_GPIO, 0); | ||
266 | } | 249 | } |
267 | 250 | ||
268 | static void __init omap_2430sdp_map_io(void) | 251 | static void __init omap_2430sdp_map_io(void) |
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c index 23244cd0a5b6..ae2963a98041 100644 --- a/arch/arm/mach-omap2/board-3430sdp.c +++ b/arch/arm/mach-omap2/board-3430sdp.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/input.h> | 19 | #include <linux/input.h> |
20 | #include <linux/input/matrix_keypad.h> | 20 | #include <linux/input/matrix_keypad.h> |
21 | #include <linux/spi/spi.h> | 21 | #include <linux/spi/spi.h> |
22 | #include <linux/spi/ads7846.h> | ||
23 | #include <linux/i2c/twl.h> | 22 | #include <linux/i2c/twl.h> |
24 | #include <linux/regulator/machine.h> | 23 | #include <linux/regulator/machine.h> |
25 | #include <linux/io.h> | 24 | #include <linux/io.h> |
@@ -48,6 +47,7 @@ | |||
48 | #include "hsmmc.h" | 47 | #include "hsmmc.h" |
49 | #include "pm.h" | 48 | #include "pm.h" |
50 | #include "control.h" | 49 | #include "control.h" |
50 | #include "common-board-devices.h" | ||
51 | 51 | ||
52 | #define CONFIG_DISABLE_HFCLK 1 | 52 | #define CONFIG_DISABLE_HFCLK 1 |
53 | 53 | ||
@@ -59,24 +59,6 @@ | |||
59 | 59 | ||
60 | #define TWL4030_MSECURE_GPIO 22 | 60 | #define TWL4030_MSECURE_GPIO 22 |
61 | 61 | ||
62 | /* FIXME: These values need to be updated based on more profiling on 3430sdp*/ | ||
63 | static struct cpuidle_params omap3_cpuidle_params_table[] = { | ||
64 | /* C1 */ | ||
65 | {1, 2, 2, 5}, | ||
66 | /* C2 */ | ||
67 | {1, 10, 10, 30}, | ||
68 | /* C3 */ | ||
69 | {1, 50, 50, 300}, | ||
70 | /* C4 */ | ||
71 | {1, 1500, 1800, 4000}, | ||
72 | /* C5 */ | ||
73 | {1, 2500, 7500, 12000}, | ||
74 | /* C6 */ | ||
75 | {1, 3000, 8500, 15000}, | ||
76 | /* C7 */ | ||
77 | {1, 10000, 30000, 300000}, | ||
78 | }; | ||
79 | |||
80 | static uint32_t board_keymap[] = { | 62 | static uint32_t board_keymap[] = { |
81 | KEY(0, 0, KEY_LEFT), | 63 | KEY(0, 0, KEY_LEFT), |
82 | KEY(0, 1, KEY_RIGHT), | 64 | KEY(0, 1, KEY_RIGHT), |
@@ -123,63 +105,14 @@ static struct twl4030_keypad_data sdp3430_kp_data = { | |||
123 | .rep = 1, | 105 | .rep = 1, |
124 | }; | 106 | }; |
125 | 107 | ||
126 | static int ts_gpio; /* Needed for ads7846_get_pendown_state */ | ||
127 | |||
128 | /** | ||
129 | * @brief ads7846_dev_init : Requests & sets GPIO line for pen-irq | ||
130 | * | ||
131 | * @return - void. If request gpio fails then Flag KERN_ERR. | ||
132 | */ | ||
133 | static void ads7846_dev_init(void) | ||
134 | { | ||
135 | if (gpio_request(ts_gpio, "ADS7846 pendown") < 0) { | ||
136 | printk(KERN_ERR "can't get ads746 pen down GPIO\n"); | ||
137 | return; | ||
138 | } | ||
139 | |||
140 | gpio_direction_input(ts_gpio); | ||
141 | gpio_set_debounce(ts_gpio, 310); | ||
142 | } | ||
143 | |||
144 | static int ads7846_get_pendown_state(void) | ||
145 | { | ||
146 | return !gpio_get_value(ts_gpio); | ||
147 | } | ||
148 | |||
149 | static struct ads7846_platform_data tsc2046_config __initdata = { | ||
150 | .get_pendown_state = ads7846_get_pendown_state, | ||
151 | .keep_vref_on = 1, | ||
152 | .wakeup = true, | ||
153 | }; | ||
154 | |||
155 | |||
156 | static struct omap2_mcspi_device_config tsc2046_mcspi_config = { | ||
157 | .turbo_mode = 0, | ||
158 | .single_channel = 1, /* 0: slave, 1: master */ | ||
159 | }; | ||
160 | |||
161 | static struct spi_board_info sdp3430_spi_board_info[] __initdata = { | ||
162 | [0] = { | ||
163 | /* | ||
164 | * TSC2046 operates at a max freqency of 2MHz, so | ||
165 | * operate slightly below at 1.5MHz | ||
166 | */ | ||
167 | .modalias = "ads7846", | ||
168 | .bus_num = 1, | ||
169 | .chip_select = 0, | ||
170 | .max_speed_hz = 1500000, | ||
171 | .controller_data = &tsc2046_mcspi_config, | ||
172 | .irq = 0, | ||
173 | .platform_data = &tsc2046_config, | ||
174 | }, | ||
175 | }; | ||
176 | |||
177 | |||
178 | #define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 8 | 108 | #define SDP3430_LCD_PANEL_BACKLIGHT_GPIO 8 |
179 | #define SDP3430_LCD_PANEL_ENABLE_GPIO 5 | 109 | #define SDP3430_LCD_PANEL_ENABLE_GPIO 5 |
180 | 110 | ||
181 | static unsigned backlight_gpio; | 111 | static struct gpio sdp3430_dss_gpios[] __initdata = { |
182 | static unsigned enable_gpio; | 112 | {SDP3430_LCD_PANEL_ENABLE_GPIO, GPIOF_OUT_INIT_LOW, "LCD reset" }, |
113 | {SDP3430_LCD_PANEL_BACKLIGHT_GPIO, GPIOF_OUT_INIT_LOW, "LCD Backlight"}, | ||
114 | }; | ||
115 | |||
183 | static int lcd_enabled; | 116 | static int lcd_enabled; |
184 | static int dvi_enabled; | 117 | static int dvi_enabled; |
185 | 118 | ||
@@ -187,29 +120,11 @@ static void __init sdp3430_display_init(void) | |||
187 | { | 120 | { |
188 | int r; | 121 | int r; |
189 | 122 | ||
190 | enable_gpio = SDP3430_LCD_PANEL_ENABLE_GPIO; | 123 | r = gpio_request_array(sdp3430_dss_gpios, |
191 | backlight_gpio = SDP3430_LCD_PANEL_BACKLIGHT_GPIO; | 124 | ARRAY_SIZE(sdp3430_dss_gpios)); |
192 | 125 | if (r) | |
193 | r = gpio_request(enable_gpio, "LCD reset"); | 126 | printk(KERN_ERR "failed to get LCD control GPIOs\n"); |
194 | if (r) { | ||
195 | printk(KERN_ERR "failed to get LCD reset GPIO\n"); | ||
196 | goto err0; | ||
197 | } | ||
198 | |||
199 | r = gpio_request(backlight_gpio, "LCD Backlight"); | ||
200 | if (r) { | ||
201 | printk(KERN_ERR "failed to get LCD backlight GPIO\n"); | ||
202 | goto err1; | ||
203 | } | ||
204 | |||
205 | gpio_direction_output(enable_gpio, 0); | ||
206 | gpio_direction_output(backlight_gpio, 0); | ||
207 | 127 | ||
208 | return; | ||
209 | err1: | ||
210 | gpio_free(enable_gpio); | ||
211 | err0: | ||
212 | return; | ||
213 | } | 128 | } |
214 | 129 | ||
215 | static int sdp3430_panel_enable_lcd(struct omap_dss_device *dssdev) | 130 | static int sdp3430_panel_enable_lcd(struct omap_dss_device *dssdev) |
@@ -219,8 +134,8 @@ static int sdp3430_panel_enable_lcd(struct omap_dss_device *dssdev) | |||
219 | return -EINVAL; | 134 | return -EINVAL; |
220 | } | 135 | } |
221 | 136 | ||
222 | gpio_direction_output(enable_gpio, 1); | 137 | gpio_direction_output(SDP3430_LCD_PANEL_ENABLE_GPIO, 1); |
223 | gpio_direction_output(backlight_gpio, 1); | 138 | gpio_direction_output(SDP3430_LCD_PANEL_BACKLIGHT_GPIO, 1); |
224 | 139 | ||
225 | lcd_enabled = 1; | 140 | lcd_enabled = 1; |
226 | 141 | ||
@@ -231,8 +146,8 @@ static void sdp3430_panel_disable_lcd(struct omap_dss_device *dssdev) | |||
231 | { | 146 | { |
232 | lcd_enabled = 0; | 147 | lcd_enabled = 0; |
233 | 148 | ||
234 | gpio_direction_output(enable_gpio, 0); | 149 | gpio_direction_output(SDP3430_LCD_PANEL_ENABLE_GPIO, 0); |
235 | gpio_direction_output(backlight_gpio, 0); | 150 | gpio_direction_output(SDP3430_LCD_PANEL_BACKLIGHT_GPIO, 0); |
236 | } | 151 | } |
237 | 152 | ||
238 | static int sdp3430_panel_enable_dvi(struct omap_dss_device *dssdev) | 153 | static int sdp3430_panel_enable_dvi(struct omap_dss_device *dssdev) |
@@ -360,12 +275,10 @@ static int sdp3430_twl_gpio_setup(struct device *dev, | |||
360 | omap2_hsmmc_init(mmc); | 275 | omap2_hsmmc_init(mmc); |
361 | 276 | ||
362 | /* gpio + 7 is "sub_lcd_en_bkl" (output/PWM1) */ | 277 | /* gpio + 7 is "sub_lcd_en_bkl" (output/PWM1) */ |
363 | gpio_request(gpio + 7, "sub_lcd_en_bkl"); | 278 | gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "sub_lcd_en_bkl"); |
364 | gpio_direction_output(gpio + 7, 0); | ||
365 | 279 | ||
366 | /* gpio + 15 is "sub_lcd_nRST" (output) */ | 280 | /* gpio + 15 is "sub_lcd_nRST" (output) */ |
367 | gpio_request(gpio + 15, "sub_lcd_nRST"); | 281 | gpio_request_one(gpio + 15, GPIOF_OUT_INIT_LOW, "sub_lcd_nRST"); |
368 | gpio_direction_output(gpio + 15, 0); | ||
369 | 282 | ||
370 | return 0; | 283 | return 0; |
371 | } | 284 | } |
@@ -580,20 +493,10 @@ static struct twl4030_platform_data sdp3430_twldata = { | |||
580 | .vpll2 = &sdp3430_vpll2, | 493 | .vpll2 = &sdp3430_vpll2, |
581 | }; | 494 | }; |
582 | 495 | ||
583 | static struct i2c_board_info __initdata sdp3430_i2c_boardinfo[] = { | ||
584 | { | ||
585 | I2C_BOARD_INFO("twl4030", 0x48), | ||
586 | .flags = I2C_CLIENT_WAKE, | ||
587 | .irq = INT_34XX_SYS_NIRQ, | ||
588 | .platform_data = &sdp3430_twldata, | ||
589 | }, | ||
590 | }; | ||
591 | |||
592 | static int __init omap3430_i2c_init(void) | 496 | static int __init omap3430_i2c_init(void) |
593 | { | 497 | { |
594 | /* i2c1 for PMIC only */ | 498 | /* i2c1 for PMIC only */ |
595 | omap_register_i2c_bus(1, 2600, sdp3430_i2c_boardinfo, | 499 | omap3_pmic_init("twl4030", &sdp3430_twldata); |
596 | ARRAY_SIZE(sdp3430_i2c_boardinfo)); | ||
597 | /* i2c2 on camera connector (for sensor control) and optional isp1301 */ | 500 | /* i2c2 on camera connector (for sensor control) and optional isp1301 */ |
598 | omap_register_i2c_bus(2, 400, NULL, 0); | 501 | omap_register_i2c_bus(2, 400, NULL, 0); |
599 | /* i2c3 on display connector (for DVI, tfp410) */ | 502 | /* i2c3 on display connector (for DVI, tfp410) */ |
@@ -872,30 +775,22 @@ static struct flash_partitions sdp_flash_partitions[] = { | |||
872 | }, | 775 | }, |
873 | }; | 776 | }; |
874 | 777 | ||
875 | static struct omap_musb_board_data musb_board_data = { | ||
876 | .interface_type = MUSB_INTERFACE_ULPI, | ||
877 | .mode = MUSB_OTG, | ||
878 | .power = 100, | ||
879 | }; | ||
880 | |||
881 | static void __init omap_3430sdp_init(void) | 778 | static void __init omap_3430sdp_init(void) |
882 | { | 779 | { |
780 | int gpio_pendown; | ||
781 | |||
883 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); | 782 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); |
884 | omap_board_config = sdp3430_config; | 783 | omap_board_config = sdp3430_config; |
885 | omap_board_config_size = ARRAY_SIZE(sdp3430_config); | 784 | omap_board_config_size = ARRAY_SIZE(sdp3430_config); |
886 | omap3_pm_init_cpuidle(omap3_cpuidle_params_table); | ||
887 | omap3430_i2c_init(); | 785 | omap3430_i2c_init(); |
888 | omap_display_init(&sdp3430_dss_data); | 786 | omap_display_init(&sdp3430_dss_data); |
889 | if (omap_rev() > OMAP3430_REV_ES1_0) | 787 | if (omap_rev() > OMAP3430_REV_ES1_0) |
890 | ts_gpio = SDP3430_TS_GPIO_IRQ_SDPV2; | 788 | gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV2; |
891 | else | 789 | else |
892 | ts_gpio = SDP3430_TS_GPIO_IRQ_SDPV1; | 790 | gpio_pendown = SDP3430_TS_GPIO_IRQ_SDPV1; |
893 | sdp3430_spi_board_info[0].irq = gpio_to_irq(ts_gpio); | 791 | omap_ads7846_init(1, gpio_pendown, 310, NULL); |
894 | spi_register_board_info(sdp3430_spi_board_info, | ||
895 | ARRAY_SIZE(sdp3430_spi_board_info)); | ||
896 | ads7846_dev_init(); | ||
897 | board_serial_init(); | 792 | board_serial_init(); |
898 | usb_musb_init(&musb_board_data); | 793 | usb_musb_init(NULL); |
899 | board_smc91x_init(); | 794 | board_smc91x_init(); |
900 | board_flash_init(sdp_flash_partitions, chip_sel_3430, 0); | 795 | board_flash_init(sdp_flash_partitions, chip_sel_3430, 0); |
901 | sdp3430_display_init(); | 796 | sdp3430_display_init(); |
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c index 93edd7fcf451..73fa90bb6953 100644 --- a/arch/arm/mach-omap2/board-4430sdp.c +++ b/arch/arm/mach-omap2/board-4430sdp.c | |||
@@ -42,6 +42,7 @@ | |||
42 | #include "hsmmc.h" | 42 | #include "hsmmc.h" |
43 | #include "timer-gp.h" | 43 | #include "timer-gp.h" |
44 | #include "control.h" | 44 | #include "control.h" |
45 | #include "common-board-devices.h" | ||
45 | 46 | ||
46 | #define ETH_KS8851_IRQ 34 | 47 | #define ETH_KS8851_IRQ 34 |
47 | #define ETH_KS8851_POWER_ON 48 | 48 | #define ETH_KS8851_POWER_ON 48 |
@@ -251,58 +252,22 @@ static struct spi_board_info sdp4430_spi_board_info[] __initdata = { | |||
251 | }, | 252 | }, |
252 | }; | 253 | }; |
253 | 254 | ||
255 | static struct gpio sdp4430_eth_gpios[] __initdata = { | ||
256 | { ETH_KS8851_POWER_ON, GPIOF_OUT_INIT_HIGH, "eth_power" }, | ||
257 | { ETH_KS8851_QUART, GPIOF_OUT_INIT_HIGH, "quart" }, | ||
258 | { ETH_KS8851_IRQ, GPIOF_IN, "eth_irq" }, | ||
259 | }; | ||
260 | |||
254 | static int omap_ethernet_init(void) | 261 | static int omap_ethernet_init(void) |
255 | { | 262 | { |
256 | int status; | 263 | int status; |
257 | 264 | ||
258 | /* Request of GPIO lines */ | 265 | /* Request of GPIO lines */ |
266 | status = gpio_request_array(sdp4430_eth_gpios, | ||
267 | ARRAY_SIZE(sdp4430_eth_gpios)); | ||
268 | if (status) | ||
269 | pr_err("Cannot request ETH GPIOs\n"); | ||
259 | 270 | ||
260 | status = gpio_request(ETH_KS8851_POWER_ON, "eth_power"); | ||
261 | if (status) { | ||
262 | pr_err("Cannot request GPIO %d\n", ETH_KS8851_POWER_ON); | ||
263 | return status; | ||
264 | } | ||
265 | |||
266 | status = gpio_request(ETH_KS8851_QUART, "quart"); | ||
267 | if (status) { | ||
268 | pr_err("Cannot request GPIO %d\n", ETH_KS8851_QUART); | ||
269 | goto error1; | ||
270 | } | ||
271 | |||
272 | status = gpio_request(ETH_KS8851_IRQ, "eth_irq"); | ||
273 | if (status) { | ||
274 | pr_err("Cannot request GPIO %d\n", ETH_KS8851_IRQ); | ||
275 | goto error2; | ||
276 | } | ||
277 | |||
278 | /* Configuration of requested GPIO lines */ | ||
279 | |||
280 | status = gpio_direction_output(ETH_KS8851_POWER_ON, 1); | ||
281 | if (status) { | ||
282 | pr_err("Cannot set output GPIO %d\n", ETH_KS8851_IRQ); | ||
283 | goto error3; | ||
284 | } | ||
285 | |||
286 | status = gpio_direction_output(ETH_KS8851_QUART, 1); | ||
287 | if (status) { | ||
288 | pr_err("Cannot set output GPIO %d\n", ETH_KS8851_QUART); | ||
289 | goto error3; | ||
290 | } | ||
291 | |||
292 | status = gpio_direction_input(ETH_KS8851_IRQ); | ||
293 | if (status) { | ||
294 | pr_err("Cannot set input GPIO %d\n", ETH_KS8851_IRQ); | ||
295 | goto error3; | ||
296 | } | ||
297 | |||
298 | return 0; | ||
299 | |||
300 | error3: | ||
301 | gpio_free(ETH_KS8851_IRQ); | ||
302 | error2: | ||
303 | gpio_free(ETH_KS8851_QUART); | ||
304 | error1: | ||
305 | gpio_free(ETH_KS8851_POWER_ON); | ||
306 | return status; | 271 | return status; |
307 | } | 272 | } |
308 | 273 | ||
@@ -575,14 +540,6 @@ static struct twl4030_platform_data sdp4430_twldata = { | |||
575 | .usb = &omap4_usbphy_data | 540 | .usb = &omap4_usbphy_data |
576 | }; | 541 | }; |
577 | 542 | ||
578 | static struct i2c_board_info __initdata sdp4430_i2c_boardinfo[] = { | ||
579 | { | ||
580 | I2C_BOARD_INFO("twl6030", 0x48), | ||
581 | .flags = I2C_CLIENT_WAKE, | ||
582 | .irq = OMAP44XX_IRQ_SYS_1N, | ||
583 | .platform_data = &sdp4430_twldata, | ||
584 | }, | ||
585 | }; | ||
586 | static struct i2c_board_info __initdata sdp4430_i2c_3_boardinfo[] = { | 543 | static struct i2c_board_info __initdata sdp4430_i2c_3_boardinfo[] = { |
587 | { | 544 | { |
588 | I2C_BOARD_INFO("tmp105", 0x48), | 545 | I2C_BOARD_INFO("tmp105", 0x48), |
@@ -598,12 +555,7 @@ static struct i2c_board_info __initdata sdp4430_i2c_4_boardinfo[] = { | |||
598 | }; | 555 | }; |
599 | static int __init omap4_i2c_init(void) | 556 | static int __init omap4_i2c_init(void) |
600 | { | 557 | { |
601 | /* | 558 | omap4_pmic_init("twl6030", &sdp4430_twldata); |
602 | * Phoenix Audio IC needs I2C1 to | ||
603 | * start with 400 KHz or less | ||
604 | */ | ||
605 | omap_register_i2c_bus(1, 400, sdp4430_i2c_boardinfo, | ||
606 | ARRAY_SIZE(sdp4430_i2c_boardinfo)); | ||
607 | omap_register_i2c_bus(2, 400, NULL, 0); | 559 | omap_register_i2c_bus(2, 400, NULL, 0); |
608 | omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo, | 560 | omap_register_i2c_bus(3, 400, sdp4430_i2c_3_boardinfo, |
609 | ARRAY_SIZE(sdp4430_i2c_3_boardinfo)); | 561 | ARRAY_SIZE(sdp4430_i2c_3_boardinfo)); |
@@ -614,21 +566,13 @@ static int __init omap4_i2c_init(void) | |||
614 | 566 | ||
615 | static void __init omap_sfh7741prox_init(void) | 567 | static void __init omap_sfh7741prox_init(void) |
616 | { | 568 | { |
617 | int error; | 569 | int error; |
618 | 570 | ||
619 | error = gpio_request(OMAP4_SFH7741_ENABLE_GPIO, "sfh7741"); | 571 | error = gpio_request_one(OMAP4_SFH7741_ENABLE_GPIO, |
620 | if (error < 0) { | 572 | GPIOF_OUT_INIT_LOW, "sfh7741"); |
573 | if (error < 0) | ||
621 | pr_err("%s:failed to request GPIO %d, error %d\n", | 574 | pr_err("%s:failed to request GPIO %d, error %d\n", |
622 | __func__, OMAP4_SFH7741_ENABLE_GPIO, error); | 575 | __func__, OMAP4_SFH7741_ENABLE_GPIO, error); |
623 | return; | ||
624 | } | ||
625 | |||
626 | error = gpio_direction_output(OMAP4_SFH7741_ENABLE_GPIO , 0); | ||
627 | if (error < 0) { | ||
628 | pr_err("%s: GPIO configuration failed: GPIO %d,error %d\n", | ||
629 | __func__, OMAP4_SFH7741_ENABLE_GPIO, error); | ||
630 | gpio_free(OMAP4_SFH7741_ENABLE_GPIO); | ||
631 | } | ||
632 | } | 576 | } |
633 | 577 | ||
634 | static void sdp4430_hdmi_mux_init(void) | 578 | static void sdp4430_hdmi_mux_init(void) |
@@ -645,27 +589,19 @@ static void sdp4430_hdmi_mux_init(void) | |||
645 | OMAP_PIN_INPUT_PULLUP); | 589 | OMAP_PIN_INPUT_PULLUP); |
646 | } | 590 | } |
647 | 591 | ||
592 | static struct gpio sdp4430_hdmi_gpios[] = { | ||
593 | { HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" }, | ||
594 | { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" }, | ||
595 | }; | ||
596 | |||
648 | static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev) | 597 | static int sdp4430_panel_enable_hdmi(struct omap_dss_device *dssdev) |
649 | { | 598 | { |
650 | int status; | 599 | int status; |
651 | 600 | ||
652 | status = gpio_request_one(HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, | 601 | status = gpio_request_array(sdp4430_hdmi_gpios, |
653 | "hdmi_gpio_hpd"); | 602 | ARRAY_SIZE(sdp4430_hdmi_gpios)); |
654 | if (status) { | 603 | if (status) |
655 | pr_err("Cannot request GPIO %d\n", HDMI_GPIO_HPD); | 604 | pr_err("%s: Cannot request HDMI GPIOs\n", __func__); |
656 | return status; | ||
657 | } | ||
658 | status = gpio_request_one(HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, | ||
659 | "hdmi_gpio_ls_oe"); | ||
660 | if (status) { | ||
661 | pr_err("Cannot request GPIO %d\n", HDMI_GPIO_LS_OE); | ||
662 | goto error1; | ||
663 | } | ||
664 | |||
665 | return 0; | ||
666 | |||
667 | error1: | ||
668 | gpio_free(HDMI_GPIO_HPD); | ||
669 | 605 | ||
670 | return status; | 606 | return status; |
671 | } | 607 | } |
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c index a890d244fec6..5e438a77cd72 100644 --- a/arch/arm/mach-omap2/board-am3517crane.c +++ b/arch/arm/mach-omap2/board-am3517crane.c | |||
@@ -89,19 +89,13 @@ static void __init am3517_crane_init(void) | |||
89 | return; | 89 | return; |
90 | } | 90 | } |
91 | 91 | ||
92 | ret = gpio_request(GPIO_USB_POWER, "usb_ehci_enable"); | 92 | ret = gpio_request_one(GPIO_USB_POWER, GPIOF_OUT_INIT_HIGH, |
93 | "usb_ehci_enable"); | ||
93 | if (ret < 0) { | 94 | if (ret < 0) { |
94 | pr_err("Can not request GPIO %d\n", GPIO_USB_POWER); | 95 | pr_err("Can not request GPIO %d\n", GPIO_USB_POWER); |
95 | return; | 96 | return; |
96 | } | 97 | } |
97 | 98 | ||
98 | ret = gpio_direction_output(GPIO_USB_POWER, 1); | ||
99 | if (ret < 0) { | ||
100 | gpio_free(GPIO_USB_POWER); | ||
101 | pr_err("Unable to initialize EHCI power\n"); | ||
102 | return; | ||
103 | } | ||
104 | |||
105 | usbhs_init(&usbhs_bdata); | 99 | usbhs_init(&usbhs_bdata); |
106 | } | 100 | } |
107 | 101 | ||
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c index ff8c59be36e5..63af4171c043 100644 --- a/arch/arm/mach-omap2/board-am3517evm.c +++ b/arch/arm/mach-omap2/board-am3517evm.c | |||
@@ -174,19 +174,14 @@ static void __init am3517_evm_rtc_init(void) | |||
174 | int r; | 174 | int r; |
175 | 175 | ||
176 | omap_mux_init_gpio(GPIO_RTCS35390A_IRQ, OMAP_PIN_INPUT_PULLUP); | 176 | omap_mux_init_gpio(GPIO_RTCS35390A_IRQ, OMAP_PIN_INPUT_PULLUP); |
177 | r = gpio_request(GPIO_RTCS35390A_IRQ, "rtcs35390a-irq"); | 177 | |
178 | r = gpio_request_one(GPIO_RTCS35390A_IRQ, GPIOF_IN, "rtcs35390a-irq"); | ||
178 | if (r < 0) { | 179 | if (r < 0) { |
179 | printk(KERN_WARNING "failed to request GPIO#%d\n", | 180 | printk(KERN_WARNING "failed to request GPIO#%d\n", |
180 | GPIO_RTCS35390A_IRQ); | 181 | GPIO_RTCS35390A_IRQ); |
181 | return; | 182 | return; |
182 | } | 183 | } |
183 | r = gpio_direction_input(GPIO_RTCS35390A_IRQ); | 184 | |
184 | if (r < 0) { | ||
185 | printk(KERN_WARNING "GPIO#%d cannot be configured as input\n", | ||
186 | GPIO_RTCS35390A_IRQ); | ||
187 | gpio_free(GPIO_RTCS35390A_IRQ); | ||
188 | return; | ||
189 | } | ||
190 | am3517evm_i2c1_boardinfo[0].irq = gpio_to_irq(GPIO_RTCS35390A_IRQ); | 185 | am3517evm_i2c1_boardinfo[0].irq = gpio_to_irq(GPIO_RTCS35390A_IRQ); |
191 | } | 186 | } |
192 | 187 | ||
@@ -242,6 +237,15 @@ static int dvi_enabled; | |||
242 | 237 | ||
243 | #if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \ | 238 | #if defined(CONFIG_PANEL_SHARP_LQ043T1DG01) || \ |
244 | defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE) | 239 | defined(CONFIG_PANEL_SHARP_LQ043T1DG01_MODULE) |
240 | static struct gpio am3517_evm_dss_gpios[] __initdata = { | ||
241 | /* GPIO 182 = LCD Backlight Power */ | ||
242 | { LCD_PANEL_BKLIGHT_PWR, GPIOF_OUT_INIT_HIGH, "lcd_backlight_pwr" }, | ||
243 | /* GPIO 181 = LCD Panel PWM */ | ||
244 | { LCD_PANEL_PWM, GPIOF_OUT_INIT_HIGH, "lcd bl enable" }, | ||
245 | /* GPIO 176 = LCD Panel Power enable pin */ | ||
246 | { LCD_PANEL_PWR, GPIOF_OUT_INIT_HIGH, "dvi enable" }, | ||
247 | }; | ||
248 | |||
245 | static void __init am3517_evm_display_init(void) | 249 | static void __init am3517_evm_display_init(void) |
246 | { | 250 | { |
247 | int r; | 251 | int r; |
@@ -249,41 +253,15 @@ static void __init am3517_evm_display_init(void) | |||
249 | omap_mux_init_gpio(LCD_PANEL_PWR, OMAP_PIN_INPUT_PULLUP); | 253 | omap_mux_init_gpio(LCD_PANEL_PWR, OMAP_PIN_INPUT_PULLUP); |
250 | omap_mux_init_gpio(LCD_PANEL_BKLIGHT_PWR, OMAP_PIN_INPUT_PULLDOWN); | 254 | omap_mux_init_gpio(LCD_PANEL_BKLIGHT_PWR, OMAP_PIN_INPUT_PULLDOWN); |
251 | omap_mux_init_gpio(LCD_PANEL_PWM, OMAP_PIN_INPUT_PULLDOWN); | 255 | omap_mux_init_gpio(LCD_PANEL_PWM, OMAP_PIN_INPUT_PULLDOWN); |
252 | /* | 256 | |
253 | * Enable GPIO 182 = LCD Backlight Power | 257 | r = gpio_request_array(am3517_evm_dss_gpios, |
254 | */ | 258 | ARRAY_SIZE(am3517_evm_dss_gpios)); |
255 | r = gpio_request(LCD_PANEL_BKLIGHT_PWR, "lcd_backlight_pwr"); | ||
256 | if (r) { | 259 | if (r) { |
257 | printk(KERN_ERR "failed to get lcd_backlight_pwr\n"); | 260 | printk(KERN_ERR "failed to get DSS panel control GPIOs\n"); |
258 | return; | 261 | return; |
259 | } | 262 | } |
260 | gpio_direction_output(LCD_PANEL_BKLIGHT_PWR, 1); | ||
261 | /* | ||
262 | * Enable GPIO 181 = LCD Panel PWM | ||
263 | */ | ||
264 | r = gpio_request(LCD_PANEL_PWM, "lcd_pwm"); | ||
265 | if (r) { | ||
266 | printk(KERN_ERR "failed to get lcd_pwm\n"); | ||
267 | goto err_1; | ||
268 | } | ||
269 | gpio_direction_output(LCD_PANEL_PWM, 1); | ||
270 | /* | ||
271 | * Enable GPIO 176 = LCD Panel Power enable pin | ||
272 | */ | ||
273 | r = gpio_request(LCD_PANEL_PWR, "lcd_panel_pwr"); | ||
274 | if (r) { | ||
275 | printk(KERN_ERR "failed to get lcd_panel_pwr\n"); | ||
276 | goto err_2; | ||
277 | } | ||
278 | gpio_direction_output(LCD_PANEL_PWR, 1); | ||
279 | 263 | ||
280 | printk(KERN_INFO "Display initialized successfully\n"); | 264 | printk(KERN_INFO "Display initialized successfully\n"); |
281 | return; | ||
282 | |||
283 | err_2: | ||
284 | gpio_free(LCD_PANEL_PWM); | ||
285 | err_1: | ||
286 | gpio_free(LCD_PANEL_BKLIGHT_PWR); | ||
287 | } | 265 | } |
288 | #else | 266 | #else |
289 | static void __init am3517_evm_display_init(void) {} | 267 | static void __init am3517_evm_display_init(void) {} |
@@ -396,7 +374,7 @@ static struct omap_musb_board_data musb_board_data = { | |||
396 | .power = 500, | 374 | .power = 500, |
397 | .set_phy_power = am35x_musb_phy_power, | 375 | .set_phy_power = am35x_musb_phy_power, |
398 | .clear_irq = am35x_musb_clear_irq, | 376 | .clear_irq = am35x_musb_clear_irq, |
399 | .set_mode = am35x_musb_set_mode, | 377 | .set_mode = am35x_set_mode, |
400 | .reset = am35x_musb_reset, | 378 | .reset = am35x_musb_reset, |
401 | }; | 379 | }; |
402 | 380 | ||
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c index f4f8374a0298..f3beb8eeef77 100644 --- a/arch/arm/mach-omap2/board-apollon.c +++ b/arch/arm/mach-omap2/board-apollon.c | |||
@@ -202,6 +202,7 @@ static inline void __init apollon_init_smc91x(void) | |||
202 | unsigned int rate; | 202 | unsigned int rate; |
203 | struct clk *gpmc_fck; | 203 | struct clk *gpmc_fck; |
204 | int eth_cs; | 204 | int eth_cs; |
205 | int err; | ||
205 | 206 | ||
206 | gpmc_fck = clk_get(NULL, "gpmc_fck"); /* Always on ENABLE_ON_INIT */ | 207 | gpmc_fck = clk_get(NULL, "gpmc_fck"); /* Always on ENABLE_ON_INIT */ |
207 | if (IS_ERR(gpmc_fck)) { | 208 | if (IS_ERR(gpmc_fck)) { |
@@ -245,15 +246,13 @@ static inline void __init apollon_init_smc91x(void) | |||
245 | apollon_smc91x_resources[0].end = base + 0x30f; | 246 | apollon_smc91x_resources[0].end = base + 0x30f; |
246 | udelay(100); | 247 | udelay(100); |
247 | 248 | ||
248 | omap_mux_init_gpio(74, 0); | 249 | omap_mux_init_gpio(APOLLON_ETHR_GPIO_IRQ, 0); |
249 | if (gpio_request(APOLLON_ETHR_GPIO_IRQ, "SMC91x irq") < 0) { | 250 | err = gpio_request_one(APOLLON_ETHR_GPIO_IRQ, GPIOF_IN, "SMC91x irq"); |
251 | if (err) { | ||
250 | printk(KERN_ERR "Failed to request GPIO%d for smc91x IRQ\n", | 252 | printk(KERN_ERR "Failed to request GPIO%d for smc91x IRQ\n", |
251 | APOLLON_ETHR_GPIO_IRQ); | 253 | APOLLON_ETHR_GPIO_IRQ); |
252 | gpmc_cs_free(APOLLON_ETH_CS); | 254 | gpmc_cs_free(APOLLON_ETH_CS); |
253 | goto out; | ||
254 | } | 255 | } |
255 | gpio_direction_input(APOLLON_ETHR_GPIO_IRQ); | ||
256 | |||
257 | out: | 256 | out: |
258 | clk_disable(gpmc_fck); | 257 | clk_disable(gpmc_fck); |
259 | clk_put(gpmc_fck); | 258 | clk_put(gpmc_fck); |
@@ -280,20 +279,19 @@ static void __init omap_apollon_init_early(void) | |||
280 | omap2_init_common_devices(NULL, NULL); | 279 | omap2_init_common_devices(NULL, NULL); |
281 | } | 280 | } |
282 | 281 | ||
282 | static struct gpio apollon_gpio_leds[] __initdata = { | ||
283 | { LED0_GPIO13, GPIOF_OUT_INIT_LOW, "LED0" }, /* LED0 - AA10 */ | ||
284 | { LED1_GPIO14, GPIOF_OUT_INIT_LOW, "LED1" }, /* LED1 - AA6 */ | ||
285 | { LED2_GPIO15, GPIOF_OUT_INIT_LOW, "LED2" }, /* LED2 - AA4 */ | ||
286 | }; | ||
287 | |||
283 | static void __init apollon_led_init(void) | 288 | static void __init apollon_led_init(void) |
284 | { | 289 | { |
285 | /* LED0 - AA10 */ | ||
286 | omap_mux_init_signal("vlynq_clk.gpio_13", 0); | 290 | omap_mux_init_signal("vlynq_clk.gpio_13", 0); |
287 | gpio_request(LED0_GPIO13, "LED0"); | ||
288 | gpio_direction_output(LED0_GPIO13, 0); | ||
289 | /* LED1 - AA6 */ | ||
290 | omap_mux_init_signal("vlynq_rx1.gpio_14", 0); | 291 | omap_mux_init_signal("vlynq_rx1.gpio_14", 0); |
291 | gpio_request(LED1_GPIO14, "LED1"); | ||
292 | gpio_direction_output(LED1_GPIO14, 0); | ||
293 | /* LED2 - AA4 */ | ||
294 | omap_mux_init_signal("vlynq_rx0.gpio_15", 0); | 292 | omap_mux_init_signal("vlynq_rx0.gpio_15", 0); |
295 | gpio_request(LED2_GPIO15, "LED2"); | 293 | |
296 | gpio_direction_output(LED2_GPIO15, 0); | 294 | gpio_request_array(apollon_gpio_leds, ARRAY_SIZE(apollon_gpio_leds)); |
297 | } | 295 | } |
298 | 296 | ||
299 | static void __init apollon_usb_init(void) | 297 | static void __init apollon_usb_init(void) |
@@ -301,8 +299,7 @@ static void __init apollon_usb_init(void) | |||
301 | /* USB device */ | 299 | /* USB device */ |
302 | /* DEVICE_SUSPEND */ | 300 | /* DEVICE_SUSPEND */ |
303 | omap_mux_init_signal("mcbsp2_clkx.gpio_12", 0); | 301 | omap_mux_init_signal("mcbsp2_clkx.gpio_12", 0); |
304 | gpio_request(12, "USB suspend"); | 302 | gpio_request_one(12, GPIOF_OUT_INIT_LOW, "USB suspend"); |
305 | gpio_direction_output(12, 0); | ||
306 | omap2_usbfs_init(&apollon_usb_config); | 303 | omap2_usbfs_init(&apollon_usb_config); |
307 | } | 304 | } |
308 | 305 | ||
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c index 9340f6a06f4a..c63115bc1536 100644 --- a/arch/arm/mach-omap2/board-cm-t35.c +++ b/arch/arm/mach-omap2/board-cm-t35.c | |||
@@ -54,6 +54,7 @@ | |||
54 | #include "mux.h" | 54 | #include "mux.h" |
55 | #include "sdram-micron-mt46h32m32lf-6.h" | 55 | #include "sdram-micron-mt46h32m32lf-6.h" |
56 | #include "hsmmc.h" | 56 | #include "hsmmc.h" |
57 | #include "common-board-devices.h" | ||
57 | 58 | ||
58 | #define CM_T35_GPIO_PENDOWN 57 | 59 | #define CM_T35_GPIO_PENDOWN 57 |
59 | 60 | ||
@@ -66,86 +67,28 @@ | |||
66 | 67 | ||
67 | #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) | 68 | #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) |
68 | #include <linux/smsc911x.h> | 69 | #include <linux/smsc911x.h> |
70 | #include <plat/gpmc-smsc911x.h> | ||
69 | 71 | ||
70 | static struct smsc911x_platform_config cm_t35_smsc911x_config = { | 72 | static struct omap_smsc911x_platform_data cm_t35_smsc911x_cfg = { |
71 | .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, | ||
72 | .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, | ||
73 | .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, | ||
74 | .phy_interface = PHY_INTERFACE_MODE_MII, | ||
75 | }; | ||
76 | |||
77 | static struct resource cm_t35_smsc911x_resources[] = { | ||
78 | { | ||
79 | .flags = IORESOURCE_MEM, | ||
80 | }, | ||
81 | { | ||
82 | .start = OMAP_GPIO_IRQ(CM_T35_SMSC911X_GPIO), | ||
83 | .end = OMAP_GPIO_IRQ(CM_T35_SMSC911X_GPIO), | ||
84 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, | ||
85 | }, | ||
86 | }; | ||
87 | |||
88 | static struct platform_device cm_t35_smsc911x_device = { | ||
89 | .name = "smsc911x", | ||
90 | .id = 0, | 73 | .id = 0, |
91 | .num_resources = ARRAY_SIZE(cm_t35_smsc911x_resources), | 74 | .cs = CM_T35_SMSC911X_CS, |
92 | .resource = cm_t35_smsc911x_resources, | 75 | .gpio_irq = CM_T35_SMSC911X_GPIO, |
93 | .dev = { | 76 | .gpio_reset = -EINVAL, |
94 | .platform_data = &cm_t35_smsc911x_config, | 77 | .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, |
95 | }, | ||
96 | }; | ||
97 | |||
98 | static struct resource sb_t35_smsc911x_resources[] = { | ||
99 | { | ||
100 | .flags = IORESOURCE_MEM, | ||
101 | }, | ||
102 | { | ||
103 | .start = OMAP_GPIO_IRQ(SB_T35_SMSC911X_GPIO), | ||
104 | .end = OMAP_GPIO_IRQ(SB_T35_SMSC911X_GPIO), | ||
105 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, | ||
106 | }, | ||
107 | }; | 78 | }; |
108 | 79 | ||
109 | static struct platform_device sb_t35_smsc911x_device = { | 80 | static struct omap_smsc911x_platform_data sb_t35_smsc911x_cfg = { |
110 | .name = "smsc911x", | ||
111 | .id = 1, | 81 | .id = 1, |
112 | .num_resources = ARRAY_SIZE(sb_t35_smsc911x_resources), | 82 | .cs = SB_T35_SMSC911X_CS, |
113 | .resource = sb_t35_smsc911x_resources, | 83 | .gpio_irq = SB_T35_SMSC911X_GPIO, |
114 | .dev = { | 84 | .gpio_reset = -EINVAL, |
115 | .platform_data = &cm_t35_smsc911x_config, | 85 | .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, |
116 | }, | ||
117 | }; | 86 | }; |
118 | 87 | ||
119 | static void __init cm_t35_init_smsc911x(struct platform_device *dev, | ||
120 | int cs, int irq_gpio) | ||
121 | { | ||
122 | unsigned long cs_mem_base; | ||
123 | |||
124 | if (gpmc_cs_request(cs, SZ_16M, &cs_mem_base) < 0) { | ||
125 | pr_err("CM-T35: Failed request for GPMC mem for smsc911x\n"); | ||
126 | return; | ||
127 | } | ||
128 | |||
129 | dev->resource[0].start = cs_mem_base + 0x0; | ||
130 | dev->resource[0].end = cs_mem_base + 0xff; | ||
131 | |||
132 | if ((gpio_request(irq_gpio, "ETH IRQ") == 0) && | ||
133 | (gpio_direction_input(irq_gpio) == 0)) { | ||
134 | gpio_export(irq_gpio, 0); | ||
135 | } else { | ||
136 | pr_err("CM-T35: could not obtain gpio for SMSC911X IRQ\n"); | ||
137 | return; | ||
138 | } | ||
139 | |||
140 | platform_device_register(dev); | ||
141 | } | ||
142 | |||
143 | static void __init cm_t35_init_ethernet(void) | 88 | static void __init cm_t35_init_ethernet(void) |
144 | { | 89 | { |
145 | cm_t35_init_smsc911x(&cm_t35_smsc911x_device, | 90 | gpmc_smsc911x_init(&cm_t35_smsc911x_cfg); |
146 | CM_T35_SMSC911X_CS, CM_T35_SMSC911X_GPIO); | 91 | gpmc_smsc911x_init(&sb_t35_smsc911x_cfg); |
147 | cm_t35_init_smsc911x(&sb_t35_smsc911x_device, | ||
148 | SB_T35_SMSC911X_CS, SB_T35_SMSC911X_GPIO); | ||
149 | } | 92 | } |
150 | #else | 93 | #else |
151 | static inline void __init cm_t35_init_ethernet(void) { return; } | 94 | static inline void __init cm_t35_init_ethernet(void) { return; } |
@@ -235,69 +178,10 @@ static void __init cm_t35_init_nand(void) | |||
235 | static inline void cm_t35_init_nand(void) {} | 178 | static inline void cm_t35_init_nand(void) {} |
236 | #endif | 179 | #endif |
237 | 180 | ||
238 | #if defined(CONFIG_TOUCHSCREEN_ADS7846) || \ | ||
239 | defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) | ||
240 | #include <linux/spi/ads7846.h> | ||
241 | |||
242 | static struct omap2_mcspi_device_config ads7846_mcspi_config = { | ||
243 | .turbo_mode = 0, | ||
244 | .single_channel = 1, /* 0: slave, 1: master */ | ||
245 | }; | ||
246 | |||
247 | static int ads7846_get_pendown_state(void) | ||
248 | { | ||
249 | return !gpio_get_value(CM_T35_GPIO_PENDOWN); | ||
250 | } | ||
251 | |||
252 | static struct ads7846_platform_data ads7846_config = { | ||
253 | .x_max = 0x0fff, | ||
254 | .y_max = 0x0fff, | ||
255 | .x_plate_ohms = 180, | ||
256 | .pressure_max = 255, | ||
257 | .debounce_max = 10, | ||
258 | .debounce_tol = 3, | ||
259 | .debounce_rep = 1, | ||
260 | .get_pendown_state = ads7846_get_pendown_state, | ||
261 | .keep_vref_on = 1, | ||
262 | }; | ||
263 | |||
264 | static struct spi_board_info cm_t35_spi_board_info[] __initdata = { | ||
265 | { | ||
266 | .modalias = "ads7846", | ||
267 | .bus_num = 1, | ||
268 | .chip_select = 0, | ||
269 | .max_speed_hz = 1500000, | ||
270 | .controller_data = &ads7846_mcspi_config, | ||
271 | .irq = OMAP_GPIO_IRQ(CM_T35_GPIO_PENDOWN), | ||
272 | .platform_data = &ads7846_config, | ||
273 | }, | ||
274 | }; | ||
275 | |||
276 | static void __init cm_t35_init_ads7846(void) | ||
277 | { | ||
278 | if ((gpio_request(CM_T35_GPIO_PENDOWN, "ADS7846_PENDOWN") == 0) && | ||
279 | (gpio_direction_input(CM_T35_GPIO_PENDOWN) == 0)) { | ||
280 | gpio_export(CM_T35_GPIO_PENDOWN, 0); | ||
281 | } else { | ||
282 | pr_err("CM-T35: could not obtain gpio for ADS7846_PENDOWN\n"); | ||
283 | return; | ||
284 | } | ||
285 | |||
286 | spi_register_board_info(cm_t35_spi_board_info, | ||
287 | ARRAY_SIZE(cm_t35_spi_board_info)); | ||
288 | } | ||
289 | #else | ||
290 | static inline void cm_t35_init_ads7846(void) {} | ||
291 | #endif | ||
292 | |||
293 | #define CM_T35_LCD_EN_GPIO 157 | 181 | #define CM_T35_LCD_EN_GPIO 157 |
294 | #define CM_T35_LCD_BL_GPIO 58 | 182 | #define CM_T35_LCD_BL_GPIO 58 |
295 | #define CM_T35_DVI_EN_GPIO 54 | 183 | #define CM_T35_DVI_EN_GPIO 54 |
296 | 184 | ||
297 | static int lcd_bl_gpio; | ||
298 | static int lcd_en_gpio; | ||
299 | static int dvi_en_gpio; | ||
300 | |||
301 | static int lcd_enabled; | 185 | static int lcd_enabled; |
302 | static int dvi_enabled; | 186 | static int dvi_enabled; |
303 | 187 | ||
@@ -308,8 +192,8 @@ static int cm_t35_panel_enable_lcd(struct omap_dss_device *dssdev) | |||
308 | return -EINVAL; | 192 | return -EINVAL; |
309 | } | 193 | } |
310 | 194 | ||
311 | gpio_set_value(lcd_en_gpio, 1); | 195 | gpio_set_value(CM_T35_LCD_EN_GPIO, 1); |
312 | gpio_set_value(lcd_bl_gpio, 1); | 196 | gpio_set_value(CM_T35_LCD_BL_GPIO, 1); |
313 | 197 | ||
314 | lcd_enabled = 1; | 198 | lcd_enabled = 1; |
315 | 199 | ||
@@ -320,8 +204,8 @@ static void cm_t35_panel_disable_lcd(struct omap_dss_device *dssdev) | |||
320 | { | 204 | { |
321 | lcd_enabled = 0; | 205 | lcd_enabled = 0; |
322 | 206 | ||
323 | gpio_set_value(lcd_bl_gpio, 0); | 207 | gpio_set_value(CM_T35_LCD_BL_GPIO, 0); |
324 | gpio_set_value(lcd_en_gpio, 0); | 208 | gpio_set_value(CM_T35_LCD_EN_GPIO, 0); |
325 | } | 209 | } |
326 | 210 | ||
327 | static int cm_t35_panel_enable_dvi(struct omap_dss_device *dssdev) | 211 | static int cm_t35_panel_enable_dvi(struct omap_dss_device *dssdev) |
@@ -331,7 +215,7 @@ static int cm_t35_panel_enable_dvi(struct omap_dss_device *dssdev) | |||
331 | return -EINVAL; | 215 | return -EINVAL; |
332 | } | 216 | } |
333 | 217 | ||
334 | gpio_set_value(dvi_en_gpio, 0); | 218 | gpio_set_value(CM_T35_DVI_EN_GPIO, 0); |
335 | dvi_enabled = 1; | 219 | dvi_enabled = 1; |
336 | 220 | ||
337 | return 0; | 221 | return 0; |
@@ -339,7 +223,7 @@ static int cm_t35_panel_enable_dvi(struct omap_dss_device *dssdev) | |||
339 | 223 | ||
340 | static void cm_t35_panel_disable_dvi(struct omap_dss_device *dssdev) | 224 | static void cm_t35_panel_disable_dvi(struct omap_dss_device *dssdev) |
341 | { | 225 | { |
342 | gpio_set_value(dvi_en_gpio, 1); | 226 | gpio_set_value(CM_T35_DVI_EN_GPIO, 1); |
343 | dvi_enabled = 0; | 227 | dvi_enabled = 0; |
344 | } | 228 | } |
345 | 229 | ||
@@ -421,62 +305,38 @@ static struct spi_board_info cm_t35_lcd_spi_board_info[] __initdata = { | |||
421 | }, | 305 | }, |
422 | }; | 306 | }; |
423 | 307 | ||
308 | static struct gpio cm_t35_dss_gpios[] __initdata = { | ||
309 | { CM_T35_LCD_EN_GPIO, GPIOF_OUT_INIT_LOW, "lcd enable" }, | ||
310 | { CM_T35_LCD_BL_GPIO, GPIOF_OUT_INIT_LOW, "lcd bl enable" }, | ||
311 | { CM_T35_DVI_EN_GPIO, GPIOF_OUT_INIT_HIGH, "dvi enable" }, | ||
312 | }; | ||
313 | |||
424 | static void __init cm_t35_init_display(void) | 314 | static void __init cm_t35_init_display(void) |
425 | { | 315 | { |
426 | int err; | 316 | int err; |
427 | 317 | ||
428 | lcd_en_gpio = CM_T35_LCD_EN_GPIO; | ||
429 | lcd_bl_gpio = CM_T35_LCD_BL_GPIO; | ||
430 | dvi_en_gpio = CM_T35_DVI_EN_GPIO; | ||
431 | |||
432 | spi_register_board_info(cm_t35_lcd_spi_board_info, | 318 | spi_register_board_info(cm_t35_lcd_spi_board_info, |
433 | ARRAY_SIZE(cm_t35_lcd_spi_board_info)); | 319 | ARRAY_SIZE(cm_t35_lcd_spi_board_info)); |
434 | 320 | ||
435 | err = gpio_request(lcd_en_gpio, "LCD RST"); | 321 | err = gpio_request_array(cm_t35_dss_gpios, |
436 | if (err) { | 322 | ARRAY_SIZE(cm_t35_dss_gpios)); |
437 | pr_err("CM-T35: failed to get LCD reset GPIO\n"); | ||
438 | goto out; | ||
439 | } | ||
440 | |||
441 | err = gpio_request(lcd_bl_gpio, "LCD BL"); | ||
442 | if (err) { | 323 | if (err) { |
443 | pr_err("CM-T35: failed to get LCD backlight control GPIO\n"); | 324 | pr_err("CM-T35: failed to request DSS control GPIOs\n"); |
444 | goto err_lcd_bl; | 325 | return; |
445 | } | ||
446 | |||
447 | err = gpio_request(dvi_en_gpio, "DVI EN"); | ||
448 | if (err) { | ||
449 | pr_err("CM-T35: failed to get DVI reset GPIO\n"); | ||
450 | goto err_dvi_en; | ||
451 | } | 326 | } |
452 | 327 | ||
453 | gpio_export(lcd_en_gpio, 0); | 328 | gpio_export(CM_T35_LCD_EN_GPIO, 0); |
454 | gpio_export(lcd_bl_gpio, 0); | 329 | gpio_export(CM_T35_LCD_BL_GPIO, 0); |
455 | gpio_export(dvi_en_gpio, 0); | 330 | gpio_export(CM_T35_DVI_EN_GPIO, 0); |
456 | gpio_direction_output(lcd_en_gpio, 0); | ||
457 | gpio_direction_output(lcd_bl_gpio, 0); | ||
458 | gpio_direction_output(dvi_en_gpio, 1); | ||
459 | 331 | ||
460 | msleep(50); | 332 | msleep(50); |
461 | gpio_set_value(lcd_en_gpio, 1); | 333 | gpio_set_value(CM_T35_LCD_EN_GPIO, 1); |
462 | 334 | ||
463 | err = omap_display_init(&cm_t35_dss_data); | 335 | err = omap_display_init(&cm_t35_dss_data); |
464 | if (err) { | 336 | if (err) { |
465 | pr_err("CM-T35: failed to register DSS device\n"); | 337 | pr_err("CM-T35: failed to register DSS device\n"); |
466 | goto err_dev_reg; | 338 | gpio_free_array(cm_t35_dss_gpios, ARRAY_SIZE(cm_t35_dss_gpios)); |
467 | } | 339 | } |
468 | |||
469 | return; | ||
470 | |||
471 | err_dev_reg: | ||
472 | gpio_free(dvi_en_gpio); | ||
473 | err_dvi_en: | ||
474 | gpio_free(lcd_bl_gpio); | ||
475 | err_lcd_bl: | ||
476 | gpio_free(lcd_en_gpio); | ||
477 | out: | ||
478 | |||
479 | return; | ||
480 | } | 340 | } |
481 | 341 | ||
482 | static struct regulator_consumer_supply cm_t35_vmmc1_supply = { | 342 | static struct regulator_consumer_supply cm_t35_vmmc1_supply = { |
@@ -609,10 +469,8 @@ static int cm_t35_twl_gpio_setup(struct device *dev, unsigned gpio, | |||
609 | { | 469 | { |
610 | int wlan_rst = gpio + 2; | 470 | int wlan_rst = gpio + 2; |
611 | 471 | ||
612 | if ((gpio_request(wlan_rst, "WLAN RST") == 0) && | 472 | if (gpio_request_one(wlan_rst, GPIOF_OUT_INIT_HIGH, "WLAN RST") == 0) { |
613 | (gpio_direction_output(wlan_rst, 1) == 0)) { | ||
614 | gpio_export(wlan_rst, 0); | 473 | gpio_export(wlan_rst, 0); |
615 | |||
616 | udelay(10); | 474 | udelay(10); |
617 | gpio_set_value(wlan_rst, 0); | 475 | gpio_set_value(wlan_rst, 0); |
618 | udelay(10); | 476 | udelay(10); |
@@ -653,19 +511,9 @@ static struct twl4030_platform_data cm_t35_twldata = { | |||
653 | .vpll2 = &cm_t35_vpll2, | 511 | .vpll2 = &cm_t35_vpll2, |
654 | }; | 512 | }; |
655 | 513 | ||
656 | static struct i2c_board_info __initdata cm_t35_i2c_boardinfo[] = { | ||
657 | { | ||
658 | I2C_BOARD_INFO("tps65930", 0x48), | ||
659 | .flags = I2C_CLIENT_WAKE, | ||
660 | .irq = INT_34XX_SYS_NIRQ, | ||
661 | .platform_data = &cm_t35_twldata, | ||
662 | }, | ||
663 | }; | ||
664 | |||
665 | static void __init cm_t35_init_i2c(void) | 514 | static void __init cm_t35_init_i2c(void) |
666 | { | 515 | { |
667 | omap_register_i2c_bus(1, 2600, cm_t35_i2c_boardinfo, | 516 | omap3_pmic_init("tps65930", &cm_t35_twldata); |
668 | ARRAY_SIZE(cm_t35_i2c_boardinfo)); | ||
669 | } | 517 | } |
670 | 518 | ||
671 | static void __init cm_t35_init_early(void) | 519 | static void __init cm_t35_init_early(void) |
@@ -775,12 +623,6 @@ static struct omap_board_mux board_mux[] __initdata = { | |||
775 | }; | 623 | }; |
776 | #endif | 624 | #endif |
777 | 625 | ||
778 | static struct omap_musb_board_data musb_board_data = { | ||
779 | .interface_type = MUSB_INTERFACE_ULPI, | ||
780 | .mode = MUSB_OTG, | ||
781 | .power = 100, | ||
782 | }; | ||
783 | |||
784 | static struct omap_board_config_kernel cm_t35_config[] __initdata = { | 626 | static struct omap_board_config_kernel cm_t35_config[] __initdata = { |
785 | }; | 627 | }; |
786 | 628 | ||
@@ -792,12 +634,12 @@ static void __init cm_t35_init(void) | |||
792 | omap_serial_init(); | 634 | omap_serial_init(); |
793 | cm_t35_init_i2c(); | 635 | cm_t35_init_i2c(); |
794 | cm_t35_init_nand(); | 636 | cm_t35_init_nand(); |
795 | cm_t35_init_ads7846(); | 637 | omap_ads7846_init(1, CM_T35_GPIO_PENDOWN, 0, NULL); |
796 | cm_t35_init_ethernet(); | 638 | cm_t35_init_ethernet(); |
797 | cm_t35_init_led(); | 639 | cm_t35_init_led(); |
798 | cm_t35_init_display(); | 640 | cm_t35_init_display(); |
799 | 641 | ||
800 | usb_musb_init(&musb_board_data); | 642 | usb_musb_init(NULL); |
801 | usbhs_init(&usbhs_bdata); | 643 | usbhs_init(&usbhs_bdata); |
802 | } | 644 | } |
803 | 645 | ||
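
The CM-T35 conversion above trades the board-local smsc911x platform device, ads7846 SPI setup and TWL I2C board info for the shared helpers pulled in via "common-board-devices.h" and <plat/gpmc-smsc911x.h>. What follows is a minimal sketch of the resulting board-init shape, assuming only the helper calls and signatures that appear in the hunks above; the "myboard" names and the GPIO/chip-select numbers are placeholders, not taken from any real board file:

  #include <linux/gpio.h>
  #include <linux/smsc911x.h>
  #include <linux/i2c/twl.h>
  #include <plat/gpmc-smsc911x.h>
  #include "common-board-devices.h"

  #define MYBOARD_SMSC911X_CS    5   /* placeholder GPMC chip-select */
  #define MYBOARD_SMSC911X_GPIO  65  /* placeholder IRQ GPIO */
  #define MYBOARD_TS_PENDOWN     57  /* placeholder pen-down GPIO */

  static struct omap_smsc911x_platform_data myboard_smsc911x_cfg = {
          .cs         = MYBOARD_SMSC911X_CS,
          .gpio_irq   = MYBOARD_SMSC911X_GPIO,
          .gpio_reset = -EINVAL,  /* no reset line wired */
          .flags      = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
  };

  /* .irq_base/.irq_end and the child platform data are filled in as before. */
  static struct twl4030_platform_data myboard_twldata;

  static void __init myboard_init(void)
  {
          omap3_pmic_init("tps65930", &myboard_twldata);     /* TWL PMIC on I2C1 */
          omap_ads7846_init(1, MYBOARD_TS_PENDOWN, 0, NULL); /* touchscreen, SPI bus 1 */
          gpmc_smsc911x_init(&myboard_smsc911x_cfg);         /* ethernet behind GPMC */
          usb_musb_init(NULL);                               /* default MUSB board data */
  }

The helpers own the gpmc_cs_request()/gpio_request() bookkeeping that each board used to open-code, which is what lets the hunks above delete it wholesale.
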
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index a27e3eee8292..08f08e812492 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -148,14 +148,13 @@ static void __init cm_t3517_init_rtc(void) | |||
148 | { | 148 | { |
149 | int err; | 149 | int err; |
150 | 150 | ||
151 | err = gpio_request(RTC_CS_EN_GPIO, "rtc cs en"); | 151 | err = gpio_request_one(RTC_CS_EN_GPIO, GPIOF_OUT_INIT_HIGH, |
152 | "rtc cs en"); | ||
152 | if (err) { | 153 | if (err) { |
153 | pr_err("CM-T3517: rtc cs en gpio request failed: %d\n", err); | 154 | pr_err("CM-T3517: rtc cs en gpio request failed: %d\n", err); |
154 | return; | 155 | return; |
155 | } | 156 | } |
156 | 157 | ||
157 | gpio_direction_output(RTC_CS_EN_GPIO, 1); | ||
158 | |||
159 | platform_device_register(&cm_t3517_rtc_device); | 158 | platform_device_register(&cm_t3517_rtc_device); |
160 | } | 159 | } |
161 | #else | 160 | #else |
@@ -182,11 +181,11 @@ static int cm_t3517_init_usbh(void) | |||
182 | { | 181 | { |
183 | int err; | 182 | int err; |
184 | 183 | ||
185 | err = gpio_request(USB_HUB_RESET_GPIO, "usb hub rst"); | 184 | err = gpio_request_one(USB_HUB_RESET_GPIO, GPIOF_OUT_INIT_LOW, |
185 | "usb hub rst"); | ||
186 | if (err) { | 186 | if (err) { |
187 | pr_err("CM-T3517: usb hub rst gpio request failed: %d\n", err); | 187 | pr_err("CM-T3517: usb hub rst gpio request failed: %d\n", err); |
188 | } else { | 188 | } else { |
189 | gpio_direction_output(USB_HUB_RESET_GPIO, 0); | ||
190 | udelay(10); | 189 | udelay(10); |
191 | gpio_set_value(USB_HUB_RESET_GPIO, 1); | 190 | gpio_set_value(USB_HUB_RESET_GPIO, 1); |
192 | msleep(1); | 191 | msleep(1); |
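
Both CM-T3517 hunks collapse a gpio_request()/gpio_direction_output() pair into a single gpio_request_one() call, which takes the direction and initial level as GPIOF_* flags. A small sketch of the idiom, with a placeholder GPIO number and label:

  #include <linux/gpio.h>

  #define MY_RST_GPIO  145  /* placeholder */

  /* Before: two calls, and a gpio_free() needed if the second one fails. */
  static int __init my_reset_init_old(void)
  {
          int err = gpio_request(MY_RST_GPIO, "my rst");
          if (err)
                  return err;
          err = gpio_direction_output(MY_RST_GPIO, 1);
          if (err)
                  gpio_free(MY_RST_GPIO);
          return err;
  }

  /* After: request, direction and initial level in one step. */
  static int __init my_reset_init_new(void)
  {
          return gpio_request_one(MY_RST_GPIO, GPIOF_OUT_INIT_HIGH, "my rst");
  }

GPIOF_IN, GPIOF_OUT_INIT_LOW and GPIOF_OUT_INIT_HIGH cover the three combinations used by the board files in this series.
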
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index 1d1b56a29fb1..cf520d7dd614 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -51,7 +51,6 @@ | |||
51 | #include <plat/mcspi.h> | 51 | #include <plat/mcspi.h> |
52 | #include <linux/input/matrix_keypad.h> | 52 | #include <linux/input/matrix_keypad.h> |
53 | #include <linux/spi/spi.h> | 53 | #include <linux/spi/spi.h> |
54 | #include <linux/spi/ads7846.h> | ||
55 | #include <linux/dm9000.h> | 54 | #include <linux/dm9000.h> |
56 | #include <linux/interrupt.h> | 55 | #include <linux/interrupt.h> |
57 | 56 | ||
@@ -60,6 +59,7 @@ | |||
60 | #include "mux.h" | 59 | #include "mux.h" |
61 | #include "hsmmc.h" | 60 | #include "hsmmc.h" |
62 | #include "timer-gp.h" | 61 | #include "timer-gp.h" |
62 | #include "common-board-devices.h" | ||
63 | 63 | ||
64 | #define NAND_BLOCK_SIZE SZ_128K | 64 | #define NAND_BLOCK_SIZE SZ_128K |
65 | 65 | ||
@@ -97,13 +97,6 @@ static struct mtd_partition devkit8000_nand_partitions[] = { | |||
97 | }, | 97 | }, |
98 | }; | 98 | }; |
99 | 99 | ||
100 | static struct omap_nand_platform_data devkit8000_nand_data = { | ||
101 | .options = NAND_BUSWIDTH_16, | ||
102 | .parts = devkit8000_nand_partitions, | ||
103 | .nr_parts = ARRAY_SIZE(devkit8000_nand_partitions), | ||
104 | .dma_channel = -1, /* disable DMA in OMAP NAND driver */ | ||
105 | }; | ||
106 | |||
107 | static struct omap2_hsmmc_info mmc[] = { | 100 | static struct omap2_hsmmc_info mmc[] = { |
108 | { | 101 | { |
109 | .mmc = 1, | 102 | .mmc = 1, |
@@ -249,7 +242,7 @@ static int devkit8000_twl_gpio_setup(struct device *dev, | |||
249 | /* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */ | 242 | /* TWL4030_GPIO_MAX + 0 is "LCD_PWREN" (out, active high) */ |
250 | devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0; | 243 | devkit8000_lcd_device.reset_gpio = gpio + TWL4030_GPIO_MAX + 0; |
251 | ret = gpio_request_one(devkit8000_lcd_device.reset_gpio, | 244 | ret = gpio_request_one(devkit8000_lcd_device.reset_gpio, |
252 | GPIOF_DIR_OUT | GPIOF_INIT_LOW, "LCD_PWREN"); | 245 | GPIOF_OUT_INIT_LOW, "LCD_PWREN"); |
253 | if (ret < 0) { | 246 | if (ret < 0) { |
254 | devkit8000_lcd_device.reset_gpio = -EINVAL; | 247 | devkit8000_lcd_device.reset_gpio = -EINVAL; |
255 | printk(KERN_ERR "Failed to request GPIO for LCD_PWRN\n"); | 248 | printk(KERN_ERR "Failed to request GPIO for LCD_PWRN\n"); |
@@ -258,7 +251,7 @@ static int devkit8000_twl_gpio_setup(struct device *dev, | |||
258 | /* gpio + 7 is "DVI_PD" (out, active low) */ | 251 | /* gpio + 7 is "DVI_PD" (out, active low) */ |
259 | devkit8000_dvi_device.reset_gpio = gpio + 7; | 252 | devkit8000_dvi_device.reset_gpio = gpio + 7; |
260 | ret = gpio_request_one(devkit8000_dvi_device.reset_gpio, | 253 | ret = gpio_request_one(devkit8000_dvi_device.reset_gpio, |
261 | GPIOF_DIR_OUT | GPIOF_INIT_LOW, "DVI PowerDown"); | 254 | GPIOF_OUT_INIT_LOW, "DVI PowerDown"); |
262 | if (ret < 0) { | 255 | if (ret < 0) { |
263 | devkit8000_dvi_device.reset_gpio = -EINVAL; | 256 | devkit8000_dvi_device.reset_gpio = -EINVAL; |
264 | printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n"); | 257 | printk(KERN_ERR "Failed to request GPIO for DVI PowerDown\n"); |
@@ -366,19 +359,9 @@ static struct twl4030_platform_data devkit8000_twldata = { | |||
366 | .keypad = &devkit8000_kp_data, | 359 | .keypad = &devkit8000_kp_data, |
367 | }; | 360 | }; |
368 | 361 | ||
369 | static struct i2c_board_info __initdata devkit8000_i2c_boardinfo[] = { | ||
370 | { | ||
371 | I2C_BOARD_INFO("tps65930", 0x48), | ||
372 | .flags = I2C_CLIENT_WAKE, | ||
373 | .irq = INT_34XX_SYS_NIRQ, | ||
374 | .platform_data = &devkit8000_twldata, | ||
375 | }, | ||
376 | }; | ||
377 | |||
378 | static int __init devkit8000_i2c_init(void) | 362 | static int __init devkit8000_i2c_init(void) |
379 | { | 363 | { |
380 | omap_register_i2c_bus(1, 2600, devkit8000_i2c_boardinfo, | 364 | omap3_pmic_init("tps65930", &devkit8000_twldata); |
381 | ARRAY_SIZE(devkit8000_i2c_boardinfo)); | ||
382 | /* Bus 3 is attached to the DVI port where devices like the pico DLP | 365 | /* Bus 3 is attached to the DVI port where devices like the pico DLP |
383 | * projector don't work reliably with 400kHz */ | 366 | * projector don't work reliably with 400kHz */ |
384 | omap_register_i2c_bus(3, 400, NULL, 0); | 367 | omap_register_i2c_bus(3, 400, NULL, 0); |
@@ -463,56 +446,6 @@ static void __init devkit8000_init_irq(void) | |||
463 | #endif | 446 | #endif |
464 | } | 447 | } |
465 | 448 | ||
466 | static void __init devkit8000_ads7846_init(void) | ||
467 | { | ||
468 | int gpio = OMAP3_DEVKIT_TS_GPIO; | ||
469 | int ret; | ||
470 | |||
471 | ret = gpio_request(gpio, "ads7846_pen_down"); | ||
472 | if (ret < 0) { | ||
473 | printk(KERN_ERR "Failed to request GPIO %d for " | ||
474 | "ads7846 pen down IRQ\n", gpio); | ||
475 | return; | ||
476 | } | ||
477 | |||
478 | gpio_direction_input(gpio); | ||
479 | } | ||
480 | |||
481 | static int ads7846_get_pendown_state(void) | ||
482 | { | ||
483 | return !gpio_get_value(OMAP3_DEVKIT_TS_GPIO); | ||
484 | } | ||
485 | |||
486 | static struct ads7846_platform_data ads7846_config = { | ||
487 | .x_max = 0x0fff, | ||
488 | .y_max = 0x0fff, | ||
489 | .x_plate_ohms = 180, | ||
490 | .pressure_max = 255, | ||
491 | .debounce_max = 10, | ||
492 | .debounce_tol = 5, | ||
493 | .debounce_rep = 1, | ||
494 | .get_pendown_state = ads7846_get_pendown_state, | ||
495 | .keep_vref_on = 1, | ||
496 | .settle_delay_usecs = 150, | ||
497 | }; | ||
498 | |||
499 | static struct omap2_mcspi_device_config ads7846_mcspi_config = { | ||
500 | .turbo_mode = 0, | ||
501 | .single_channel = 1, /* 0: slave, 1: master */ | ||
502 | }; | ||
503 | |||
504 | static struct spi_board_info devkit8000_spi_board_info[] __initdata = { | ||
505 | { | ||
506 | .modalias = "ads7846", | ||
507 | .bus_num = 2, | ||
508 | .chip_select = 0, | ||
509 | .max_speed_hz = 1500000, | ||
510 | .controller_data = &ads7846_mcspi_config, | ||
511 | .irq = OMAP_GPIO_IRQ(OMAP3_DEVKIT_TS_GPIO), | ||
512 | .platform_data = &ads7846_config, | ||
513 | } | ||
514 | }; | ||
515 | |||
516 | #define OMAP_DM9000_BASE 0x2c000000 | 449 | #define OMAP_DM9000_BASE 0x2c000000 |
517 | 450 | ||
518 | static struct resource omap_dm9000_resources[] = { | 451 | static struct resource omap_dm9000_resources[] = { |
@@ -550,14 +483,14 @@ static void __init omap_dm9000_init(void) | |||
550 | { | 483 | { |
551 | unsigned char *eth_addr = omap_dm9000_platdata.dev_addr; | 484 | unsigned char *eth_addr = omap_dm9000_platdata.dev_addr; |
552 | struct omap_die_id odi; | 485 | struct omap_die_id odi; |
486 | int ret; | ||
553 | 487 | ||
554 | if (gpio_request(OMAP_DM9000_GPIO_IRQ, "dm9000 irq") < 0) { | 488 | ret = gpio_request_one(OMAP_DM9000_GPIO_IRQ, GPIOF_IN, "dm9000 irq"); |
489 | if (ret < 0) { | ||
555 | printk(KERN_ERR "Failed to request GPIO%d for dm9000 IRQ\n", | 490 | printk(KERN_ERR "Failed to request GPIO%d for dm9000 IRQ\n", |
556 | OMAP_DM9000_GPIO_IRQ); | 491 | OMAP_DM9000_GPIO_IRQ); |
557 | return; | 492 | return; |
558 | } | 493 | } |
559 | |||
560 | gpio_direction_input(OMAP_DM9000_GPIO_IRQ); | ||
561 | 494 | ||
562 | /* init the mac address using DIE id */ | 495 | /* init the mac address using DIE id */ |
563 | omap_get_die_id(&odi); | 496 | omap_get_die_id(&odi); |
@@ -576,45 +509,6 @@ static struct platform_device *devkit8000_devices[] __initdata = { | |||
576 | &omap_dm9000_dev, | 509 | &omap_dm9000_dev, |
577 | }; | 510 | }; |
578 | 511 | ||
579 | static void __init devkit8000_flash_init(void) | ||
580 | { | ||
581 | u8 cs = 0; | ||
582 | u8 nandcs = GPMC_CS_NUM + 1; | ||
583 | |||
584 | /* find out the chip-select on which NAND exists */ | ||
585 | while (cs < GPMC_CS_NUM) { | ||
586 | u32 ret = 0; | ||
587 | ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); | ||
588 | |||
589 | if ((ret & 0xC00) == 0x800) { | ||
590 | printk(KERN_INFO "Found NAND on CS%d\n", cs); | ||
591 | if (nandcs > GPMC_CS_NUM) | ||
592 | nandcs = cs; | ||
593 | } | ||
594 | cs++; | ||
595 | } | ||
596 | |||
597 | if (nandcs > GPMC_CS_NUM) { | ||
598 | printk(KERN_INFO "NAND: Unable to find configuration " | ||
599 | "in GPMC\n "); | ||
600 | return; | ||
601 | } | ||
602 | |||
603 | if (nandcs < GPMC_CS_NUM) { | ||
604 | devkit8000_nand_data.cs = nandcs; | ||
605 | |||
606 | printk(KERN_INFO "Registering NAND on CS%d\n", nandcs); | ||
607 | if (gpmc_nand_init(&devkit8000_nand_data) < 0) | ||
608 | printk(KERN_ERR "Unable to register NAND device\n"); | ||
609 | } | ||
610 | } | ||
611 | |||
612 | static struct omap_musb_board_data musb_board_data = { | ||
613 | .interface_type = MUSB_INTERFACE_ULPI, | ||
614 | .mode = MUSB_OTG, | ||
615 | .power = 100, | ||
616 | }; | ||
617 | |||
618 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { | 512 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
619 | 513 | ||
620 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, | 514 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
@@ -795,14 +689,13 @@ static void __init devkit8000_init(void) | |||
795 | ARRAY_SIZE(devkit8000_devices)); | 689 | ARRAY_SIZE(devkit8000_devices)); |
796 | 690 | ||
797 | omap_display_init(&devkit8000_dss_data); | 691 | omap_display_init(&devkit8000_dss_data); |
798 | spi_register_board_info(devkit8000_spi_board_info, | ||
799 | ARRAY_SIZE(devkit8000_spi_board_info)); | ||
800 | 692 | ||
801 | devkit8000_ads7846_init(); | 693 | omap_ads7846_init(2, OMAP3_DEVKIT_TS_GPIO, 0, NULL); |
802 | 694 | ||
803 | usb_musb_init(&musb_board_data); | 695 | usb_musb_init(NULL); |
804 | usbhs_init(&usbhs_bdata); | 696 | usbhs_init(&usbhs_bdata); |
805 | devkit8000_flash_init(); | 697 | omap_nand_flash_init(NAND_BUSWIDTH_16, devkit8000_nand_partitions, |
698 | ARRAY_SIZE(devkit8000_nand_partitions)); | ||
806 | 699 | ||
807 | /* Ensure SDRC pins are mux'd for self-refresh */ | 700 | /* Ensure SDRC pins are mux'd for self-refresh */ |
808 | omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); | 701 | omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); |
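
The DevKit8000 hunks also drop the per-board GPMC chip-select scan (devkit8000_flash_init) in favour of omap_nand_flash_init(), which probes GPMC for the NAND chip-select itself and registers the omap2-nand device. A sketch under the call shape shown above; the partition names and sizes are placeholders only:

  #include <linux/mtd/mtd.h>
  #include <linux/mtd/nand.h>
  #include <linux/mtd/partitions.h>
  #include "common-board-devices.h"

  /* Placeholder layout; only the call shape matters here. */
  static struct mtd_partition myboard_nand_partitions[] = {
          { .name = "X-Loader",    .offset = 0,                  .size = 4 * SZ_128K  },
          { .name = "U-Boot",      .offset = MTDPART_OFS_APPEND, .size = 14 * SZ_128K },
          { .name = "Kernel",      .offset = MTDPART_OFS_APPEND, .size = 32 * SZ_128K },
          { .name = "File System", .offset = MTDPART_OFS_APPEND, .size = MTDPART_SIZ_FULL },
  };

  static void __init myboard_init(void)
  {
          omap_nand_flash_init(NAND_BUSWIDTH_16, myboard_nand_partitions,
                               ARRAY_SIZE(myboard_nand_partitions));
  }
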
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 3da64d361651..0c1bfca3f731 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -38,6 +38,7 @@ | |||
38 | #include "mux.h" | 38 | #include "mux.h" |
39 | #include "hsmmc.h" | 39 | #include "hsmmc.h" |
40 | #include "sdram-numonyx-m65kxxxxam.h" | 40 | #include "sdram-numonyx-m65kxxxxam.h" |
41 | #include "common-board-devices.h" | ||
41 | 42 | ||
42 | #define IGEP2_SMSC911X_CS 5 | 43 | #define IGEP2_SMSC911X_CS 5 |
43 | #define IGEP2_SMSC911X_GPIO 176 | 44 | #define IGEP2_SMSC911X_GPIO 176 |
@@ -54,6 +55,11 @@ | |||
54 | #define IGEP2_RC_GPIO_WIFI_NRESET 139 | 55 | #define IGEP2_RC_GPIO_WIFI_NRESET 139 |
55 | #define IGEP2_RC_GPIO_BT_NRESET 137 | 56 | #define IGEP2_RC_GPIO_BT_NRESET 137 |
56 | 57 | ||
58 | #define IGEP3_GPIO_LED0_GREEN 54 | ||
59 | #define IGEP3_GPIO_LED0_RED 53 | ||
60 | #define IGEP3_GPIO_LED1_RED 16 | ||
61 | #define IGEP3_GPIO_USBH_NRESET 183 | ||
62 | |||
57 | /* | 63 | /* |
58 | * IGEP2 Hardware Revision Table | 64 | * IGEP2 Hardware Revision Table |
59 | * | 65 | * |
@@ -68,6 +74,7 @@ | |||
68 | 74 | ||
69 | #define IGEP2_BOARD_HWREV_B 0 | 75 | #define IGEP2_BOARD_HWREV_B 0 |
70 | #define IGEP2_BOARD_HWREV_C 1 | 76 | #define IGEP2_BOARD_HWREV_C 1 |
77 | #define IGEP3_BOARD_HWREV 2 | ||
71 | 78 | ||
72 | static u8 hwrev; | 79 | static u8 hwrev; |
73 | 80 | ||
@@ -75,24 +82,29 @@ static void __init igep2_get_revision(void) | |||
75 | { | 82 | { |
76 | u8 ret; | 83 | u8 ret; |
77 | 84 | ||
85 | if (machine_is_igep0030()) { | ||
86 | hwrev = IGEP3_BOARD_HWREV; | ||
87 | return; | ||
88 | } | ||
89 | |||
78 | omap_mux_init_gpio(IGEP2_GPIO_LED1_RED, OMAP_PIN_INPUT); | 90 | omap_mux_init_gpio(IGEP2_GPIO_LED1_RED, OMAP_PIN_INPUT); |
79 | 91 | ||
80 | if ((gpio_request(IGEP2_GPIO_LED1_RED, "GPIO_HW0_REV") == 0) && | 92 | if (gpio_request_one(IGEP2_GPIO_LED1_RED, GPIOF_IN, "GPIO_HW0_REV")) { |
81 | (gpio_direction_input(IGEP2_GPIO_LED1_RED) == 0)) { | ||
82 | ret = gpio_get_value(IGEP2_GPIO_LED1_RED); | ||
83 | if (ret == 0) { | ||
84 | pr_info("IGEP2: Hardware Revision C (B-NON compatible)\n"); | ||
85 | hwrev = IGEP2_BOARD_HWREV_C; | ||
86 | } else if (ret == 1) { | ||
87 | pr_info("IGEP2: Hardware Revision B/C (B compatible)\n"); | ||
88 | hwrev = IGEP2_BOARD_HWREV_B; | ||
89 | } else { | ||
90 | pr_err("IGEP2: Unknown Hardware Revision\n"); | ||
91 | hwrev = -1; | ||
92 | } | ||
93 | } else { | ||
94 | pr_warning("IGEP2: Could not obtain gpio GPIO_HW0_REV\n"); | 93 | pr_warning("IGEP2: Could not obtain gpio GPIO_HW0_REV\n"); |
95 | pr_err("IGEP2: Unknown Hardware Revision\n"); | 94 | pr_err("IGEP2: Unknown Hardware Revision\n"); |
95 | return; | ||
96 | } | ||
97 | |||
98 | ret = gpio_get_value(IGEP2_GPIO_LED1_RED); | ||
99 | if (ret == 0) { | ||
100 | pr_info("IGEP2: Hardware Revision C (B-NON compatible)\n"); | ||
101 | hwrev = IGEP2_BOARD_HWREV_C; | ||
102 | } else if (ret == 1) { | ||
103 | pr_info("IGEP2: Hardware Revision B/C (B compatible)\n"); | ||
104 | hwrev = IGEP2_BOARD_HWREV_B; | ||
105 | } else { | ||
106 | pr_err("IGEP2: Unknown Hardware Revision\n"); | ||
107 | hwrev = -1; | ||
96 | } | 108 | } |
97 | 109 | ||
98 | gpio_free(IGEP2_GPIO_LED1_RED); | 110 | gpio_free(IGEP2_GPIO_LED1_RED); |
@@ -111,7 +123,7 @@ static void __init igep2_get_revision(void) | |||
111 | * So MTD regards it as 4KiB page size and 256KiB block size 64*(2*2048) | 123 | * So MTD regards it as 4KiB page size and 256KiB block size 64*(2*2048) |
112 | */ | 124 | */ |
113 | 125 | ||
114 | static struct mtd_partition igep2_onenand_partitions[] = { | 126 | static struct mtd_partition igep_onenand_partitions[] = { |
115 | { | 127 | { |
116 | .name = "X-Loader", | 128 | .name = "X-Loader", |
117 | .offset = 0, | 129 | .offset = 0, |
@@ -139,21 +151,21 @@ static struct mtd_partition igep2_onenand_partitions[] = { | |||
139 | }, | 151 | }, |
140 | }; | 152 | }; |
141 | 153 | ||
142 | static struct omap_onenand_platform_data igep2_onenand_data = { | 154 | static struct omap_onenand_platform_data igep_onenand_data = { |
143 | .parts = igep2_onenand_partitions, | 155 | .parts = igep_onenand_partitions, |
144 | .nr_parts = ARRAY_SIZE(igep2_onenand_partitions), | 156 | .nr_parts = ARRAY_SIZE(igep_onenand_partitions), |
145 | .dma_channel = -1, /* disable DMA in OMAP OneNAND driver */ | 157 | .dma_channel = -1, /* disable DMA in OMAP OneNAND driver */ |
146 | }; | 158 | }; |
147 | 159 | ||
148 | static struct platform_device igep2_onenand_device = { | 160 | static struct platform_device igep_onenand_device = { |
149 | .name = "omap2-onenand", | 161 | .name = "omap2-onenand", |
150 | .id = -1, | 162 | .id = -1, |
151 | .dev = { | 163 | .dev = { |
152 | .platform_data = &igep2_onenand_data, | 164 | .platform_data = &igep_onenand_data, |
153 | }, | 165 | }, |
154 | }; | 166 | }; |
155 | 167 | ||
156 | static void __init igep2_flash_init(void) | 168 | static void __init igep_flash_init(void) |
157 | { | 169 | { |
158 | u8 cs = 0; | 170 | u8 cs = 0; |
159 | u8 onenandcs = GPMC_CS_NUM + 1; | 171 | u8 onenandcs = GPMC_CS_NUM + 1; |
@@ -165,7 +177,7 @@ static void __init igep2_flash_init(void) | |||
165 | /* Check if NAND/oneNAND is configured */ | 177 | /* Check if NAND/oneNAND is configured */ |
166 | if ((ret & 0xC00) == 0x800) | 178 | if ((ret & 0xC00) == 0x800) |
167 | /* NAND found */ | 179 | /* NAND found */ |
168 | pr_err("IGEP2: Unsupported NAND found\n"); | 180 | pr_err("IGEP: Unsupported NAND found\n"); |
169 | else { | 181 | else { |
170 | ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); | 182 | ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); |
171 | if ((ret & 0x3F) == (ONENAND_MAP >> 24)) | 183 | if ((ret & 0x3F) == (ONENAND_MAP >> 24)) |
@@ -175,85 +187,46 @@ static void __init igep2_flash_init(void) | |||
175 | } | 187 | } |
176 | 188 | ||
177 | if (onenandcs > GPMC_CS_NUM) { | 189 | if (onenandcs > GPMC_CS_NUM) { |
178 | pr_err("IGEP2: Unable to find configuration in GPMC\n"); | 190 | pr_err("IGEP: Unable to find configuration in GPMC\n"); |
179 | return; | 191 | return; |
180 | } | 192 | } |
181 | 193 | ||
182 | igep2_onenand_data.cs = onenandcs; | 194 | igep_onenand_data.cs = onenandcs; |
183 | 195 | ||
184 | if (platform_device_register(&igep2_onenand_device) < 0) | 196 | if (platform_device_register(&igep_onenand_device) < 0) |
185 | pr_err("IGEP2: Unable to register OneNAND device\n"); | 197 | pr_err("IGEP: Unable to register OneNAND device\n"); |
186 | } | 198 | } |
187 | 199 | ||
188 | #else | 200 | #else |
189 | static void __init igep2_flash_init(void) {} | 201 | static void __init igep_flash_init(void) {} |
190 | #endif | 202 | #endif |
191 | 203 | ||
192 | #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) | 204 | #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) |
193 | 205 | ||
194 | #include <linux/smsc911x.h> | 206 | #include <linux/smsc911x.h> |
207 | #include <plat/gpmc-smsc911x.h> | ||
195 | 208 | ||
196 | static struct smsc911x_platform_config igep2_smsc911x_config = { | 209 | static struct omap_smsc911x_platform_data smsc911x_cfg = { |
197 | .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, | 210 | .cs = IGEP2_SMSC911X_CS, |
198 | .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, | 211 | .gpio_irq = IGEP2_SMSC911X_GPIO, |
199 | .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS , | 212 | .gpio_reset = -EINVAL, |
200 | .phy_interface = PHY_INTERFACE_MODE_MII, | 213 | .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, |
201 | }; | ||
202 | |||
203 | static struct resource igep2_smsc911x_resources[] = { | ||
204 | { | ||
205 | .flags = IORESOURCE_MEM, | ||
206 | }, | ||
207 | { | ||
208 | .start = OMAP_GPIO_IRQ(IGEP2_SMSC911X_GPIO), | ||
209 | .end = OMAP_GPIO_IRQ(IGEP2_SMSC911X_GPIO), | ||
210 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, | ||
211 | }, | ||
212 | }; | ||
213 | |||
214 | static struct platform_device igep2_smsc911x_device = { | ||
215 | .name = "smsc911x", | ||
216 | .id = 0, | ||
217 | .num_resources = ARRAY_SIZE(igep2_smsc911x_resources), | ||
218 | .resource = igep2_smsc911x_resources, | ||
219 | .dev = { | ||
220 | .platform_data = &igep2_smsc911x_config, | ||
221 | }, | ||
222 | }; | 214 | }; |
223 | 215 | ||
224 | static inline void __init igep2_init_smsc911x(void) | 216 | static inline void __init igep2_init_smsc911x(void) |
225 | { | 217 | { |
226 | unsigned long cs_mem_base; | 218 | gpmc_smsc911x_init(&smsc911x_cfg); |
227 | |||
228 | if (gpmc_cs_request(IGEP2_SMSC911X_CS, SZ_16M, &cs_mem_base) < 0) { | ||
229 | pr_err("IGEP v2: Failed request for GPMC mem for smsc911x\n"); | ||
230 | gpmc_cs_free(IGEP2_SMSC911X_CS); | ||
231 | return; | ||
232 | } | ||
233 | |||
234 | igep2_smsc911x_resources[0].start = cs_mem_base + 0x0; | ||
235 | igep2_smsc911x_resources[0].end = cs_mem_base + 0xff; | ||
236 | |||
237 | if ((gpio_request(IGEP2_SMSC911X_GPIO, "SMSC911X IRQ") == 0) && | ||
238 | (gpio_direction_input(IGEP2_SMSC911X_GPIO) == 0)) { | ||
239 | gpio_export(IGEP2_SMSC911X_GPIO, 0); | ||
240 | } else { | ||
241 | pr_err("IGEP v2: Could not obtain gpio for for SMSC911X IRQ\n"); | ||
242 | return; | ||
243 | } | ||
244 | |||
245 | platform_device_register(&igep2_smsc911x_device); | ||
246 | } | 219 | } |
247 | 220 | ||
248 | #else | 221 | #else |
249 | static inline void __init igep2_init_smsc911x(void) { } | 222 | static inline void __init igep2_init_smsc911x(void) { } |
250 | #endif | 223 | #endif |
251 | 224 | ||
252 | static struct regulator_consumer_supply igep2_vmmc1_supply = | 225 | static struct regulator_consumer_supply igep_vmmc1_supply = |
253 | REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"); | 226 | REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"); |
254 | 227 | ||
255 | /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */ | 228 | /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */ |
256 | static struct regulator_init_data igep2_vmmc1 = { | 229 | static struct regulator_init_data igep_vmmc1 = { |
257 | .constraints = { | 230 | .constraints = { |
258 | .min_uV = 1850000, | 231 | .min_uV = 1850000, |
259 | .max_uV = 3150000, | 232 | .max_uV = 3150000, |
@@ -264,13 +237,13 @@ static struct regulator_init_data igep2_vmmc1 = { | |||
264 | | REGULATOR_CHANGE_STATUS, | 237 | | REGULATOR_CHANGE_STATUS, |
265 | }, | 238 | }, |
266 | .num_consumer_supplies = 1, | 239 | .num_consumer_supplies = 1, |
267 | .consumer_supplies = &igep2_vmmc1_supply, | 240 | .consumer_supplies = &igep_vmmc1_supply, |
268 | }; | 241 | }; |
269 | 242 | ||
270 | static struct regulator_consumer_supply igep2_vio_supply = | 243 | static struct regulator_consumer_supply igep_vio_supply = |
271 | REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1"); | 244 | REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1"); |
272 | 245 | ||
273 | static struct regulator_init_data igep2_vio = { | 246 | static struct regulator_init_data igep_vio = { |
274 | .constraints = { | 247 | .constraints = { |
275 | .min_uV = 1800000, | 248 | .min_uV = 1800000, |
276 | .max_uV = 1800000, | 249 | .max_uV = 1800000, |
@@ -282,34 +255,34 @@ static struct regulator_init_data igep2_vio = { | |||
282 | | REGULATOR_CHANGE_STATUS, | 255 | | REGULATOR_CHANGE_STATUS, |
283 | }, | 256 | }, |
284 | .num_consumer_supplies = 1, | 257 | .num_consumer_supplies = 1, |
285 | .consumer_supplies = &igep2_vio_supply, | 258 | .consumer_supplies = &igep_vio_supply, |
286 | }; | 259 | }; |
287 | 260 | ||
288 | static struct regulator_consumer_supply igep2_vmmc2_supply = | 261 | static struct regulator_consumer_supply igep_vmmc2_supply = |
289 | REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"); | 262 | REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"); |
290 | 263 | ||
291 | static struct regulator_init_data igep2_vmmc2 = { | 264 | static struct regulator_init_data igep_vmmc2 = { |
292 | .constraints = { | 265 | .constraints = { |
293 | .valid_modes_mask = REGULATOR_MODE_NORMAL, | 266 | .valid_modes_mask = REGULATOR_MODE_NORMAL, |
294 | .always_on = 1, | 267 | .always_on = 1, |
295 | }, | 268 | }, |
296 | .num_consumer_supplies = 1, | 269 | .num_consumer_supplies = 1, |
297 | .consumer_supplies = &igep2_vmmc2_supply, | 270 | .consumer_supplies = &igep_vmmc2_supply, |
298 | }; | 271 | }; |
299 | 272 | ||
300 | static struct fixed_voltage_config igep2_vwlan = { | 273 | static struct fixed_voltage_config igep_vwlan = { |
301 | .supply_name = "vwlan", | 274 | .supply_name = "vwlan", |
302 | .microvolts = 3300000, | 275 | .microvolts = 3300000, |
303 | .gpio = -EINVAL, | 276 | .gpio = -EINVAL, |
304 | .enabled_at_boot = 1, | 277 | .enabled_at_boot = 1, |
305 | .init_data = &igep2_vmmc2, | 278 | .init_data = &igep_vmmc2, |
306 | }; | 279 | }; |
307 | 280 | ||
308 | static struct platform_device igep2_vwlan_device = { | 281 | static struct platform_device igep_vwlan_device = { |
309 | .name = "reg-fixed-voltage", | 282 | .name = "reg-fixed-voltage", |
310 | .id = 0, | 283 | .id = 0, |
311 | .dev = { | 284 | .dev = { |
312 | .platform_data = &igep2_vwlan, | 285 | .platform_data = &igep_vwlan, |
313 | }, | 286 | }, |
314 | }; | 287 | }; |
315 | 288 | ||
@@ -334,20 +307,17 @@ static struct omap2_hsmmc_info mmc[] = { | |||
334 | #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) | 307 | #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) |
335 | #include <linux/leds.h> | 308 | #include <linux/leds.h> |
336 | 309 | ||
337 | static struct gpio_led igep2_gpio_leds[] = { | 310 | static struct gpio_led igep_gpio_leds[] = { |
338 | [0] = { | 311 | [0] = { |
339 | .name = "gpio-led:red:d0", | 312 | .name = "gpio-led:red:d0", |
340 | .gpio = IGEP2_GPIO_LED0_RED, | ||
341 | .default_trigger = "default-off" | 313 | .default_trigger = "default-off" |
342 | }, | 314 | }, |
343 | [1] = { | 315 | [1] = { |
344 | .name = "gpio-led:green:d0", | 316 | .name = "gpio-led:green:d0", |
345 | .gpio = IGEP2_GPIO_LED0_GREEN, | ||
346 | .default_trigger = "default-off", | 317 | .default_trigger = "default-off", |
347 | }, | 318 | }, |
348 | [2] = { | 319 | [2] = { |
349 | .name = "gpio-led:red:d1", | 320 | .name = "gpio-led:red:d1", |
350 | .gpio = IGEP2_GPIO_LED1_RED, | ||
351 | .default_trigger = "default-off", | 321 | .default_trigger = "default-off", |
352 | }, | 322 | }, |
353 | [3] = { | 323 | [3] = { |
@@ -358,94 +328,119 @@ static struct gpio_led igep2_gpio_leds[] = { | |||
358 | }, | 328 | }, |
359 | }; | 329 | }; |
360 | 330 | ||
361 | static struct gpio_led_platform_data igep2_led_pdata = { | 331 | static struct gpio_led_platform_data igep_led_pdata = { |
362 | .leds = igep2_gpio_leds, | 332 | .leds = igep_gpio_leds, |
363 | .num_leds = ARRAY_SIZE(igep2_gpio_leds), | 333 | .num_leds = ARRAY_SIZE(igep_gpio_leds), |
364 | }; | 334 | }; |
365 | 335 | ||
366 | static struct platform_device igep2_led_device = { | 336 | static struct platform_device igep_led_device = { |
367 | .name = "leds-gpio", | 337 | .name = "leds-gpio", |
368 | .id = -1, | 338 | .id = -1, |
369 | .dev = { | 339 | .dev = { |
370 | .platform_data = &igep2_led_pdata, | 340 | .platform_data = &igep_led_pdata, |
371 | }, | 341 | }, |
372 | }; | 342 | }; |
373 | 343 | ||
374 | static void __init igep2_leds_init(void) | 344 | static void __init igep_leds_init(void) |
375 | { | 345 | { |
376 | platform_device_register(&igep2_led_device); | 346 | if (machine_is_igep0020()) { |
347 | igep_gpio_leds[0].gpio = IGEP2_GPIO_LED0_RED; | ||
348 | igep_gpio_leds[1].gpio = IGEP2_GPIO_LED0_GREEN; | ||
349 | igep_gpio_leds[2].gpio = IGEP2_GPIO_LED1_RED; | ||
350 | } else { | ||
351 | igep_gpio_leds[0].gpio = IGEP3_GPIO_LED0_RED; | ||
352 | igep_gpio_leds[1].gpio = IGEP3_GPIO_LED0_GREEN; | ||
353 | igep_gpio_leds[2].gpio = IGEP3_GPIO_LED1_RED; | ||
354 | } | ||
355 | |||
356 | platform_device_register(&igep_led_device); | ||
377 | } | 357 | } |
378 | 358 | ||
379 | #else | 359 | #else |
380 | static inline void igep2_leds_init(void) | 360 | static struct gpio igep_gpio_leds[] __initdata = { |
361 | { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:red:d0" }, | ||
362 | { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:green:d0" }, | ||
363 | { -EINVAL, GPIOF_OUT_INIT_LOW, "gpio-led:red:d1" }, | ||
364 | }; | ||
365 | |||
366 | static inline void igep_leds_init(void) | ||
381 | { | 367 | { |
382 | if ((gpio_request(IGEP2_GPIO_LED0_RED, "gpio-led:red:d0") == 0) && | 368 | int i; |
383 | (gpio_direction_output(IGEP2_GPIO_LED0_RED, 0) == 0)) | ||
384 | gpio_export(IGEP2_GPIO_LED0_RED, 0); | ||
385 | else | ||
386 | pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_RED\n"); | ||
387 | 369 | ||
388 | if ((gpio_request(IGEP2_GPIO_LED0_GREEN, "gpio-led:green:d0") == 0) && | 370 | if (machine_is_igep0020()) { |
389 | (gpio_direction_output(IGEP2_GPIO_LED0_GREEN, 0) == 0)) | 371 | igep_gpio_leds[0].gpio = IGEP2_GPIO_LED0_RED; |
390 | gpio_export(IGEP2_GPIO_LED0_GREEN, 0); | 372 | igep_gpio_leds[1].gpio = IGEP2_GPIO_LED0_GREEN; |
391 | else | 373 | igep_gpio_leds[2].gpio = IGEP2_GPIO_LED1_RED; |
392 | pr_warning("IGEP v2: Could not obtain gpio GPIO_LED0_GREEN\n"); | 374 | } else { |
375 | igep_gpio_leds[0].gpio = IGEP3_GPIO_LED0_RED; | ||
376 | igep_gpio_leds[1].gpio = IGEP3_GPIO_LED0_GREEN; | ||
377 | igep_gpio_leds[2].gpio = IGEP3_GPIO_LED1_RED; | ||
378 | } | ||
393 | 379 | ||
394 | if ((gpio_request(IGEP2_GPIO_LED1_RED, "gpio-led:red:d1") == 0) && | 380 | if (gpio_request_array(igep_gpio_leds, ARRAY_SIZE(igep_gpio_leds))) { |
395 | (gpio_direction_output(IGEP2_GPIO_LED1_RED, 0) == 0)) | 381 | pr_warning("IGEP v2: Could not obtain leds gpios\n"); |
396 | gpio_export(IGEP2_GPIO_LED1_RED, 0); | 382 | return; |
397 | else | 383 | } |
398 | pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_RED\n"); | ||
399 | 384 | ||
385 | for (i = 0; i < ARRAY_SIZE(igep_gpio_leds); i++) | ||
386 | gpio_export(igep_gpio_leds[i].gpio, 0); | ||
400 | } | 387 | } |
401 | #endif | 388 | #endif |
402 | 389 | ||
403 | static int igep2_twl_gpio_setup(struct device *dev, | 390 | static struct gpio igep2_twl_gpios[] = { |
391 | { -EINVAL, GPIOF_IN, "GPIO_EHCI_NOC" }, | ||
392 | { -EINVAL, GPIOF_OUT_INIT_LOW, "GPIO_USBH_CPEN" }, | ||
393 | }; | ||
394 | |||
395 | static int igep_twl_gpio_setup(struct device *dev, | ||
404 | unsigned gpio, unsigned ngpio) | 396 | unsigned gpio, unsigned ngpio) |
405 | { | 397 | { |
398 | int ret; | ||
399 | |||
406 | /* gpio + 0 is "mmc0_cd" (input/IRQ) */ | 400 | /* gpio + 0 is "mmc0_cd" (input/IRQ) */ |
407 | mmc[0].gpio_cd = gpio + 0; | 401 | mmc[0].gpio_cd = gpio + 0; |
408 | omap2_hsmmc_init(mmc); | 402 | omap2_hsmmc_init(mmc); |
409 | 403 | ||
410 | /* | ||
411 | * REVISIT: need ehci-omap hooks for external VBUS | ||
412 | * power switch and overcurrent detect | ||
413 | */ | ||
414 | if ((gpio_request(gpio + 1, "GPIO_EHCI_NOC") < 0) || | ||
415 | (gpio_direction_input(gpio + 1) < 0)) | ||
416 | pr_err("IGEP2: Could not obtain gpio for EHCI NOC"); | ||
417 | |||
418 | /* | ||
419 | * TWL4030_GPIO_MAX + 0 == ledA, GPIO_USBH_CPEN | ||
420 | * (out, active low) | ||
421 | */ | ||
422 | if ((gpio_request(gpio + TWL4030_GPIO_MAX, "GPIO_USBH_CPEN") < 0) || | ||
423 | (gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0) < 0)) | ||
424 | pr_err("IGEP2: Could not obtain gpio for USBH_CPEN"); | ||
425 | |||
426 | /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */ | 404 | /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */ |
427 | #if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE) | 405 | #if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE) |
428 | if ((gpio_request(gpio+TWL4030_GPIO_MAX+1, "gpio-led:green:d1") == 0) | 406 | ret = gpio_request_one(gpio + TWL4030_GPIO_MAX + 1, GPIOF_OUT_INIT_HIGH, |
429 | && (gpio_direction_output(gpio + TWL4030_GPIO_MAX + 1, 1) == 0)) | 407 | "gpio-led:green:d1"); |
408 | if (ret == 0) | ||
430 | gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0); | 409 | gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0); |
431 | else | 410 | else |
432 | pr_warning("IGEP v2: Could not obtain gpio GPIO_LED1_GREEN\n"); | 411 | pr_warning("IGEP: Could not obtain gpio GPIO_LED1_GREEN\n"); |
433 | #else | 412 | #else |
434 | igep2_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1; | 413 | igep_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1; |
435 | #endif | 414 | #endif |
436 | 415 | ||
416 | if (machine_is_igep0030()) | ||
417 | return 0; | ||
418 | |||
419 | /* | ||
420 | * REVISIT: need ehci-omap hooks for external VBUS | ||
421 | * power switch and overcurrent detect | ||
422 | */ | ||
423 | igep2_twl_gpios[0].gpio = gpio + 1; | ||
424 | |||
425 | /* TWL4030_GPIO_MAX + 0 == ledA, GPIO_USBH_CPEN (out, active low) */ | ||
426 | igep2_twl_gpios[1].gpio = gpio + TWL4030_GPIO_MAX; | ||
427 | |||
428 | ret = gpio_request_array(igep2_twl_gpios, ARRAY_SIZE(igep2_twl_gpios)); | ||
429 | if (ret < 0) | ||
430 | pr_err("IGEP2: Could not obtain gpio for USBH_CPEN"); | ||
431 | |||
437 | return 0; | 432 | return 0; |
438 | }; | 433 | }; |
439 | 434 | ||
440 | static struct twl4030_gpio_platform_data igep2_twl4030_gpio_pdata = { | 435 | static struct twl4030_gpio_platform_data igep_twl4030_gpio_pdata = { |
441 | .gpio_base = OMAP_MAX_GPIO_LINES, | 436 | .gpio_base = OMAP_MAX_GPIO_LINES, |
442 | .irq_base = TWL4030_GPIO_IRQ_BASE, | 437 | .irq_base = TWL4030_GPIO_IRQ_BASE, |
443 | .irq_end = TWL4030_GPIO_IRQ_END, | 438 | .irq_end = TWL4030_GPIO_IRQ_END, |
444 | .use_leds = true, | 439 | .use_leds = true, |
445 | .setup = igep2_twl_gpio_setup, | 440 | .setup = igep_twl_gpio_setup, |
446 | }; | 441 | }; |
447 | 442 | ||
448 | static struct twl4030_usb_data igep2_usb_data = { | 443 | static struct twl4030_usb_data igep_usb_data = { |
449 | .usb_mode = T2_USB_MODE_ULPI, | 444 | .usb_mode = T2_USB_MODE_ULPI, |
450 | }; | 445 | }; |
451 | 446 | ||
@@ -507,16 +502,17 @@ static struct regulator_init_data igep2_vpll2 = { | |||
507 | 502 | ||
508 | static void __init igep2_display_init(void) | 503 | static void __init igep2_display_init(void) |
509 | { | 504 | { |
510 | if (gpio_request(IGEP2_GPIO_DVI_PUP, "GPIO_DVI_PUP") && | 505 | int err = gpio_request_one(IGEP2_GPIO_DVI_PUP, GPIOF_OUT_INIT_HIGH, |
511 | gpio_direction_output(IGEP2_GPIO_DVI_PUP, 1)) | 506 | "GPIO_DVI_PUP"); |
507 | if (err) | ||
512 | pr_err("IGEP v2: Could not obtain gpio GPIO_DVI_PUP\n"); | 508 | pr_err("IGEP v2: Could not obtain gpio GPIO_DVI_PUP\n"); |
513 | } | 509 | } |
514 | 510 | ||
515 | static struct platform_device *igep2_devices[] __initdata = { | 511 | static struct platform_device *igep_devices[] __initdata = { |
516 | &igep2_vwlan_device, | 512 | &igep_vwlan_device, |
517 | }; | 513 | }; |
518 | 514 | ||
519 | static void __init igep2_init_early(void) | 515 | static void __init igep_init_early(void) |
520 | { | 516 | { |
521 | omap2_init_common_infrastructure(); | 517 | omap2_init_common_infrastructure(); |
522 | omap2_init_common_devices(m65kxxxxam_sdrc_params, | 518 | omap2_init_common_devices(m65kxxxxam_sdrc_params, |
@@ -561,27 +557,15 @@ static struct twl4030_keypad_data igep2_keypad_pdata = { | |||
561 | .rep = 1, | 557 | .rep = 1, |
562 | }; | 558 | }; |
563 | 559 | ||
564 | static struct twl4030_platform_data igep2_twldata = { | 560 | static struct twl4030_platform_data igep_twldata = { |
565 | .irq_base = TWL4030_IRQ_BASE, | 561 | .irq_base = TWL4030_IRQ_BASE, |
566 | .irq_end = TWL4030_IRQ_END, | 562 | .irq_end = TWL4030_IRQ_END, |
567 | 563 | ||
568 | /* platform_data for children goes here */ | 564 | /* platform_data for children goes here */ |
569 | .usb = &igep2_usb_data, | 565 | .usb = &igep_usb_data, |
570 | .codec = &igep2_codec_data, | 566 | .gpio = &igep_twl4030_gpio_pdata, |
571 | .gpio = &igep2_twl4030_gpio_pdata, | 567 | .vmmc1 = &igep_vmmc1, |
572 | .keypad = &igep2_keypad_pdata, | 568 | .vio = &igep_vio, |
573 | .vmmc1 = &igep2_vmmc1, | ||
574 | .vpll2 = &igep2_vpll2, | ||
575 | .vio = &igep2_vio, | ||
576 | }; | ||
577 | |||
578 | static struct i2c_board_info __initdata igep2_i2c1_boardinfo[] = { | ||
579 | { | ||
580 | I2C_BOARD_INFO("twl4030", 0x48), | ||
581 | .flags = I2C_CLIENT_WAKE, | ||
582 | .irq = INT_34XX_SYS_NIRQ, | ||
583 | .platform_data = &igep2_twldata, | ||
584 | }, | ||
585 | }; | 569 | }; |
586 | 570 | ||
587 | static struct i2c_board_info __initdata igep2_i2c3_boardinfo[] = { | 571 | static struct i2c_board_info __initdata igep2_i2c3_boardinfo[] = { |
@@ -590,32 +574,29 @@ static struct i2c_board_info __initdata igep2_i2c3_boardinfo[] = { | |||
590 | }, | 574 | }, |
591 | }; | 575 | }; |
592 | 576 | ||
593 | static void __init igep2_i2c_init(void) | 577 | static void __init igep_i2c_init(void) |
594 | { | 578 | { |
595 | int ret; | 579 | int ret; |
596 | 580 | ||
597 | ret = omap_register_i2c_bus(1, 2600, igep2_i2c1_boardinfo, | 581 | if (machine_is_igep0020()) { |
598 | ARRAY_SIZE(igep2_i2c1_boardinfo)); | 582 | /* |
599 | if (ret) | 583 | * Bus 3 is attached to the DVI port where devices like the |
600 | pr_warning("IGEP2: Could not register I2C1 bus (%d)\n", ret); | 584 | * pico DLP projector don't work reliably with 400kHz |
585 | */ | ||
586 | ret = omap_register_i2c_bus(3, 100, igep2_i2c3_boardinfo, | ||
587 | ARRAY_SIZE(igep2_i2c3_boardinfo)); | ||
588 | if (ret) | ||
589 | pr_warning("IGEP2: Could not register I2C3 bus (%d)\n", ret); | ||
590 | |||
591 | igep_twldata.codec = &igep2_codec_data; | ||
592 | igep_twldata.keypad = &igep2_keypad_pdata; | ||
593 | igep_twldata.vpll2 = &igep2_vpll2; | ||
594 | } | ||
601 | 595 | ||
602 | /* | 596 | omap3_pmic_init("twl4030", &igep_twldata); |
603 | * Bus 3 is attached to the DVI port where devices like the pico DLP | ||
604 | * projector don't work reliably with 400kHz | ||
605 | */ | ||
606 | ret = omap_register_i2c_bus(3, 100, igep2_i2c3_boardinfo, | ||
607 | ARRAY_SIZE(igep2_i2c3_boardinfo)); | ||
608 | if (ret) | ||
609 | pr_warning("IGEP2: Could not register I2C3 bus (%d)\n", ret); | ||
610 | } | 597 | } |
611 | 598 | ||
612 | static struct omap_musb_board_data musb_board_data = { | 599 | static const struct usbhs_omap_board_data igep2_usbhs_bdata __initconst = { |
613 | .interface_type = MUSB_INTERFACE_ULPI, | ||
614 | .mode = MUSB_OTG, | ||
615 | .power = 100, | ||
616 | }; | ||
617 | |||
618 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { | ||
619 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, | 600 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
620 | .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, | 601 | .port_mode[1] = OMAP_USBHS_PORT_MODE_UNUSED, |
621 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, | 602 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, |
@@ -626,6 +607,17 @@ static const struct usbhs_omap_board_data usbhs_bdata __initconst = { | |||
626 | .reset_gpio_port[2] = -EINVAL, | 607 | .reset_gpio_port[2] = -EINVAL, |
627 | }; | 608 | }; |
628 | 609 | ||
610 | static const struct usbhs_omap_board_data igep3_usbhs_bdata __initconst = { | ||
611 | .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, | ||
612 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, | ||
613 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, | ||
614 | |||
615 | .phy_reset = true, | ||
616 | .reset_gpio_port[0] = -EINVAL, | ||
617 | .reset_gpio_port[1] = IGEP3_GPIO_USBH_NRESET, | ||
618 | .reset_gpio_port[2] = -EINVAL, | ||
619 | }; | ||
620 | |||
629 | #ifdef CONFIG_OMAP_MUX | 621 | #ifdef CONFIG_OMAP_MUX |
630 | static struct omap_board_mux board_mux[] __initdata = { | 622 | static struct omap_board_mux board_mux[] __initdata = { |
631 | { .reg_offset = OMAP_MUX_TERMINATOR }, | 623 | { .reg_offset = OMAP_MUX_TERMINATOR }, |
@@ -633,82 +625,95 @@ static struct omap_board_mux board_mux[] __initdata = { | |||
633 | #endif | 625 | #endif |
634 | 626 | ||
635 | #if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE) | 627 | #if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE) |
628 | static struct gpio igep_wlan_bt_gpios[] __initdata = { | ||
629 | { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_WIFI_NPD" }, | ||
630 | { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_WIFI_NRESET" }, | ||
631 | { -EINVAL, GPIOF_OUT_INIT_HIGH, "GPIO_BT_NRESET" }, | ||
632 | }; | ||
636 | 633 | ||
637 | static void __init igep2_wlan_bt_init(void) | 634 | static void __init igep_wlan_bt_init(void) |
638 | { | 635 | { |
639 | unsigned npd, wreset, btreset; | 636 | int err; |
640 | 637 | ||
641 | /* GPIO's for WLAN-BT combo depends on hardware revision */ | 638 | /* GPIO's for WLAN-BT combo depends on hardware revision */ |
642 | if (hwrev == IGEP2_BOARD_HWREV_B) { | 639 | if (hwrev == IGEP2_BOARD_HWREV_B) { |
643 | npd = IGEP2_RB_GPIO_WIFI_NPD; | 640 | igep_wlan_bt_gpios[0].gpio = IGEP2_RB_GPIO_WIFI_NPD; |
644 | wreset = IGEP2_RB_GPIO_WIFI_NRESET; | 641 | igep_wlan_bt_gpios[1].gpio = IGEP2_RB_GPIO_WIFI_NRESET; |
645 | btreset = IGEP2_RB_GPIO_BT_NRESET; | 642 | igep_wlan_bt_gpios[2].gpio = IGEP2_RB_GPIO_BT_NRESET; |
646 | } else if (hwrev == IGEP2_BOARD_HWREV_C) { | 643 | } else if (hwrev == IGEP2_BOARD_HWREV_C || machine_is_igep0030()) { |
647 | npd = IGEP2_RC_GPIO_WIFI_NPD; | 644 | igep_wlan_bt_gpios[0].gpio = IGEP2_RC_GPIO_WIFI_NPD; |
648 | wreset = IGEP2_RC_GPIO_WIFI_NRESET; | 645 | igep_wlan_bt_gpios[1].gpio = IGEP2_RC_GPIO_WIFI_NRESET; |
649 | btreset = IGEP2_RC_GPIO_BT_NRESET; | 646 | igep_wlan_bt_gpios[2].gpio = IGEP2_RC_GPIO_BT_NRESET; |
650 | } else | 647 | } else |
651 | return; | 648 | return; |
652 | 649 | ||
653 | /* Set GPIO's for WLAN-BT combo module */ | 650 | err = gpio_request_array(igep_wlan_bt_gpios, |
654 | if ((gpio_request(npd, "GPIO_WIFI_NPD") == 0) && | 651 | ARRAY_SIZE(igep_wlan_bt_gpios)); |
655 | (gpio_direction_output(npd, 1) == 0)) { | 652 | if (err) { |
656 | gpio_export(npd, 0); | 653 | pr_warning("IGEP2: Could not obtain WIFI/BT gpios\n"); |
657 | } else | 654 | return; |
658 | pr_warning("IGEP2: Could not obtain gpio GPIO_WIFI_NPD\n"); | 655 | } |
659 | 656 | ||
660 | if ((gpio_request(wreset, "GPIO_WIFI_NRESET") == 0) && | 657 | gpio_export(igep_wlan_bt_gpios[0].gpio, 0); |
661 | (gpio_direction_output(wreset, 1) == 0)) { | 658 | gpio_export(igep_wlan_bt_gpios[1].gpio, 0); |
662 | gpio_export(wreset, 0); | 659 | gpio_export(igep_wlan_bt_gpios[2].gpio, 0); |
663 | gpio_set_value(wreset, 0); | 660 | |
664 | udelay(10); | 661 | gpio_set_value(igep_wlan_bt_gpios[1].gpio, 0); |
665 | gpio_set_value(wreset, 1); | 662 | udelay(10); |
666 | } else | 663 | gpio_set_value(igep_wlan_bt_gpios[1].gpio, 1); |
667 | pr_warning("IGEP2: Could not obtain gpio GPIO_WIFI_NRESET\n"); | ||
668 | 664 | ||
669 | if ((gpio_request(btreset, "GPIO_BT_NRESET") == 0) && | ||
670 | (gpio_direction_output(btreset, 1) == 0)) { | ||
671 | gpio_export(btreset, 0); | ||
672 | } else | ||
673 | pr_warning("IGEP2: Could not obtain gpio GPIO_BT_NRESET\n"); | ||
674 | } | 665 | } |
675 | #else | 666 | #else |
676 | static inline void __init igep2_wlan_bt_init(void) { } | 667 | static inline void __init igep_wlan_bt_init(void) { } |
677 | #endif | 668 | #endif |
678 | 669 | ||
679 | static void __init igep2_init(void) | 670 | static void __init igep_init(void) |
680 | { | 671 | { |
681 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); | 672 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); |
682 | 673 | ||
683 | /* Get IGEP2 hardware revision */ | 674 | /* Get IGEP2 hardware revision */ |
684 | igep2_get_revision(); | 675 | igep2_get_revision(); |
685 | /* Register I2C busses and drivers */ | 676 | /* Register I2C busses and drivers */ |
686 | igep2_i2c_init(); | 677 | igep_i2c_init(); |
687 | platform_add_devices(igep2_devices, ARRAY_SIZE(igep2_devices)); | 678 | platform_add_devices(igep_devices, ARRAY_SIZE(igep_devices)); |
688 | omap_display_init(&igep2_dss_data); | ||
689 | omap_serial_init(); | 679 | omap_serial_init(); |
690 | usb_musb_init(&musb_board_data); | 680 | usb_musb_init(NULL); |
691 | usbhs_init(&usbhs_bdata); | ||
692 | 681 | ||
693 | igep2_flash_init(); | 682 | igep_flash_init(); |
694 | igep2_leds_init(); | 683 | igep_leds_init(); |
695 | igep2_display_init(); | ||
696 | igep2_init_smsc911x(); | ||
697 | 684 | ||
698 | /* | 685 | /* |
699 | * WLAN-BT combo module from MuRata which has a Marvell WLAN | 686 | * WLAN-BT combo module from MuRata which has a Marvell WLAN |
700 | * (88W8686) + CSR Bluetooth chipset. Uses SDIO interface. | 687 | * (88W8686) + CSR Bluetooth chipset. Uses SDIO interface. |
701 | */ | 688 | */ |
702 | igep2_wlan_bt_init(); | 689 | igep_wlan_bt_init(); |
703 | 690 | ||
691 | if (machine_is_igep0020()) { | ||
692 | omap_display_init(&igep2_dss_data); | ||
693 | igep2_display_init(); | ||
694 | igep2_init_smsc911x(); | ||
695 | usbhs_init(&igep2_usbhs_bdata); | ||
696 | } else { | ||
697 | usbhs_init(&igep3_usbhs_bdata); | ||
698 | } | ||
704 | } | 699 | } |
705 | 700 | ||
706 | MACHINE_START(IGEP0020, "IGEP v2 board") | 701 | MACHINE_START(IGEP0020, "IGEP v2 board") |
707 | .boot_params = 0x80000100, | 702 | .boot_params = 0x80000100, |
708 | .reserve = omap_reserve, | 703 | .reserve = omap_reserve, |
709 | .map_io = omap3_map_io, | 704 | .map_io = omap3_map_io, |
710 | .init_early = igep2_init_early, | 705 | .init_early = igep_init_early, |
706 | .init_irq = omap_init_irq, | ||
707 | .init_machine = igep_init, | ||
708 | .timer = &omap_timer, | ||
709 | MACHINE_END | ||
710 | |||
711 | MACHINE_START(IGEP0030, "IGEP OMAP3 module") | ||
712 | .boot_params = 0x80000100, | ||
713 | .reserve = omap_reserve, | ||
714 | .map_io = omap3_map_io, | ||
715 | .init_early = igep_init_early, | ||
711 | .init_irq = omap_init_irq, | 716 | .init_irq = omap_init_irq, |
712 | .init_machine = igep2_init, | 717 | .init_machine = igep_init, |
713 | .timer = &omap_timer, | 718 | .timer = &omap_timer, |
714 | MACHINE_END | 719 | MACHINE_END |
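
Throughout the IGEP rework above, groups of related control lines (LEDs, the TWL GPIOs, the WLAN/BT combo) move from individual gpio_request()/gpio_direction_output() calls into a struct gpio table handed to gpio_request_array(), which claims every entry with its GPIOF_* flags and unwinds on failure. A sketch of the pattern, with the numbers treated as placeholders that mirror the WLAN/BT table above:

  #include <linux/delay.h>
  #include <linux/gpio.h>
  #include <linux/kernel.h>

  static struct gpio my_wlan_bt_gpios[] __initdata = {
          { 138, GPIOF_OUT_INIT_HIGH, "wifi npd"    },
          { 139, GPIOF_OUT_INIT_HIGH, "wifi nreset" },
          { 137, GPIOF_OUT_INIT_HIGH, "bt nreset"   },
  };

  static void __init my_wlan_bt_init(void)
  {
          /* One call requests every line, sets direction and level, and
           * releases anything already claimed if a later entry fails. */
          if (gpio_request_array(my_wlan_bt_gpios, ARRAY_SIZE(my_wlan_bt_gpios))) {
                  pr_warning("myboard: could not obtain WLAN/BT gpios\n");
                  return;
          }

          /* Pulse the WLAN reset line, as the board code above does. */
          gpio_set_value(my_wlan_bt_gpios[1].gpio, 0);
          udelay(10);
          gpio_set_value(my_wlan_bt_gpios[1].gpio, 1);
  }

gpio_free_array() is the matching bulk release, used above when omap_display_init() fails on the CM-T35.
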
diff --git a/arch/arm/mach-omap2/board-igep0030.c b/arch/arm/mach-omap2/board-igep0030.c
deleted file mode 100644
index 2cf86c3cb1a3..000000000000
--- a/arch/arm/mach-omap2/board-igep0030.c
+++ /dev/null
@@ -1,458 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2010 - ISEE 2007 SL | ||
3 | * | ||
4 | * Modified from mach-omap2/board-generic.c | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License version 2 as | ||
8 | * published by the Free Software Foundation. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/platform_device.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include <linux/err.h> | ||
16 | #include <linux/clk.h> | ||
17 | #include <linux/io.h> | ||
18 | #include <linux/gpio.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | |||
21 | #include <linux/regulator/machine.h> | ||
22 | #include <linux/regulator/fixed.h> | ||
23 | #include <linux/i2c/twl.h> | ||
24 | #include <linux/mmc/host.h> | ||
25 | |||
26 | #include <asm/mach-types.h> | ||
27 | #include <asm/mach/arch.h> | ||
28 | |||
29 | #include <plat/board.h> | ||
30 | #include <plat/common.h> | ||
31 | #include <plat/gpmc.h> | ||
32 | #include <plat/usb.h> | ||
33 | #include <plat/onenand.h> | ||
34 | |||
35 | #include "mux.h" | ||
36 | #include "hsmmc.h" | ||
37 | #include "sdram-numonyx-m65kxxxxam.h" | ||
38 | |||
39 | #define IGEP3_GPIO_LED0_GREEN 54 | ||
40 | #define IGEP3_GPIO_LED0_RED 53 | ||
41 | #define IGEP3_GPIO_LED1_RED 16 | ||
42 | |||
43 | #define IGEP3_GPIO_WIFI_NPD 138 | ||
44 | #define IGEP3_GPIO_WIFI_NRESET 139 | ||
45 | #define IGEP3_GPIO_BT_NRESET 137 | ||
46 | |||
47 | #define IGEP3_GPIO_USBH_NRESET 183 | ||
48 | |||
49 | |||
50 | #if defined(CONFIG_MTD_ONENAND_OMAP2) || \ | ||
51 | defined(CONFIG_MTD_ONENAND_OMAP2_MODULE) | ||
52 | |||
53 | #define ONENAND_MAP 0x20000000 | ||
54 | |||
55 | /* | ||
56 | * x2 Flash built-in COMBO POP MEMORY | ||
57 | * Since the device is equipped with two DataRAMs, and two-plane NAND | ||
58 | * Flash memory array, these two component enables simultaneous program | ||
59 | * of 4KiB. Plane1 has only even blocks such as block0, block2, block4 | ||
60 | * while Plane2 has only odd blocks such as block1, block3, block5. | ||
61 | * So MTD regards it as 4KiB page size and 256KiB block size 64*(2*2048) | ||
62 | */ | ||
63 | |||
64 | static struct mtd_partition igep3_onenand_partitions[] = { | ||
65 | { | ||
66 | .name = "X-Loader", | ||
67 | .offset = 0, | ||
68 | .size = 2 * (64*(2*2048)) | ||
69 | }, | ||
70 | { | ||
71 | .name = "U-Boot", | ||
72 | .offset = MTDPART_OFS_APPEND, | ||
73 | .size = 6 * (64*(2*2048)), | ||
74 | }, | ||
75 | { | ||
76 | .name = "Environment", | ||
77 | .offset = MTDPART_OFS_APPEND, | ||
78 | .size = 2 * (64*(2*2048)), | ||
79 | }, | ||
80 | { | ||
81 | .name = "Kernel", | ||
82 | .offset = MTDPART_OFS_APPEND, | ||
83 | .size = 12 * (64*(2*2048)), | ||
84 | }, | ||
85 | { | ||
86 | .name = "File System", | ||
87 | .offset = MTDPART_OFS_APPEND, | ||
88 | .size = MTDPART_SIZ_FULL, | ||
89 | }, | ||
90 | }; | ||
91 | |||
92 | static struct omap_onenand_platform_data igep3_onenand_pdata = { | ||
93 | .parts = igep3_onenand_partitions, | ||
94 | .nr_parts = ARRAY_SIZE(igep3_onenand_partitions), | ||
95 | .onenand_setup = NULL, | ||
96 | .dma_channel = -1, /* disable DMA in OMAP OneNAND driver */ | ||
97 | }; | ||
98 | |||
99 | static struct platform_device igep3_onenand_device = { | ||
100 | .name = "omap2-onenand", | ||
101 | .id = -1, | ||
102 | .dev = { | ||
103 | .platform_data = &igep3_onenand_pdata, | ||
104 | }, | ||
105 | }; | ||
106 | |||
107 | static void __init igep3_flash_init(void) | ||
108 | { | ||
109 | u8 cs = 0; | ||
110 | u8 onenandcs = GPMC_CS_NUM + 1; | ||
111 | |||
112 | for (cs = 0; cs < GPMC_CS_NUM; cs++) { | ||
113 | u32 ret; | ||
114 | ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); | ||
115 | |||
116 | /* Check if NAND/oneNAND is configured */ | ||
117 | if ((ret & 0xC00) == 0x800) | ||
118 | /* NAND found */ | ||
119 | pr_err("IGEP3: Unsupported NAND found\n"); | ||
120 | else { | ||
121 | ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG7); | ||
122 | |||
123 | if ((ret & 0x3F) == (ONENAND_MAP >> 24)) | ||
124 | /* OneNAND found */ | ||
125 | onenandcs = cs; | ||
126 | } | ||
127 | } | ||
128 | |||
129 | if (onenandcs > GPMC_CS_NUM) { | ||
130 | pr_err("IGEP3: Unable to find configuration in GPMC\n"); | ||
131 | return; | ||
132 | } | ||
133 | |||
134 | igep3_onenand_pdata.cs = onenandcs; | ||
135 | |||
136 | if (platform_device_register(&igep3_onenand_device) < 0) | ||
137 | pr_err("IGEP3: Unable to register OneNAND device\n"); | ||
138 | } | ||
139 | |||
140 | #else | ||
141 | static void __init igep3_flash_init(void) {} | ||
142 | #endif | ||
143 | |||
144 | static struct regulator_consumer_supply igep3_vmmc1_supply = | ||
145 | REGULATOR_SUPPLY("vmmc", "omap_hsmmc.0"); | ||
146 | |||
147 | /* VMMC1 for OMAP VDD_MMC1 (i/o) and MMC1 card */ | ||
148 | static struct regulator_init_data igep3_vmmc1 = { | ||
149 | .constraints = { | ||
150 | .min_uV = 1850000, | ||
151 | .max_uV = 3150000, | ||
152 | .valid_modes_mask = REGULATOR_MODE_NORMAL | ||
153 | | REGULATOR_MODE_STANDBY, | ||
154 | .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | ||
155 | | REGULATOR_CHANGE_MODE | ||
156 | | REGULATOR_CHANGE_STATUS, | ||
157 | }, | ||
158 | .num_consumer_supplies = 1, | ||
159 | .consumer_supplies = &igep3_vmmc1_supply, | ||
160 | }; | ||
161 | |||
162 | static struct regulator_consumer_supply igep3_vio_supply = | ||
163 | REGULATOR_SUPPLY("vmmc_aux", "omap_hsmmc.1"); | ||
164 | |||
165 | static struct regulator_init_data igep3_vio = { | ||
166 | .constraints = { | ||
167 | .min_uV = 1800000, | ||
168 | .max_uV = 1800000, | ||
169 | .apply_uV = 1, | ||
170 | .valid_modes_mask = REGULATOR_MODE_NORMAL | ||
171 | | REGULATOR_MODE_STANDBY, | ||
172 | .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE | ||
173 | | REGULATOR_CHANGE_MODE | ||
174 | | REGULATOR_CHANGE_STATUS, | ||
175 | }, | ||
176 | .num_consumer_supplies = 1, | ||
177 | .consumer_supplies = &igep3_vio_supply, | ||
178 | }; | ||
179 | |||
180 | static struct regulator_consumer_supply igep3_vmmc2_supply = | ||
181 | REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"); | ||
182 | |||
183 | static struct regulator_init_data igep3_vmmc2 = { | ||
184 | .constraints = { | ||
185 | .valid_modes_mask = REGULATOR_MODE_NORMAL, | ||
186 | .always_on = 1, | ||
187 | }, | ||
188 | .num_consumer_supplies = 1, | ||
189 | .consumer_supplies = &igep3_vmmc2_supply, | ||
190 | }; | ||
191 | |||
192 | static struct fixed_voltage_config igep3_vwlan = { | ||
193 | .supply_name = "vwlan", | ||
194 | .microvolts = 3300000, | ||
195 | .gpio = -EINVAL, | ||
196 | .enabled_at_boot = 1, | ||
197 | .init_data = &igep3_vmmc2, | ||
198 | }; | ||
199 | |||
200 | static struct platform_device igep3_vwlan_device = { | ||
201 | .name = "reg-fixed-voltage", | ||
202 | .id = 0, | ||
203 | .dev = { | ||
204 | .platform_data = &igep3_vwlan, | ||
205 | }, | ||
206 | }; | ||
207 | |||
208 | static struct omap2_hsmmc_info mmc[] = { | ||
209 | [0] = { | ||
210 | .mmc = 1, | ||
211 | .caps = MMC_CAP_4_BIT_DATA, | ||
212 | .gpio_cd = -EINVAL, | ||
213 | .gpio_wp = -EINVAL, | ||
214 | }, | ||
215 | #if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE) | ||
216 | [1] = { | ||
217 | .mmc = 2, | ||
218 | .caps = MMC_CAP_4_BIT_DATA, | ||
219 | .gpio_cd = -EINVAL, | ||
220 | .gpio_wp = -EINVAL, | ||
221 | }, | ||
222 | #endif | ||
223 | {} /* Terminator */ | ||
224 | }; | ||
225 | |||
226 | #if defined(CONFIG_LEDS_GPIO) || defined(CONFIG_LEDS_GPIO_MODULE) | ||
227 | #include <linux/leds.h> | ||
228 | |||
229 | static struct gpio_led igep3_gpio_leds[] = { | ||
230 | [0] = { | ||
231 | .name = "gpio-led:red:d0", | ||
232 | .gpio = IGEP3_GPIO_LED0_RED, | ||
233 | .default_trigger = "default-off" | ||
234 | }, | ||
235 | [1] = { | ||
236 | .name = "gpio-led:green:d0", | ||
237 | .gpio = IGEP3_GPIO_LED0_GREEN, | ||
238 | .default_trigger = "default-off", | ||
239 | }, | ||
240 | [2] = { | ||
241 | .name = "gpio-led:red:d1", | ||
242 | .gpio = IGEP3_GPIO_LED1_RED, | ||
243 | .default_trigger = "default-off", | ||
244 | }, | ||
245 | [3] = { | ||
246 | .name = "gpio-led:green:d1", | ||
247 | .default_trigger = "heartbeat", | ||
248 | .gpio = -EINVAL, /* gets replaced */ | ||
249 | }, | ||
250 | }; | ||
251 | |||
252 | static struct gpio_led_platform_data igep3_led_pdata = { | ||
253 | .leds = igep3_gpio_leds, | ||
254 | .num_leds = ARRAY_SIZE(igep3_gpio_leds), | ||
255 | }; | ||
256 | |||
257 | static struct platform_device igep3_led_device = { | ||
258 | .name = "leds-gpio", | ||
259 | .id = -1, | ||
260 | .dev = { | ||
261 | .platform_data = &igep3_led_pdata, | ||
262 | }, | ||
263 | }; | ||
264 | |||
265 | static void __init igep3_leds_init(void) | ||
266 | { | ||
267 | platform_device_register(&igep3_led_device); | ||
268 | } | ||
269 | |||
270 | #else | ||
271 | static inline void igep3_leds_init(void) | ||
272 | { | ||
273 | if ((gpio_request(IGEP3_GPIO_LED0_RED, "gpio-led:red:d0") == 0) && | ||
274 | (gpio_direction_output(IGEP3_GPIO_LED0_RED, 1) == 0)) { | ||
275 | gpio_export(IGEP3_GPIO_LED0_RED, 0); | ||
276 | gpio_set_value(IGEP3_GPIO_LED0_RED, 1); | ||
277 | } else | ||
278 | pr_warning("IGEP3: Could not obtain gpio GPIO_LED0_RED\n"); | ||
279 | |||
280 | if ((gpio_request(IGEP3_GPIO_LED0_GREEN, "gpio-led:green:d0") == 0) && | ||
281 | (gpio_direction_output(IGEP3_GPIO_LED0_GREEN, 1) == 0)) { | ||
282 | gpio_export(IGEP3_GPIO_LED0_GREEN, 0); | ||
283 | gpio_set_value(IGEP3_GPIO_LED0_GREEN, 1); | ||
284 | } else | ||
285 | pr_warning("IGEP3: Could not obtain gpio GPIO_LED0_GREEN\n"); | ||
286 | |||
287 | if ((gpio_request(IGEP3_GPIO_LED1_RED, "gpio-led:red:d1") == 0) && | ||
288 | (gpio_direction_output(IGEP3_GPIO_LED1_RED, 1) == 0)) { | ||
289 | gpio_export(IGEP3_GPIO_LED1_RED, 0); | ||
290 | gpio_set_value(IGEP3_GPIO_LED1_RED, 1); | ||
291 | } else | ||
292 | pr_warning("IGEP3: Could not obtain gpio GPIO_LED1_RED\n"); | ||
293 | } | ||
294 | #endif | ||
295 | |||
296 | static int igep3_twl4030_gpio_setup(struct device *dev, | ||
297 | unsigned gpio, unsigned ngpio) | ||
298 | { | ||
299 | /* gpio + 0 is "mmc0_cd" (input/IRQ) */ | ||
300 | mmc[0].gpio_cd = gpio + 0; | ||
301 | omap2_hsmmc_init(mmc); | ||
302 | |||
303 | /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */ | ||
304 | #if !defined(CONFIG_LEDS_GPIO) && !defined(CONFIG_LEDS_GPIO_MODULE) | ||
305 | if ((gpio_request(gpio+TWL4030_GPIO_MAX+1, "gpio-led:green:d1") == 0) | ||
306 | && (gpio_direction_output(gpio + TWL4030_GPIO_MAX + 1, 1) == 0)) { | ||
307 | gpio_export(gpio + TWL4030_GPIO_MAX + 1, 0); | ||
308 | gpio_set_value(gpio + TWL4030_GPIO_MAX + 1, 0); | ||
309 | } else | ||
310 | pr_warning("IGEP3: Could not obtain gpio GPIO_LED1_GREEN\n"); | ||
311 | #else | ||
312 | igep3_gpio_leds[3].gpio = gpio + TWL4030_GPIO_MAX + 1; | ||
313 | #endif | ||
314 | |||
315 | return 0; | ||
316 | }; | ||
317 | |||
318 | static struct twl4030_gpio_platform_data igep3_twl4030_gpio_pdata = { | ||
319 | .gpio_base = OMAP_MAX_GPIO_LINES, | ||
320 | .irq_base = TWL4030_GPIO_IRQ_BASE, | ||
321 | .irq_end = TWL4030_GPIO_IRQ_END, | ||
322 | .use_leds = true, | ||
323 | .setup = igep3_twl4030_gpio_setup, | ||
324 | }; | ||
325 | |||
326 | static struct twl4030_usb_data igep3_twl4030_usb_data = { | ||
327 | .usb_mode = T2_USB_MODE_ULPI, | ||
328 | }; | ||
329 | |||
330 | static struct platform_device *igep3_devices[] __initdata = { | ||
331 | &igep3_vwlan_device, | ||
332 | }; | ||
333 | |||
334 | static void __init igep3_init_early(void) | ||
335 | { | ||
336 | omap2_init_common_infrastructure(); | ||
337 | omap2_init_common_devices(m65kxxxxam_sdrc_params, | ||
338 | m65kxxxxam_sdrc_params); | ||
339 | } | ||
340 | |||
341 | static struct twl4030_platform_data igep3_twl4030_pdata = { | ||
342 | .irq_base = TWL4030_IRQ_BASE, | ||
343 | .irq_end = TWL4030_IRQ_END, | ||
344 | |||
345 | /* platform_data for children goes here */ | ||
346 | .usb = &igep3_twl4030_usb_data, | ||
347 | .gpio = &igep3_twl4030_gpio_pdata, | ||
348 | .vmmc1 = &igep3_vmmc1, | ||
349 | .vio = &igep3_vio, | ||
350 | }; | ||
351 | |||
352 | static struct i2c_board_info __initdata igep3_i2c_boardinfo[] = { | ||
353 | { | ||
354 | I2C_BOARD_INFO("twl4030", 0x48), | ||
355 | .flags = I2C_CLIENT_WAKE, | ||
356 | .irq = INT_34XX_SYS_NIRQ, | ||
357 | .platform_data = &igep3_twl4030_pdata, | ||
358 | }, | ||
359 | }; | ||
360 | |||
361 | static int __init igep3_i2c_init(void) | ||
362 | { | ||
363 | omap_register_i2c_bus(1, 2600, igep3_i2c_boardinfo, | ||
364 | ARRAY_SIZE(igep3_i2c_boardinfo)); | ||
365 | |||
366 | return 0; | ||
367 | } | ||
368 | |||
369 | static struct omap_musb_board_data musb_board_data = { | ||
370 | .interface_type = MUSB_INTERFACE_ULPI, | ||
371 | .mode = MUSB_OTG, | ||
372 | .power = 100, | ||
373 | }; | ||
374 | |||
375 | #if defined(CONFIG_LIBERTAS_SDIO) || defined(CONFIG_LIBERTAS_SDIO_MODULE) | ||
376 | |||
377 | static void __init igep3_wifi_bt_init(void) | ||
378 | { | ||
379 | /* Configure MUX values for W-LAN + Bluetooth GPIO's */ | ||
380 | omap_mux_init_gpio(IGEP3_GPIO_WIFI_NPD, OMAP_PIN_OUTPUT); | ||
381 | omap_mux_init_gpio(IGEP3_GPIO_WIFI_NRESET, OMAP_PIN_OUTPUT); | ||
382 | omap_mux_init_gpio(IGEP3_GPIO_BT_NRESET, OMAP_PIN_OUTPUT); | ||
383 | |||
384 | /* Set GPIO's for W-LAN + Bluetooth combo module */ | ||
385 | if ((gpio_request(IGEP3_GPIO_WIFI_NPD, "GPIO_WIFI_NPD") == 0) && | ||
386 | (gpio_direction_output(IGEP3_GPIO_WIFI_NPD, 1) == 0)) { | ||
387 | gpio_export(IGEP3_GPIO_WIFI_NPD, 0); | ||
388 | } else | ||
389 | pr_warning("IGEP3: Could not obtain gpio GPIO_WIFI_NPD\n"); | ||
390 | |||
391 | if ((gpio_request(IGEP3_GPIO_WIFI_NRESET, "GPIO_WIFI_NRESET") == 0) && | ||
392 | (gpio_direction_output(IGEP3_GPIO_WIFI_NRESET, 1) == 0)) { | ||
393 | gpio_export(IGEP3_GPIO_WIFI_NRESET, 0); | ||
394 | gpio_set_value(IGEP3_GPIO_WIFI_NRESET, 0); | ||
395 | udelay(10); | ||
396 | gpio_set_value(IGEP3_GPIO_WIFI_NRESET, 1); | ||
397 | } else | ||
398 | pr_warning("IGEP3: Could not obtain gpio GPIO_WIFI_NRESET\n"); | ||
399 | |||
400 | if ((gpio_request(IGEP3_GPIO_BT_NRESET, "GPIO_BT_NRESET") == 0) && | ||
401 | (gpio_direction_output(IGEP3_GPIO_BT_NRESET, 1) == 0)) { | ||
402 | gpio_export(IGEP3_GPIO_BT_NRESET, 0); | ||
403 | } else | ||
404 | pr_warning("IGEP3: Could not obtain gpio GPIO_BT_NRESET\n"); | ||
405 | } | ||
406 | #else | ||
407 | void __init igep3_wifi_bt_init(void) {} | ||
408 | #endif | ||
409 | |||
410 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { | ||
411 | .port_mode[0] = OMAP_USBHS_PORT_MODE_UNUSED, | ||
412 | .port_mode[1] = OMAP_EHCI_PORT_MODE_PHY, | ||
413 | .port_mode[2] = OMAP_USBHS_PORT_MODE_UNUSED, | ||
414 | |||
415 | .phy_reset = true, | ||
416 | .reset_gpio_port[0] = -EINVAL, | ||
417 | .reset_gpio_port[1] = IGEP3_GPIO_USBH_NRESET, | ||
418 | .reset_gpio_port[2] = -EINVAL, | ||
419 | }; | ||
420 | |||
421 | #ifdef CONFIG_OMAP_MUX | ||
422 | static struct omap_board_mux board_mux[] __initdata = { | ||
423 | OMAP3_MUX(I2C2_SDA, OMAP_MUX_MODE4 | OMAP_PIN_OUTPUT), | ||
424 | { .reg_offset = OMAP_MUX_TERMINATOR }, | ||
425 | }; | ||
426 | #endif | ||
427 | |||
428 | static void __init igep3_init(void) | ||
429 | { | ||
430 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); | ||
431 | |||
432 | /* Register I2C busses and drivers */ | ||
433 | igep3_i2c_init(); | ||
434 | platform_add_devices(igep3_devices, ARRAY_SIZE(igep3_devices)); | ||
435 | omap_serial_init(); | ||
436 | usb_musb_init(&musb_board_data); | ||
437 | usbhs_init(&usbhs_bdata); | ||
438 | |||
439 | igep3_flash_init(); | ||
440 | igep3_leds_init(); | ||
441 | |||
442 | /* | ||
443 | * WLAN-BT combo module from MuRata which has a Marvell WLAN | ||
444 | * (88W8686) + CSR Bluetooth chipset. Uses SDIO interface. | ||
445 | */ | ||
446 | igep3_wifi_bt_init(); | ||
447 | |||
448 | } | ||
449 | |||
450 | MACHINE_START(IGEP0030, "IGEP OMAP3 module") | ||
451 | .boot_params = 0x80000100, | ||
452 | .reserve = omap_reserve, | ||
453 | .map_io = omap3_map_io, | ||
454 | .init_early = igep3_init_early, | ||
455 | .init_irq = omap_init_irq, | ||
456 | .init_machine = igep3_init, | ||
457 | .timer = &omap_timer, | ||
458 | MACHINE_END | ||
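The file removed above still used the open-coded gpio_request()/gpio_direction_output()/gpio_export() sequence for every pin; the consolidated board code in this series replaces such spots with gpio_request_one(), which folds the request, direction and initial level into one call. A minimal before/after sketch (the wrapper function and its gpio argument are placeholders, not code from the patch):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/gpio.h>

static void __init example_led_gpio_init(unsigned gpio)
{
	/* old open-coded pattern, as in the removed igep3_leds_init() */
	if ((gpio_request(gpio, "gpio-led:red:d0") == 0) &&
	    (gpio_direction_output(gpio, 1) == 0))
		gpio_export(gpio, 0);
	else
		pr_warning("could not obtain gpio\n");

	gpio_free(gpio);		/* release it again for the demo */

	/* equivalent request with gpio_request_one() */
	if (gpio_request_one(gpio, GPIOF_OUT_INIT_HIGH, "gpio-led:red:d0") == 0)
		gpio_export(gpio, 0);
	else
		pr_warning("could not obtain gpio\n");
}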
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c index e2ba77957a8c..f7d6038075f0 100644 --- a/arch/arm/mach-omap2/board-ldp.c +++ b/arch/arm/mach-omap2/board-ldp.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/err.h> | 22 | #include <linux/err.h> |
23 | #include <linux/clk.h> | 23 | #include <linux/clk.h> |
24 | #include <linux/spi/spi.h> | 24 | #include <linux/spi/spi.h> |
25 | #include <linux/spi/ads7846.h> | ||
26 | #include <linux/regulator/machine.h> | 25 | #include <linux/regulator/machine.h> |
27 | #include <linux/i2c/twl.h> | 26 | #include <linux/i2c/twl.h> |
28 | #include <linux/io.h> | 27 | #include <linux/io.h> |
@@ -43,47 +42,19 @@ | |||
43 | 42 | ||
44 | #include <asm/delay.h> | 43 | #include <asm/delay.h> |
45 | #include <plat/usb.h> | 44 | #include <plat/usb.h> |
45 | #include <plat/gpmc-smsc911x.h> | ||
46 | 46 | ||
47 | #include "board-flash.h" | 47 | #include "board-flash.h" |
48 | #include "mux.h" | 48 | #include "mux.h" |
49 | #include "hsmmc.h" | 49 | #include "hsmmc.h" |
50 | #include "control.h" | 50 | #include "control.h" |
51 | #include "common-board-devices.h" | ||
51 | 52 | ||
52 | #define LDP_SMSC911X_CS 1 | 53 | #define LDP_SMSC911X_CS 1 |
53 | #define LDP_SMSC911X_GPIO 152 | 54 | #define LDP_SMSC911X_GPIO 152 |
54 | #define DEBUG_BASE 0x08000000 | 55 | #define DEBUG_BASE 0x08000000 |
55 | #define LDP_ETHR_START DEBUG_BASE | 56 | #define LDP_ETHR_START DEBUG_BASE |
56 | 57 | ||
57 | static struct resource ldp_smsc911x_resources[] = { | ||
58 | [0] = { | ||
59 | .start = LDP_ETHR_START, | ||
60 | .end = LDP_ETHR_START + SZ_4K, | ||
61 | .flags = IORESOURCE_MEM, | ||
62 | }, | ||
63 | [1] = { | ||
64 | .start = 0, | ||
65 | .end = 0, | ||
66 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, | ||
67 | }, | ||
68 | }; | ||
69 | |||
70 | static struct smsc911x_platform_config ldp_smsc911x_config = { | ||
71 | .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, | ||
72 | .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, | ||
73 | .flags = SMSC911X_USE_32BIT, | ||
74 | .phy_interface = PHY_INTERFACE_MODE_MII, | ||
75 | }; | ||
76 | |||
77 | static struct platform_device ldp_smsc911x_device = { | ||
78 | .name = "smsc911x", | ||
79 | .id = -1, | ||
80 | .num_resources = ARRAY_SIZE(ldp_smsc911x_resources), | ||
81 | .resource = ldp_smsc911x_resources, | ||
82 | .dev = { | ||
83 | .platform_data = &ldp_smsc911x_config, | ||
84 | }, | ||
85 | }; | ||
86 | |||
87 | static uint32_t board_keymap[] = { | 58 | static uint32_t board_keymap[] = { |
88 | KEY(0, 0, KEY_1), | 59 | KEY(0, 0, KEY_1), |
89 | KEY(1, 0, KEY_2), | 60 | KEY(1, 0, KEY_2), |
@@ -197,82 +168,16 @@ static struct platform_device ldp_gpio_keys_device = { | |||
197 | }, | 168 | }, |
198 | }; | 169 | }; |
199 | 170 | ||
200 | static int ts_gpio; | 171 | static struct omap_smsc911x_platform_data smsc911x_cfg = { |
201 | 172 | .cs = LDP_SMSC911X_CS, | |
202 | /** | 173 | .gpio_irq = LDP_SMSC911X_GPIO, |
203 | * @brief ads7846_dev_init : Requests & sets GPIO line for pen-irq | 174 | .gpio_reset = -EINVAL, |
204 | * | 175 | .flags = SMSC911X_USE_32BIT, |
205 | * @return - void. If request gpio fails then Flag KERN_ERR. | ||
206 | */ | ||
207 | static void ads7846_dev_init(void) | ||
208 | { | ||
209 | if (gpio_request(ts_gpio, "ads7846 irq") < 0) { | ||
210 | printk(KERN_ERR "can't get ads746 pen down GPIO\n"); | ||
211 | return; | ||
212 | } | ||
213 | |||
214 | gpio_direction_input(ts_gpio); | ||
215 | gpio_set_debounce(ts_gpio, 310); | ||
216 | } | ||
217 | |||
218 | static int ads7846_get_pendown_state(void) | ||
219 | { | ||
220 | return !gpio_get_value(ts_gpio); | ||
221 | } | ||
222 | |||
223 | static struct ads7846_platform_data tsc2046_config __initdata = { | ||
224 | .get_pendown_state = ads7846_get_pendown_state, | ||
225 | .keep_vref_on = 1, | ||
226 | }; | ||
227 | |||
228 | static struct omap2_mcspi_device_config tsc2046_mcspi_config = { | ||
229 | .turbo_mode = 0, | ||
230 | .single_channel = 1, /* 0: slave, 1: master */ | ||
231 | }; | ||
232 | |||
233 | static struct spi_board_info ldp_spi_board_info[] __initdata = { | ||
234 | [0] = { | ||
235 | /* | ||
236 | * TSC2046 operates at a max freqency of 2MHz, so | ||
237 | * operate slightly below at 1.5MHz | ||
238 | */ | ||
239 | .modalias = "ads7846", | ||
240 | .bus_num = 1, | ||
241 | .chip_select = 0, | ||
242 | .max_speed_hz = 1500000, | ||
243 | .controller_data = &tsc2046_mcspi_config, | ||
244 | .irq = 0, | ||
245 | .platform_data = &tsc2046_config, | ||
246 | }, | ||
247 | }; | 176 | }; |
248 | 177 | ||
249 | static inline void __init ldp_init_smsc911x(void) | 178 | static inline void __init ldp_init_smsc911x(void) |
250 | { | 179 | { |
251 | int eth_cs; | 180 | gpmc_smsc911x_init(&smsc911x_cfg); |
252 | unsigned long cs_mem_base; | ||
253 | int eth_gpio = 0; | ||
254 | |||
255 | eth_cs = LDP_SMSC911X_CS; | ||
256 | |||
257 | if (gpmc_cs_request(eth_cs, SZ_16M, &cs_mem_base) < 0) { | ||
258 | printk(KERN_ERR "Failed to request GPMC mem for smsc911x\n"); | ||
259 | return; | ||
260 | } | ||
261 | |||
262 | ldp_smsc911x_resources[0].start = cs_mem_base + 0x0; | ||
263 | ldp_smsc911x_resources[0].end = cs_mem_base + 0xff; | ||
264 | udelay(100); | ||
265 | |||
266 | eth_gpio = LDP_SMSC911X_GPIO; | ||
267 | |||
268 | ldp_smsc911x_resources[1].start = OMAP_GPIO_IRQ(eth_gpio); | ||
269 | |||
270 | if (gpio_request(eth_gpio, "smsc911x irq") < 0) { | ||
271 | printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n", | ||
272 | eth_gpio); | ||
273 | return; | ||
274 | } | ||
275 | gpio_direction_input(eth_gpio); | ||
276 | } | 181 | } |
277 | 182 | ||
278 | static struct platform_device ldp_lcd_device = { | 183 | static struct platform_device ldp_lcd_device = { |
@@ -360,19 +265,9 @@ static struct twl4030_platform_data ldp_twldata = { | |||
360 | .keypad = &ldp_kp_twl4030_data, | 265 | .keypad = &ldp_kp_twl4030_data, |
361 | }; | 266 | }; |
362 | 267 | ||
363 | static struct i2c_board_info __initdata ldp_i2c_boardinfo[] = { | ||
364 | { | ||
365 | I2C_BOARD_INFO("twl4030", 0x48), | ||
366 | .flags = I2C_CLIENT_WAKE, | ||
367 | .irq = INT_34XX_SYS_NIRQ, | ||
368 | .platform_data = &ldp_twldata, | ||
369 | }, | ||
370 | }; | ||
371 | |||
372 | static int __init omap_i2c_init(void) | 268 | static int __init omap_i2c_init(void) |
373 | { | 269 | { |
374 | omap_register_i2c_bus(1, 2600, ldp_i2c_boardinfo, | 270 | omap3_pmic_init("twl4030", &ldp_twldata); |
375 | ARRAY_SIZE(ldp_i2c_boardinfo)); | ||
376 | omap_register_i2c_bus(2, 400, NULL, 0); | 271 | omap_register_i2c_bus(2, 400, NULL, 0); |
377 | omap_register_i2c_bus(3, 400, NULL, 0); | 272 | omap_register_i2c_bus(3, 400, NULL, 0); |
378 | return 0; | 273 | return 0; |
@@ -389,7 +284,6 @@ static struct omap2_hsmmc_info mmc[] __initdata = { | |||
389 | }; | 284 | }; |
390 | 285 | ||
391 | static struct platform_device *ldp_devices[] __initdata = { | 286 | static struct platform_device *ldp_devices[] __initdata = { |
392 | &ldp_smsc911x_device, | ||
393 | &ldp_lcd_device, | 287 | &ldp_lcd_device, |
394 | &ldp_gpio_keys_device, | 288 | &ldp_gpio_keys_device, |
395 | }; | 289 | }; |
@@ -400,12 +294,6 @@ static struct omap_board_mux board_mux[] __initdata = { | |||
400 | }; | 294 | }; |
401 | #endif | 295 | #endif |
402 | 296 | ||
403 | static struct omap_musb_board_data musb_board_data = { | ||
404 | .interface_type = MUSB_INTERFACE_ULPI, | ||
405 | .mode = MUSB_OTG, | ||
406 | .power = 100, | ||
407 | }; | ||
408 | |||
409 | static struct mtd_partition ldp_nand_partitions[] = { | 297 | static struct mtd_partition ldp_nand_partitions[] = { |
410 | /* All the partition sizes are listed in terms of NAND block size */ | 298 | /* All the partition sizes are listed in terms of NAND block size */ |
411 | { | 299 | { |
@@ -446,13 +334,9 @@ static void __init omap_ldp_init(void) | |||
446 | ldp_init_smsc911x(); | 334 | ldp_init_smsc911x(); |
447 | omap_i2c_init(); | 335 | omap_i2c_init(); |
448 | platform_add_devices(ldp_devices, ARRAY_SIZE(ldp_devices)); | 336 | platform_add_devices(ldp_devices, ARRAY_SIZE(ldp_devices)); |
449 | ts_gpio = 54; | 337 | omap_ads7846_init(1, 54, 310, NULL); |
450 | ldp_spi_board_info[0].irq = gpio_to_irq(ts_gpio); | ||
451 | spi_register_board_info(ldp_spi_board_info, | ||
452 | ARRAY_SIZE(ldp_spi_board_info)); | ||
453 | ads7846_dev_init(); | ||
454 | omap_serial_init(); | 338 | omap_serial_init(); |
455 | usb_musb_init(&musb_board_data); | 339 | usb_musb_init(NULL); |
456 | board_nand_init(ldp_nand_partitions, | 340 | board_nand_init(ldp_nand_partitions, |
457 | ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS, 0); | 341 | ARRAY_SIZE(ldp_nand_partitions), ZOOM_NAND_CS, 0); |
458 | 342 | ||
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c index e710cd9e079b..8d74318ed495 100644 --- a/arch/arm/mach-omap2/board-n8x0.c +++ b/arch/arm/mach-omap2/board-n8x0.c | |||
@@ -106,14 +106,13 @@ static void __init n8x0_usb_init(void) | |||
106 | static char announce[] __initdata = KERN_INFO "TUSB 6010\n"; | 106 | static char announce[] __initdata = KERN_INFO "TUSB 6010\n"; |
107 | 107 | ||
108 | /* PM companion chip power control pin */ | 108 | /* PM companion chip power control pin */ |
109 | ret = gpio_request(TUSB6010_GPIO_ENABLE, "TUSB6010 enable"); | 109 | ret = gpio_request_one(TUSB6010_GPIO_ENABLE, GPIOF_OUT_INIT_LOW, |
110 | "TUSB6010 enable"); | ||
110 | if (ret != 0) { | 111 | if (ret != 0) { |
111 | printk(KERN_ERR "Could not get TUSB power GPIO%i\n", | 112 | printk(KERN_ERR "Could not get TUSB power GPIO%i\n", |
112 | TUSB6010_GPIO_ENABLE); | 113 | TUSB6010_GPIO_ENABLE); |
113 | return; | 114 | return; |
114 | } | 115 | } |
115 | gpio_direction_output(TUSB6010_GPIO_ENABLE, 0); | ||
116 | |||
117 | tusb_set_power(0); | 116 | tusb_set_power(0); |
118 | 117 | ||
119 | ret = tusb6010_setup_interface(&tusb_data, TUSB6010_REFCLK_19, 2, | 118 | ret = tusb6010_setup_interface(&tusb_data, TUSB6010_REFCLK_19, 2, |
@@ -494,8 +493,12 @@ static struct omap_mmc_platform_data mmc1_data = { | |||
494 | 493 | ||
495 | static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC]; | 494 | static struct omap_mmc_platform_data *mmc_data[OMAP24XX_NR_MMC]; |
496 | 495 | ||
497 | static void __init n8x0_mmc_init(void) | 496 | static struct gpio n810_emmc_gpios[] __initdata = { |
497 | { N810_EMMC_VSD_GPIO, GPIOF_OUT_INIT_LOW, "MMC slot 2 Vddf" }, | ||
498 | { N810_EMMC_VIO_GPIO, GPIOF_OUT_INIT_LOW, "MMC slot 2 Vdd" }, | ||
499 | }; | ||
498 | 500 | ||
501 | static void __init n8x0_mmc_init(void) | ||
499 | { | 502 | { |
500 | int err; | 503 | int err; |
501 | 504 | ||
@@ -512,27 +515,18 @@ static void __init n8x0_mmc_init(void) | |||
512 | mmc1_data.slots[1].ban_openended = 1; | 515 | mmc1_data.slots[1].ban_openended = 1; |
513 | } | 516 | } |
514 | 517 | ||
515 | err = gpio_request(N8X0_SLOT_SWITCH_GPIO, "MMC slot switch"); | 518 | err = gpio_request_one(N8X0_SLOT_SWITCH_GPIO, GPIOF_OUT_INIT_LOW, |
519 | "MMC slot switch"); | ||
516 | if (err) | 520 | if (err) |
517 | return; | 521 | return; |
518 | 522 | ||
519 | gpio_direction_output(N8X0_SLOT_SWITCH_GPIO, 0); | ||
520 | |||
521 | if (machine_is_nokia_n810()) { | 523 | if (machine_is_nokia_n810()) { |
522 | err = gpio_request(N810_EMMC_VSD_GPIO, "MMC slot 2 Vddf"); | 524 | err = gpio_request_array(n810_emmc_gpios, |
523 | if (err) { | 525 | ARRAY_SIZE(n810_emmc_gpios)); |
524 | gpio_free(N8X0_SLOT_SWITCH_GPIO); | ||
525 | return; | ||
526 | } | ||
527 | gpio_direction_output(N810_EMMC_VSD_GPIO, 0); | ||
528 | |||
529 | err = gpio_request(N810_EMMC_VIO_GPIO, "MMC slot 2 Vdd"); | ||
530 | if (err) { | 526 | if (err) { |
531 | gpio_free(N8X0_SLOT_SWITCH_GPIO); | 527 | gpio_free(N8X0_SLOT_SWITCH_GPIO); |
532 | gpio_free(N810_EMMC_VSD_GPIO); | ||
533 | return; | 528 | return; |
534 | } | 529 | } |
535 | gpio_direction_output(N810_EMMC_VIO_GPIO, 0); | ||
536 | } | 530 | } |
537 | 531 | ||
538 | mmc_data[0] = &mmc1_data; | 532 | mmc_data[0] = &mmc1_data; |
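gpio_request_array() takes a table of struct gpio entries (number, GPIOF_* flags, label) and requests and configures them in one call; on failure it releases whatever it had already acquired, which is why the hand-rolled unwind of N810_EMMC_VSD_GPIO above can go away. A minimal sketch built from the table in the hunk (the GPIO constants come from the board file; the wrapper function name is a placeholder):

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/gpio.h>

static struct gpio n810_emmc_gpios[] __initdata = {
	{ N810_EMMC_VSD_GPIO, GPIOF_OUT_INIT_LOW, "MMC slot 2 Vddf" },
	{ N810_EMMC_VIO_GPIO, GPIOF_OUT_INIT_LOW, "MMC slot 2 Vdd"  },
};

static int __init n810_emmc_gpio_init(void)
{
	int err;

	/* both pins come back requested and driven low, or none at all */
	err = gpio_request_array(n810_emmc_gpios, ARRAY_SIZE(n810_emmc_gpios));
	if (err)
		pr_err("n810: eMMC supply GPIOs unavailable (%d)\n", err);

	return err;
}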
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c index 97750d483a70..be71426359f2 100644 --- a/arch/arm/mach-omap2/board-omap3beagle.c +++ b/arch/arm/mach-omap2/board-omap3beagle.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include "hsmmc.h" | 52 | #include "hsmmc.h" |
53 | #include "timer-gp.h" | 53 | #include "timer-gp.h" |
54 | #include "pm.h" | 54 | #include "pm.h" |
55 | #include "common-board-devices.h" | ||
55 | 56 | ||
56 | #define NAND_BLOCK_SIZE SZ_128K | 57 | #define NAND_BLOCK_SIZE SZ_128K |
57 | 58 | ||
@@ -79,6 +80,12 @@ static u8 omap3_beagle_get_rev(void) | |||
79 | return omap3_beagle_version; | 80 | return omap3_beagle_version; |
80 | } | 81 | } |
81 | 82 | ||
83 | static struct gpio omap3_beagle_rev_gpios[] __initdata = { | ||
84 | { 171, GPIOF_IN, "rev_id_0" }, | ||
85 | { 172, GPIOF_IN, "rev_id_1" }, | ||
86 | { 173, GPIOF_IN, "rev_id_2" }, | ||
87 | }; | ||
88 | |||
82 | static void __init omap3_beagle_init_rev(void) | 89 | static void __init omap3_beagle_init_rev(void) |
83 | { | 90 | { |
84 | int ret; | 91 | int ret; |
@@ -88,21 +95,13 @@ static void __init omap3_beagle_init_rev(void) | |||
88 | omap_mux_init_gpio(172, OMAP_PIN_INPUT_PULLUP); | 95 | omap_mux_init_gpio(172, OMAP_PIN_INPUT_PULLUP); |
89 | omap_mux_init_gpio(173, OMAP_PIN_INPUT_PULLUP); | 96 | omap_mux_init_gpio(173, OMAP_PIN_INPUT_PULLUP); |
90 | 97 | ||
91 | ret = gpio_request(171, "rev_id_0"); | 98 | ret = gpio_request_array(omap3_beagle_rev_gpios, |
92 | if (ret < 0) | 99 | ARRAY_SIZE(omap3_beagle_rev_gpios)); |
93 | goto fail0; | 100 | if (ret < 0) { |
94 | 101 | printk(KERN_ERR "Unable to get revision detection GPIO pins\n"); | |
95 | ret = gpio_request(172, "rev_id_1"); | 102 | omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN; |
96 | if (ret < 0) | 103 | return; |
97 | goto fail1; | 104 | } |
98 | |||
99 | ret = gpio_request(173, "rev_id_2"); | ||
100 | if (ret < 0) | ||
101 | goto fail2; | ||
102 | |||
103 | gpio_direction_input(171); | ||
104 | gpio_direction_input(172); | ||
105 | gpio_direction_input(173); | ||
106 | 105 | ||
107 | beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1) | 106 | beagle_rev = gpio_get_value(171) | (gpio_get_value(172) << 1) |
108 | | (gpio_get_value(173) << 2); | 107 | | (gpio_get_value(173) << 2); |
@@ -128,18 +127,6 @@ static void __init omap3_beagle_init_rev(void) | |||
128 | printk(KERN_INFO "OMAP3 Beagle Rev: unknown %hd\n", beagle_rev); | 127 | printk(KERN_INFO "OMAP3 Beagle Rev: unknown %hd\n", beagle_rev); |
129 | omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN; | 128 | omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN; |
130 | } | 129 | } |
131 | |||
132 | return; | ||
133 | |||
134 | fail2: | ||
135 | gpio_free(172); | ||
136 | fail1: | ||
137 | gpio_free(171); | ||
138 | fail0: | ||
139 | printk(KERN_ERR "Unable to get revision detection GPIO pins\n"); | ||
140 | omap3_beagle_version = OMAP3BEAGLE_BOARD_UNKN; | ||
141 | |||
142 | return; | ||
143 | } | 130 | } |
144 | 131 | ||
145 | static struct mtd_partition omap3beagle_nand_partitions[] = { | 132 | static struct mtd_partition omap3beagle_nand_partitions[] = { |
@@ -173,15 +160,6 @@ static struct mtd_partition omap3beagle_nand_partitions[] = { | |||
173 | }, | 160 | }, |
174 | }; | 161 | }; |
175 | 162 | ||
176 | static struct omap_nand_platform_data omap3beagle_nand_data = { | ||
177 | .options = NAND_BUSWIDTH_16, | ||
178 | .parts = omap3beagle_nand_partitions, | ||
179 | .nr_parts = ARRAY_SIZE(omap3beagle_nand_partitions), | ||
180 | .dma_channel = -1, /* disable DMA in OMAP NAND driver */ | ||
181 | .nand_setup = NULL, | ||
182 | .dev_ready = NULL, | ||
183 | }; | ||
184 | |||
185 | /* DSS */ | 163 | /* DSS */ |
186 | 164 | ||
187 | static int beagle_enable_dvi(struct omap_dss_device *dssdev) | 165 | static int beagle_enable_dvi(struct omap_dss_device *dssdev) |
@@ -243,13 +221,10 @@ static void __init beagle_display_init(void) | |||
243 | { | 221 | { |
244 | int r; | 222 | int r; |
245 | 223 | ||
246 | r = gpio_request(beagle_dvi_device.reset_gpio, "DVI reset"); | 224 | r = gpio_request_one(beagle_dvi_device.reset_gpio, GPIOF_OUT_INIT_LOW, |
247 | if (r < 0) { | 225 | "DVI reset"); |
226 | if (r < 0) | ||
248 | printk(KERN_ERR "Unable to get DVI reset GPIO\n"); | 227 | printk(KERN_ERR "Unable to get DVI reset GPIO\n"); |
249 | return; | ||
250 | } | ||
251 | |||
252 | gpio_direction_output(beagle_dvi_device.reset_gpio, 0); | ||
253 | } | 228 | } |
254 | 229 | ||
255 | #include "sdram-micron-mt46h32m32lf-6.h" | 230 | #include "sdram-micron-mt46h32m32lf-6.h" |
@@ -276,7 +251,7 @@ static struct gpio_led gpio_leds[]; | |||
276 | static int beagle_twl_gpio_setup(struct device *dev, | 251 | static int beagle_twl_gpio_setup(struct device *dev, |
277 | unsigned gpio, unsigned ngpio) | 252 | unsigned gpio, unsigned ngpio) |
278 | { | 253 | { |
279 | int r; | 254 | int r, usb_pwr_level; |
280 | 255 | ||
281 | if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) { | 256 | if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) { |
282 | mmc[0].gpio_wp = -EINVAL; | 257 | mmc[0].gpio_wp = -EINVAL; |
@@ -295,66 +270,46 @@ static int beagle_twl_gpio_setup(struct device *dev, | |||
295 | beagle_vmmc1_supply.dev = mmc[0].dev; | 270 | beagle_vmmc1_supply.dev = mmc[0].dev; |
296 | beagle_vsim_supply.dev = mmc[0].dev; | 271 | beagle_vsim_supply.dev = mmc[0].dev; |
297 | 272 | ||
298 | /* REVISIT: need ehci-omap hooks for external VBUS | ||
299 | * power switch and overcurrent detect | ||
300 | */ | ||
301 | if (omap3_beagle_get_rev() != OMAP3BEAGLE_BOARD_XM) { | ||
302 | r = gpio_request(gpio + 1, "EHCI_nOC"); | ||
303 | if (!r) { | ||
304 | r = gpio_direction_input(gpio + 1); | ||
305 | if (r) | ||
306 | gpio_free(gpio + 1); | ||
307 | } | ||
308 | if (r) | ||
309 | pr_err("%s: unable to configure EHCI_nOC\n", __func__); | ||
310 | } | ||
311 | |||
312 | /* | 273 | /* |
313 | * TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, XM active | 274 | * TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, XM active |
314 | * high / others active low) | 275 | * high / others active low) |
315 | */ | 276 | * DVI reset GPIO is different between beagle revisions |
316 | gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR"); | ||
317 | if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) | ||
318 | gpio_direction_output(gpio + TWL4030_GPIO_MAX, 1); | ||
319 | else | ||
320 | gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0); | ||
321 | |||
322 | /* DVI reset GPIO is different between beagle revisions */ | ||
323 | if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) | ||
324 | beagle_dvi_device.reset_gpio = 129; | ||
325 | else | ||
326 | beagle_dvi_device.reset_gpio = 170; | ||
327 | |||
328 | /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ | ||
329 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; | ||
330 | |||
331 | /* | ||
332 | * gpio + 1 on Xm controls the TFP410's enable line (active low) | ||
333 | * gpio + 2 control varies depending on the board rev as follows: | ||
334 | * P7/P8 revisions(prototype): Camera EN | ||
335 | * A2+ revisions (production): LDO (supplies DVI, serial, led blocks) | ||
336 | */ | 277 | */ |
337 | if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) { | 278 | if (omap3_beagle_get_rev() == OMAP3BEAGLE_BOARD_XM) { |
338 | r = gpio_request(gpio + 1, "nDVI_PWR_EN"); | 279 | usb_pwr_level = GPIOF_OUT_INIT_HIGH; |
339 | if (!r) { | 280 | beagle_dvi_device.reset_gpio = 129; |
340 | r = gpio_direction_output(gpio + 1, 0); | 281 | /* |
341 | if (r) | 282 | * gpio + 1 on Xm controls the TFP410's enable line (active low) |
342 | gpio_free(gpio + 1); | 283 | * gpio + 2 control varies depending on the board rev as below: |
343 | } | 284 | * P7/P8 revisions(prototype): Camera EN |
285 | * A2+ revisions (production): LDO (DVI, serial, led blocks) | ||
286 | */ | ||
287 | r = gpio_request_one(gpio + 1, GPIOF_OUT_INIT_LOW, | ||
288 | "nDVI_PWR_EN"); | ||
344 | if (r) | 289 | if (r) |
345 | pr_err("%s: unable to configure nDVI_PWR_EN\n", | 290 | pr_err("%s: unable to configure nDVI_PWR_EN\n", |
346 | __func__); | 291 | __func__); |
347 | r = gpio_request(gpio + 2, "DVI_LDO_EN"); | 292 | r = gpio_request_one(gpio + 2, GPIOF_OUT_INIT_HIGH, |
348 | if (!r) { | 293 | "DVI_LDO_EN"); |
349 | r = gpio_direction_output(gpio + 2, 1); | ||
350 | if (r) | ||
351 | gpio_free(gpio + 2); | ||
352 | } | ||
353 | if (r) | 294 | if (r) |
354 | pr_err("%s: unable to configure DVI_LDO_EN\n", | 295 | pr_err("%s: unable to configure DVI_LDO_EN\n", |
355 | __func__); | 296 | __func__); |
297 | } else { | ||
298 | usb_pwr_level = GPIOF_OUT_INIT_LOW; | ||
299 | beagle_dvi_device.reset_gpio = 170; | ||
300 | /* | ||
301 | * REVISIT: need ehci-omap hooks for external VBUS | ||
302 | * power switch and overcurrent detect | ||
303 | */ | ||
304 | if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC")) | ||
305 | pr_err("%s: unable to configure EHCI_nOC\n", __func__); | ||
356 | } | 306 | } |
357 | 307 | ||
308 | gpio_request_one(gpio + TWL4030_GPIO_MAX, usb_pwr_level, "nEN_USB_PWR"); | ||
309 | |||
310 | /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ | ||
311 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; | ||
312 | |||
358 | return 0; | 313 | return 0; |
359 | } | 314 | } |
360 | 315 | ||
@@ -453,15 +408,6 @@ static struct twl4030_platform_data beagle_twldata = { | |||
453 | .vpll2 = &beagle_vpll2, | 408 | .vpll2 = &beagle_vpll2, |
454 | }; | 409 | }; |
455 | 410 | ||
456 | static struct i2c_board_info __initdata beagle_i2c_boardinfo[] = { | ||
457 | { | ||
458 | I2C_BOARD_INFO("twl4030", 0x48), | ||
459 | .flags = I2C_CLIENT_WAKE, | ||
460 | .irq = INT_34XX_SYS_NIRQ, | ||
461 | .platform_data = &beagle_twldata, | ||
462 | }, | ||
463 | }; | ||
464 | |||
465 | static struct i2c_board_info __initdata beagle_i2c_eeprom[] = { | 411 | static struct i2c_board_info __initdata beagle_i2c_eeprom[] = { |
466 | { | 412 | { |
467 | I2C_BOARD_INFO("eeprom", 0x50), | 413 | I2C_BOARD_INFO("eeprom", 0x50), |
@@ -470,8 +416,7 @@ static struct i2c_board_info __initdata beagle_i2c_eeprom[] = { | |||
470 | 416 | ||
471 | static int __init omap3_beagle_i2c_init(void) | 417 | static int __init omap3_beagle_i2c_init(void) |
472 | { | 418 | { |
473 | omap_register_i2c_bus(1, 2600, beagle_i2c_boardinfo, | 419 | omap3_pmic_init("twl4030", &beagle_twldata); |
474 | ARRAY_SIZE(beagle_i2c_boardinfo)); | ||
475 | /* Bus 3 is attached to the DVI port where devices like the pico DLP | 420 | /* Bus 3 is attached to the DVI port where devices like the pico DLP |
476 | * projector don't work reliably with 400kHz */ | 421 | * projector don't work reliably with 400kHz */ |
477 | omap_register_i2c_bus(3, 100, beagle_i2c_eeprom, ARRAY_SIZE(beagle_i2c_eeprom)); | 422 | omap_register_i2c_bus(3, 100, beagle_i2c_eeprom, ARRAY_SIZE(beagle_i2c_eeprom)); |
@@ -551,39 +496,6 @@ static struct platform_device *omap3_beagle_devices[] __initdata = { | |||
551 | &keys_gpio, | 496 | &keys_gpio, |
552 | }; | 497 | }; |
553 | 498 | ||
554 | static void __init omap3beagle_flash_init(void) | ||
555 | { | ||
556 | u8 cs = 0; | ||
557 | u8 nandcs = GPMC_CS_NUM + 1; | ||
558 | |||
559 | /* find out the chip-select on which NAND exists */ | ||
560 | while (cs < GPMC_CS_NUM) { | ||
561 | u32 ret = 0; | ||
562 | ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); | ||
563 | |||
564 | if ((ret & 0xC00) == 0x800) { | ||
565 | printk(KERN_INFO "Found NAND on CS%d\n", cs); | ||
566 | if (nandcs > GPMC_CS_NUM) | ||
567 | nandcs = cs; | ||
568 | } | ||
569 | cs++; | ||
570 | } | ||
571 | |||
572 | if (nandcs > GPMC_CS_NUM) { | ||
573 | printk(KERN_INFO "NAND: Unable to find configuration " | ||
574 | "in GPMC\n "); | ||
575 | return; | ||
576 | } | ||
577 | |||
578 | if (nandcs < GPMC_CS_NUM) { | ||
579 | omap3beagle_nand_data.cs = nandcs; | ||
580 | |||
581 | printk(KERN_INFO "Registering NAND on CS%d\n", nandcs); | ||
582 | if (gpmc_nand_init(&omap3beagle_nand_data) < 0) | ||
583 | printk(KERN_ERR "Unable to register NAND device\n"); | ||
584 | } | ||
585 | } | ||
586 | |||
587 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { | 499 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
588 | 500 | ||
589 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, | 501 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
@@ -602,12 +514,6 @@ static struct omap_board_mux board_mux[] __initdata = { | |||
602 | }; | 514 | }; |
603 | #endif | 515 | #endif |
604 | 516 | ||
605 | static struct omap_musb_board_data musb_board_data = { | ||
606 | .interface_type = MUSB_INTERFACE_ULPI, | ||
607 | .mode = MUSB_OTG, | ||
608 | .power = 100, | ||
609 | }; | ||
610 | |||
611 | static void __init beagle_opp_init(void) | 517 | static void __init beagle_opp_init(void) |
612 | { | 518 | { |
613 | int r = 0; | 519 | int r = 0; |
@@ -665,13 +571,13 @@ static void __init omap3_beagle_init(void) | |||
665 | omap_serial_init(); | 571 | omap_serial_init(); |
666 | 572 | ||
667 | omap_mux_init_gpio(170, OMAP_PIN_INPUT); | 573 | omap_mux_init_gpio(170, OMAP_PIN_INPUT); |
668 | gpio_request(170, "DVI_nPD"); | ||
669 | /* REVISIT leave DVI powered down until it's needed ... */ | 574 | /* REVISIT leave DVI powered down until it's needed ... */ |
670 | gpio_direction_output(170, true); | 575 | gpio_request_one(170, GPIOF_OUT_INIT_HIGH, "DVI_nPD"); |
671 | 576 | ||
672 | usb_musb_init(&musb_board_data); | 577 | usb_musb_init(NULL); |
673 | usbhs_init(&usbhs_bdata); | 578 | usbhs_init(&usbhs_bdata); |
674 | omap3beagle_flash_init(); | 579 | omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions, |
580 | ARRAY_SIZE(omap3beagle_nand_partitions)); | ||
675 | 581 | ||
676 | /* Ensure SDRC pins are mux'd for self-refresh */ | 582 | /* Ensure SDRC pins are mux'd for self-refresh */ |
677 | omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); | 583 | omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); |
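The GPMC chip-select scan that omap3beagle_flash_init() used to do by hand is another piece folded into common-board-devices.c: omap_nand_flash_init() above is assumed to take the NAND options, the partition table and its length, locate the NAND chip select itself and call gpmc_nand_init(). Under that assumption a board caller looks roughly like this (the partition layout and the board_flash_init name are illustrative only):

#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/nand.h>
#include "common-board-devices.h"

static struct mtd_partition board_nand_partitions[] = {
	{
		.name	= "X-Loader",
		.offset	= 0,
		.size	= 4 * 0x20000,		/* 4 x 128 KiB blocks */
	},
	{
		.name	= "File System",
		.offset	= MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL,
	},
};

static void __init board_flash_init(void)
{
	/* 16-bit bus width; CS detection and registration happen inside */
	omap_nand_flash_init(NAND_BUSWIDTH_16, board_nand_partitions,
			     ARRAY_SIZE(board_nand_partitions));
}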
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c index 7f94cccdb076..b4d43464a303 100644 --- a/arch/arm/mach-omap2/board-omap3evm.c +++ b/arch/arm/mach-omap2/board-omap3evm.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include "mux.h" | 50 | #include "mux.h" |
51 | #include "sdram-micron-mt46h32m32lf-6.h" | 51 | #include "sdram-micron-mt46h32m32lf-6.h" |
52 | #include "hsmmc.h" | 52 | #include "hsmmc.h" |
53 | #include "common-board-devices.h" | ||
53 | 54 | ||
54 | #define OMAP3_EVM_TS_GPIO 175 | 55 | #define OMAP3_EVM_TS_GPIO 175 |
55 | #define OMAP3_EVM_EHCI_VBUS 22 | 56 | #define OMAP3_EVM_EHCI_VBUS 22 |
@@ -101,49 +102,20 @@ static void __init omap3_evm_get_revision(void) | |||
101 | } | 102 | } |
102 | 103 | ||
103 | #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) | 104 | #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) |
104 | static struct resource omap3evm_smsc911x_resources[] = { | 105 | #include <plat/gpmc-smsc911x.h> |
105 | [0] = { | ||
106 | .start = OMAP3EVM_ETHR_START, | ||
107 | .end = (OMAP3EVM_ETHR_START + OMAP3EVM_ETHR_SIZE - 1), | ||
108 | .flags = IORESOURCE_MEM, | ||
109 | }, | ||
110 | [1] = { | ||
111 | .start = OMAP_GPIO_IRQ(OMAP3EVM_ETHR_GPIO_IRQ), | ||
112 | .end = OMAP_GPIO_IRQ(OMAP3EVM_ETHR_GPIO_IRQ), | ||
113 | .flags = (IORESOURCE_IRQ | IRQF_TRIGGER_LOW), | ||
114 | }, | ||
115 | }; | ||
116 | 106 | ||
117 | static struct smsc911x_platform_config smsc911x_config = { | 107 | static struct omap_smsc911x_platform_data smsc911x_cfg = { |
118 | .phy_interface = PHY_INTERFACE_MODE_MII, | 108 | .cs = OMAP3EVM_SMSC911X_CS, |
119 | .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, | 109 | .gpio_irq = OMAP3EVM_ETHR_GPIO_IRQ, |
120 | .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, | 110 | .gpio_reset = -EINVAL, |
121 | .flags = (SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS), | 111 | .flags = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS, |
122 | }; | ||
123 | |||
124 | static struct platform_device omap3evm_smsc911x_device = { | ||
125 | .name = "smsc911x", | ||
126 | .id = -1, | ||
127 | .num_resources = ARRAY_SIZE(omap3evm_smsc911x_resources), | ||
128 | .resource = &omap3evm_smsc911x_resources[0], | ||
129 | .dev = { | ||
130 | .platform_data = &smsc911x_config, | ||
131 | }, | ||
132 | }; | 112 | }; |
133 | 113 | ||
134 | static inline void __init omap3evm_init_smsc911x(void) | 114 | static inline void __init omap3evm_init_smsc911x(void) |
135 | { | 115 | { |
136 | int eth_cs, eth_rst; | ||
137 | struct clk *l3ck; | 116 | struct clk *l3ck; |
138 | unsigned int rate; | 117 | unsigned int rate; |
139 | 118 | ||
140 | if (get_omap3_evm_rev() == OMAP3EVM_BOARD_GEN_1) | ||
141 | eth_rst = OMAP3EVM_GEN1_ETHR_GPIO_RST; | ||
142 | else | ||
143 | eth_rst = OMAP3EVM_GEN2_ETHR_GPIO_RST; | ||
144 | |||
145 | eth_cs = OMAP3EVM_SMSC911X_CS; | ||
146 | |||
147 | l3ck = clk_get(NULL, "l3_ck"); | 119 | l3ck = clk_get(NULL, "l3_ck"); |
148 | if (IS_ERR(l3ck)) | 120 | if (IS_ERR(l3ck)) |
149 | rate = 100000000; | 121 | rate = 100000000; |
@@ -152,33 +124,13 @@ static inline void __init omap3evm_init_smsc911x(void) | |||
152 | 124 | ||
153 | /* Configure ethernet controller reset gpio */ | 125 | /* Configure ethernet controller reset gpio */ |
154 | if (cpu_is_omap3430()) { | 126 | if (cpu_is_omap3430()) { |
155 | if (gpio_request(eth_rst, "SMSC911x gpio") < 0) { | 127 | if (get_omap3_evm_rev() == OMAP3EVM_BOARD_GEN_1) |
156 | pr_err(KERN_ERR "Failed to request %d for smsc911x\n", | 128 | smsc911x_cfg.gpio_reset = OMAP3EVM_GEN1_ETHR_GPIO_RST; |
157 | eth_rst); | 129 | else |
158 | return; | 130 | smsc911x_cfg.gpio_reset = OMAP3EVM_GEN2_ETHR_GPIO_RST; |
159 | } | ||
160 | |||
161 | if (gpio_direction_output(eth_rst, 1) < 0) { | ||
162 | pr_err(KERN_ERR "Failed to set direction of %d for" \ | ||
163 | " smsc911x\n", eth_rst); | ||
164 | return; | ||
165 | } | ||
166 | /* reset pulse to ethernet controller*/ | ||
167 | usleep_range(150, 220); | ||
168 | gpio_set_value(eth_rst, 0); | ||
169 | usleep_range(150, 220); | ||
170 | gpio_set_value(eth_rst, 1); | ||
171 | usleep_range(1, 2); | ||
172 | } | ||
173 | |||
174 | if (gpio_request(OMAP3EVM_ETHR_GPIO_IRQ, "SMSC911x irq") < 0) { | ||
175 | printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n", | ||
176 | OMAP3EVM_ETHR_GPIO_IRQ); | ||
177 | return; | ||
178 | } | 131 | } |
179 | 132 | ||
180 | gpio_direction_input(OMAP3EVM_ETHR_GPIO_IRQ); | 133 | gpmc_smsc911x_init(&smsc911x_cfg); |
181 | platform_device_register(&omap3evm_smsc911x_device); | ||
182 | } | 134 | } |
183 | 135 | ||
184 | #else | 136 | #else |
@@ -197,6 +149,15 @@ static inline void __init omap3evm_init_smsc911x(void) { return; } | |||
197 | #define OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO 210 | 149 | #define OMAP3EVM_LCD_PANEL_BKLIGHT_GPIO 210 |
198 | #define OMAP3EVM_DVI_PANEL_EN_GPIO 199 | 150 | #define OMAP3EVM_DVI_PANEL_EN_GPIO 199 |
199 | 151 | ||
152 | static struct gpio omap3_evm_dss_gpios[] __initdata = { | ||
153 | { OMAP3EVM_LCD_PANEL_RESB, GPIOF_OUT_INIT_HIGH, "lcd_panel_resb" }, | ||
154 | { OMAP3EVM_LCD_PANEL_INI, GPIOF_OUT_INIT_HIGH, "lcd_panel_ini" }, | ||
155 | { OMAP3EVM_LCD_PANEL_QVGA, GPIOF_OUT_INIT_LOW, "lcd_panel_qvga" }, | ||
156 | { OMAP3EVM_LCD_PANEL_LR, GPIOF_OUT_INIT_HIGH, "lcd_panel_lr" }, | ||
157 | { OMAP3EVM_LCD_PANEL_UD, GPIOF_OUT_INIT_HIGH, "lcd_panel_ud" }, | ||
158 | { OMAP3EVM_LCD_PANEL_ENVDD, GPIOF_OUT_INIT_LOW, "lcd_panel_envdd" }, | ||
159 | }; | ||
160 | |||
200 | static int lcd_enabled; | 161 | static int lcd_enabled; |
201 | static int dvi_enabled; | 162 | static int dvi_enabled; |
202 | 163 | ||
@@ -204,61 +165,10 @@ static void __init omap3_evm_display_init(void) | |||
204 | { | 165 | { |
205 | int r; | 166 | int r; |
206 | 167 | ||
207 | r = gpio_request(OMAP3EVM_LCD_PANEL_RESB, "lcd_panel_resb"); | 168 | r = gpio_request_array(omap3_evm_dss_gpios, |
208 | if (r) { | 169 | ARRAY_SIZE(omap3_evm_dss_gpios)); |
209 | printk(KERN_ERR "failed to get lcd_panel_resb\n"); | 170 | if (r) |
210 | return; | 171 | printk(KERN_ERR "failed to get lcd_panel_* gpios\n"); |
211 | } | ||
212 | gpio_direction_output(OMAP3EVM_LCD_PANEL_RESB, 1); | ||
213 | |||
214 | r = gpio_request(OMAP3EVM_LCD_PANEL_INI, "lcd_panel_ini"); | ||
215 | if (r) { | ||
216 | printk(KERN_ERR "failed to get lcd_panel_ini\n"); | ||
217 | goto err_1; | ||
218 | } | ||
219 | gpio_direction_output(OMAP3EVM_LCD_PANEL_INI, 1); | ||
220 | |||
221 | r = gpio_request(OMAP3EVM_LCD_PANEL_QVGA, "lcd_panel_qvga"); | ||
222 | if (r) { | ||
223 | printk(KERN_ERR "failed to get lcd_panel_qvga\n"); | ||
224 | goto err_2; | ||
225 | } | ||
226 | gpio_direction_output(OMAP3EVM_LCD_PANEL_QVGA, 0); | ||
227 | |||
228 | r = gpio_request(OMAP3EVM_LCD_PANEL_LR, "lcd_panel_lr"); | ||
229 | if (r) { | ||
230 | printk(KERN_ERR "failed to get lcd_panel_lr\n"); | ||
231 | goto err_3; | ||
232 | } | ||
233 | gpio_direction_output(OMAP3EVM_LCD_PANEL_LR, 1); | ||
234 | |||
235 | r = gpio_request(OMAP3EVM_LCD_PANEL_UD, "lcd_panel_ud"); | ||
236 | if (r) { | ||
237 | printk(KERN_ERR "failed to get lcd_panel_ud\n"); | ||
238 | goto err_4; | ||
239 | } | ||
240 | gpio_direction_output(OMAP3EVM_LCD_PANEL_UD, 1); | ||
241 | |||
242 | r = gpio_request(OMAP3EVM_LCD_PANEL_ENVDD, "lcd_panel_envdd"); | ||
243 | if (r) { | ||
244 | printk(KERN_ERR "failed to get lcd_panel_envdd\n"); | ||
245 | goto err_5; | ||
246 | } | ||
247 | gpio_direction_output(OMAP3EVM_LCD_PANEL_ENVDD, 0); | ||
248 | |||
249 | return; | ||
250 | |||
251 | err_5: | ||
252 | gpio_free(OMAP3EVM_LCD_PANEL_UD); | ||
253 | err_4: | ||
254 | gpio_free(OMAP3EVM_LCD_PANEL_LR); | ||
255 | err_3: | ||
256 | gpio_free(OMAP3EVM_LCD_PANEL_QVGA); | ||
257 | err_2: | ||
258 | gpio_free(OMAP3EVM_LCD_PANEL_INI); | ||
259 | err_1: | ||
260 | gpio_free(OMAP3EVM_LCD_PANEL_RESB); | ||
261 | |||
262 | } | 172 | } |
263 | 173 | ||
264 | static int omap3_evm_enable_lcd(struct omap_dss_device *dssdev) | 174 | static int omap3_evm_enable_lcd(struct omap_dss_device *dssdev) |
@@ -448,7 +358,7 @@ static struct platform_device leds_gpio = { | |||
448 | static int omap3evm_twl_gpio_setup(struct device *dev, | 358 | static int omap3evm_twl_gpio_setup(struct device *dev, |
449 | unsigned gpio, unsigned ngpio) | 359 | unsigned gpio, unsigned ngpio) |
450 | { | 360 | { |
451 | int r; | 361 | int r, lcd_bl_en; |
452 | 362 | ||
453 | /* gpio + 0 is "mmc0_cd" (input/IRQ) */ | 363 | /* gpio + 0 is "mmc0_cd" (input/IRQ) */ |
454 | omap_mux_init_gpio(63, OMAP_PIN_INPUT); | 364 | omap_mux_init_gpio(63, OMAP_PIN_INPUT); |
@@ -465,16 +375,14 @@ static int omap3evm_twl_gpio_setup(struct device *dev, | |||
465 | */ | 375 | */ |
466 | 376 | ||
467 | /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */ | 377 | /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */ |
468 | r = gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL"); | 378 | lcd_bl_en = get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2 ? |
469 | if (!r) | 379 | GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW; |
470 | r = gpio_direction_output(gpio + TWL4030_GPIO_MAX, | 380 | r = gpio_request_one(gpio + TWL4030_GPIO_MAX, lcd_bl_en, "EN_LCD_BKL"); |
471 | (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) ? 1 : 0); | ||
472 | if (r) | 381 | if (r) |
473 | printk(KERN_ERR "failed to get/set lcd_bkl gpio\n"); | 382 | printk(KERN_ERR "failed to get/set lcd_bkl gpio\n"); |
474 | 383 | ||
475 | /* gpio + 7 == DVI Enable */ | 384 | /* gpio + 7 == DVI Enable */ |
476 | gpio_request(gpio + 7, "EN_DVI"); | 385 | gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "EN_DVI"); |
477 | gpio_direction_output(gpio + 7, 0); | ||
478 | 386 | ||
479 | /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */ | 387 | /* TWL4030_GPIO_MAX + 1 == ledB (out, active low LED) */ |
480 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; | 388 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; |
@@ -652,78 +560,18 @@ static struct twl4030_platform_data omap3evm_twldata = { | |||
652 | .vdac = &omap3_evm_vdac, | 560 | .vdac = &omap3_evm_vdac, |
653 | .vpll2 = &omap3_evm_vpll2, | 561 | .vpll2 = &omap3_evm_vpll2, |
654 | .vio = &omap3evm_vio, | 562 | .vio = &omap3evm_vio, |
655 | }; | 563 | .vmmc1 = &omap3evm_vmmc1, |
656 | 564 | .vsim = &omap3evm_vsim, | |
657 | static struct i2c_board_info __initdata omap3evm_i2c_boardinfo[] = { | ||
658 | { | ||
659 | I2C_BOARD_INFO("twl4030", 0x48), | ||
660 | .flags = I2C_CLIENT_WAKE, | ||
661 | .irq = INT_34XX_SYS_NIRQ, | ||
662 | .platform_data = &omap3evm_twldata, | ||
663 | }, | ||
664 | }; | 565 | }; |
665 | 566 | ||
666 | static int __init omap3_evm_i2c_init(void) | 567 | static int __init omap3_evm_i2c_init(void) |
667 | { | 568 | { |
668 | /* | 569 | omap3_pmic_init("twl4030", &omap3evm_twldata); |
669 | * REVISIT: These entries can be set in omap3evm_twl_data | ||
670 | * after a merge with MFD tree | ||
671 | */ | ||
672 | omap3evm_twldata.vmmc1 = &omap3evm_vmmc1; | ||
673 | omap3evm_twldata.vsim = &omap3evm_vsim; | ||
674 | |||
675 | omap_register_i2c_bus(1, 2600, omap3evm_i2c_boardinfo, | ||
676 | ARRAY_SIZE(omap3evm_i2c_boardinfo)); | ||
677 | omap_register_i2c_bus(2, 400, NULL, 0); | 570 | omap_register_i2c_bus(2, 400, NULL, 0); |
678 | omap_register_i2c_bus(3, 400, NULL, 0); | 571 | omap_register_i2c_bus(3, 400, NULL, 0); |
679 | return 0; | 572 | return 0; |
680 | } | 573 | } |
681 | 574 | ||
682 | static void ads7846_dev_init(void) | ||
683 | { | ||
684 | if (gpio_request(OMAP3_EVM_TS_GPIO, "ADS7846 pendown") < 0) | ||
685 | printk(KERN_ERR "can't get ads7846 pen down GPIO\n"); | ||
686 | |||
687 | gpio_direction_input(OMAP3_EVM_TS_GPIO); | ||
688 | gpio_set_debounce(OMAP3_EVM_TS_GPIO, 310); | ||
689 | } | ||
690 | |||
691 | static int ads7846_get_pendown_state(void) | ||
692 | { | ||
693 | return !gpio_get_value(OMAP3_EVM_TS_GPIO); | ||
694 | } | ||
695 | |||
696 | static struct ads7846_platform_data ads7846_config = { | ||
697 | .x_max = 0x0fff, | ||
698 | .y_max = 0x0fff, | ||
699 | .x_plate_ohms = 180, | ||
700 | .pressure_max = 255, | ||
701 | .debounce_max = 10, | ||
702 | .debounce_tol = 3, | ||
703 | .debounce_rep = 1, | ||
704 | .get_pendown_state = ads7846_get_pendown_state, | ||
705 | .keep_vref_on = 1, | ||
706 | .settle_delay_usecs = 150, | ||
707 | .wakeup = true, | ||
708 | }; | ||
709 | |||
710 | static struct omap2_mcspi_device_config ads7846_mcspi_config = { | ||
711 | .turbo_mode = 0, | ||
712 | .single_channel = 1, /* 0: slave, 1: master */ | ||
713 | }; | ||
714 | |||
715 | static struct spi_board_info omap3evm_spi_board_info[] = { | ||
716 | [0] = { | ||
717 | .modalias = "ads7846", | ||
718 | .bus_num = 1, | ||
719 | .chip_select = 0, | ||
720 | .max_speed_hz = 1500000, | ||
721 | .controller_data = &ads7846_mcspi_config, | ||
722 | .irq = OMAP_GPIO_IRQ(OMAP3_EVM_TS_GPIO), | ||
723 | .platform_data = &ads7846_config, | ||
724 | }, | ||
725 | }; | ||
726 | |||
727 | static struct omap_board_config_kernel omap3_evm_config[] __initdata = { | 575 | static struct omap_board_config_kernel omap3_evm_config[] __initdata = { |
728 | }; | 576 | }; |
729 | 577 | ||
@@ -825,6 +673,11 @@ static struct omap_musb_board_data musb_board_data = { | |||
825 | .power = 100, | 673 | .power = 100, |
826 | }; | 674 | }; |
827 | 675 | ||
676 | static struct gpio omap3_evm_ehci_gpios[] __initdata = { | ||
677 | { OMAP3_EVM_EHCI_VBUS, GPIOF_OUT_INIT_HIGH, "enable EHCI VBUS" }, | ||
678 | { OMAP3_EVM_EHCI_SELECT, GPIOF_OUT_INIT_LOW, "select EHCI port" }, | ||
679 | }; | ||
680 | |||
828 | static void __init omap3_evm_init(void) | 681 | static void __init omap3_evm_init(void) |
829 | { | 682 | { |
830 | omap3_evm_get_revision(); | 683 | omap3_evm_get_revision(); |
@@ -841,9 +694,6 @@ static void __init omap3_evm_init(void) | |||
841 | 694 | ||
842 | omap_display_init(&omap3_evm_dss_data); | 695 | omap_display_init(&omap3_evm_dss_data); |
843 | 696 | ||
844 | spi_register_board_info(omap3evm_spi_board_info, | ||
845 | ARRAY_SIZE(omap3evm_spi_board_info)); | ||
846 | |||
847 | omap_serial_init(); | 697 | omap_serial_init(); |
848 | 698 | ||
849 | /* OMAP3EVM uses ISP1504 phy and so register nop transceiver */ | 699 | /* OMAP3EVM uses ISP1504 phy and so register nop transceiver */ |
@@ -851,16 +701,12 @@ static void __init omap3_evm_init(void) | |||
851 | 701 | ||
852 | if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) { | 702 | if (get_omap3_evm_rev() >= OMAP3EVM_BOARD_GEN_2) { |
853 | /* enable EHCI VBUS using GPIO22 */ | 703 | /* enable EHCI VBUS using GPIO22 */ |
854 | omap_mux_init_gpio(22, OMAP_PIN_INPUT_PULLUP); | 704 | omap_mux_init_gpio(OMAP3_EVM_EHCI_VBUS, OMAP_PIN_INPUT_PULLUP); |
855 | gpio_request(OMAP3_EVM_EHCI_VBUS, "enable EHCI VBUS"); | ||
856 | gpio_direction_output(OMAP3_EVM_EHCI_VBUS, 0); | ||
857 | gpio_set_value(OMAP3_EVM_EHCI_VBUS, 1); | ||
858 | |||
859 | /* Select EHCI port on main board */ | 705 | /* Select EHCI port on main board */ |
860 | omap_mux_init_gpio(61, OMAP_PIN_INPUT_PULLUP); | 706 | omap_mux_init_gpio(OMAP3_EVM_EHCI_SELECT, |
861 | gpio_request(OMAP3_EVM_EHCI_SELECT, "select EHCI port"); | 707 | OMAP_PIN_INPUT_PULLUP); |
862 | gpio_direction_output(OMAP3_EVM_EHCI_SELECT, 0); | 708 | gpio_request_array(omap3_evm_ehci_gpios, |
863 | gpio_set_value(OMAP3_EVM_EHCI_SELECT, 0); | 709 | ARRAY_SIZE(omap3_evm_ehci_gpios)); |
864 | 710 | ||
865 | /* setup EHCI phy reset config */ | 711 | /* setup EHCI phy reset config */ |
866 | omap_mux_init_gpio(21, OMAP_PIN_INPUT_PULLUP); | 712 | omap_mux_init_gpio(21, OMAP_PIN_INPUT_PULLUP); |
@@ -876,7 +722,7 @@ static void __init omap3_evm_init(void) | |||
876 | } | 722 | } |
877 | usb_musb_init(&musb_board_data); | 723 | usb_musb_init(&musb_board_data); |
878 | usbhs_init(&usbhs_bdata); | 724 | usbhs_init(&usbhs_bdata); |
879 | ads7846_dev_init(); | 725 | omap_ads7846_init(1, OMAP3_EVM_TS_GPIO, 310, NULL); |
880 | omap3evm_init_smsc911x(); | 726 | omap3evm_init_smsc911x(); |
881 | omap3_evm_display_init(); | 727 | omap3_evm_display_init(); |
882 | 728 | ||
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c index b726943d7c93..60d9be49dbab 100644 --- a/arch/arm/mach-omap2/board-omap3logic.c +++ b/arch/arm/mach-omap2/board-omap3logic.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include "hsmmc.h" | 37 | #include "hsmmc.h" |
38 | #include "timer-gp.h" | 38 | #include "timer-gp.h" |
39 | #include "control.h" | 39 | #include "control.h" |
40 | #include "common-board-devices.h" | ||
40 | 41 | ||
41 | #include <plat/mux.h> | 42 | #include <plat/mux.h> |
42 | #include <plat/board.h> | 43 | #include <plat/board.h> |
@@ -93,19 +94,9 @@ static struct twl4030_platform_data omap3logic_twldata = { | |||
93 | .vmmc1 = &omap3logic_vmmc1, | 94 | .vmmc1 = &omap3logic_vmmc1, |
94 | }; | 95 | }; |
95 | 96 | ||
96 | static struct i2c_board_info __initdata omap3logic_i2c_boardinfo[] = { | ||
97 | { | ||
98 | I2C_BOARD_INFO("twl4030", 0x48), | ||
99 | .flags = I2C_CLIENT_WAKE, | ||
100 | .irq = INT_34XX_SYS_NIRQ, | ||
101 | .platform_data = &omap3logic_twldata, | ||
102 | }, | ||
103 | }; | ||
104 | |||
105 | static int __init omap3logic_i2c_init(void) | 97 | static int __init omap3logic_i2c_init(void) |
106 | { | 98 | { |
107 | omap_register_i2c_bus(1, 2600, omap3logic_i2c_boardinfo, | 99 | omap3_pmic_init("twl4030", &omap3logic_twldata); |
108 | ARRAY_SIZE(omap3logic_i2c_boardinfo)); | ||
109 | return 0; | 100 | return 0; |
110 | } | 101 | } |
111 | 102 | ||
@@ -147,7 +138,6 @@ static struct omap_smsc911x_platform_data __initdata board_smsc911x_data = { | |||
147 | .cs = OMAP3LOGIC_SMSC911X_CS, | 138 | .cs = OMAP3LOGIC_SMSC911X_CS, |
148 | .gpio_irq = -EINVAL, | 139 | .gpio_irq = -EINVAL, |
149 | .gpio_reset = -EINVAL, | 140 | .gpio_reset = -EINVAL, |
150 | .flags = IORESOURCE_IRQ_LOWLEVEL, | ||
151 | }; | 141 | }; |
152 | 142 | ||
153 | /* TODO/FIXME (comment by Peter Barada, LogicPD): | 143 | /* TODO/FIXME (comment by Peter Barada, LogicPD): |
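The per-board i2c_board_info array for the TWL PMIC also disappears in favour of omap3_pmic_init()/omap_pmic_init(). Inferred from the removed boilerplate and the call sites in this diff, the generic form fills a shared template and registers it on the given bus; the sketch below is illustrative (board_pmic_init is a made-up name, the include paths are approximate, and the real helper body lives in common-board-devices.c, added elsewhere in this series).

        #include <linux/types.h>
        #include <linux/string.h>
        #include <linux/i2c.h>
        #include <linux/i2c/twl.h>
        #include <plat/i2c.h>           /* omap_register_i2c_bus(); path approximate */

        /* Field values mirror the deleted omap3logic_i2c_boardinfo entry. */
        static struct i2c_board_info __initdata pmic_i2c_board_info = {
                .addr   = 0x48,
                .flags  = I2C_CLIENT_WAKE,
        };

        static void __init board_pmic_init(int bus, u32 clkrate,
                                           const char *pmic_type, int pmic_irq,
                                           struct twl4030_platform_data *pmic_data)
        {
                strlcpy(pmic_i2c_board_info.type, pmic_type,
                        sizeof(pmic_i2c_board_info.type));
                pmic_i2c_board_info.irq = pmic_irq;
                pmic_i2c_board_info.platform_data = pmic_data;

                omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1);
        }

omap3_pmic_init(name, data) is then presumably this with bus 1, the 2600 rate and INT_34XX_SYS_NIRQ filled in, matching the values deleted above; the generic omap_pmic_init(bus, clkrate, name, irq, data) stays available where the defaults don't fit, as in the rm680 hunk later in this diff.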
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c index 1db15492d82b..1d10736c6d3c 100644 --- a/arch/arm/mach-omap2/board-omap3pandora.c +++ b/arch/arm/mach-omap2/board-omap3pandora.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/platform_device.h> | 22 | #include <linux/platform_device.h> |
23 | 23 | ||
24 | #include <linux/spi/spi.h> | 24 | #include <linux/spi/spi.h> |
25 | #include <linux/spi/ads7846.h> | ||
26 | #include <linux/regulator/machine.h> | 25 | #include <linux/regulator/machine.h> |
27 | #include <linux/i2c/twl.h> | 26 | #include <linux/i2c/twl.h> |
28 | #include <linux/wl12xx.h> | 27 | #include <linux/wl12xx.h> |
@@ -52,6 +51,7 @@ | |||
52 | #include "mux.h" | 51 | #include "mux.h" |
53 | #include "sdram-micron-mt46h32m32lf-6.h" | 52 | #include "sdram-micron-mt46h32m32lf-6.h" |
54 | #include "hsmmc.h" | 53 | #include "hsmmc.h" |
54 | #include "common-board-devices.h" | ||
55 | 55 | ||
56 | #define PANDORA_WIFI_IRQ_GPIO 21 | 56 | #define PANDORA_WIFI_IRQ_GPIO 21 |
57 | #define PANDORA_WIFI_NRESET_GPIO 23 | 57 | #define PANDORA_WIFI_NRESET_GPIO 23 |
@@ -305,24 +305,13 @@ static int omap3pandora_twl_gpio_setup(struct device *dev, | |||
305 | 305 | ||
306 | /* gpio + 13 drives 32kHz buffer for wifi module */ | 306 | /* gpio + 13 drives 32kHz buffer for wifi module */ |
307 | gpio_32khz = gpio + 13; | 307 | gpio_32khz = gpio + 13; |
308 | ret = gpio_request(gpio_32khz, "wifi 32kHz"); | 308 | ret = gpio_request_one(gpio_32khz, GPIOF_OUT_INIT_HIGH, "wifi 32kHz"); |
309 | if (ret < 0) { | 309 | if (ret < 0) { |
310 | pr_err("Cannot get GPIO line %d, ret=%d\n", gpio_32khz, ret); | 310 | pr_err("Cannot get GPIO line %d, ret=%d\n", gpio_32khz, ret); |
311 | goto fail; | 311 | return -ENODEV; |
312 | } | ||
313 | |||
314 | ret = gpio_direction_output(gpio_32khz, 1); | ||
315 | if (ret < 0) { | ||
316 | pr_err("Cannot set GPIO line %d, ret=%d\n", gpio_32khz, ret); | ||
317 | goto fail_direction; | ||
318 | } | 312 | } |
319 | 313 | ||
320 | return 0; | 314 | return 0; |
321 | |||
322 | fail_direction: | ||
323 | gpio_free(gpio_32khz); | ||
324 | fail: | ||
325 | return -ENODEV; | ||
326 | } | 315 | } |
327 | 316 | ||
328 | static struct twl4030_gpio_platform_data omap3pandora_gpio_data = { | 317 | static struct twl4030_gpio_platform_data omap3pandora_gpio_data = { |
@@ -544,15 +533,6 @@ static struct twl4030_platform_data omap3pandora_twldata = { | |||
544 | .bci = &pandora_bci_data, | 533 | .bci = &pandora_bci_data, |
545 | }; | 534 | }; |
546 | 535 | ||
547 | static struct i2c_board_info __initdata omap3pandora_i2c_boardinfo[] = { | ||
548 | { | ||
549 | I2C_BOARD_INFO("tps65950", 0x48), | ||
550 | .flags = I2C_CLIENT_WAKE, | ||
551 | .irq = INT_34XX_SYS_NIRQ, | ||
552 | .platform_data = &omap3pandora_twldata, | ||
553 | }, | ||
554 | }; | ||
555 | |||
556 | static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = { | 536 | static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = { |
557 | { | 537 | { |
558 | I2C_BOARD_INFO("bq27500", 0x55), | 538 | I2C_BOARD_INFO("bq27500", 0x55), |
@@ -562,61 +542,15 @@ static struct i2c_board_info __initdata omap3pandora_i2c3_boardinfo[] = { | |||
562 | 542 | ||
563 | static int __init omap3pandora_i2c_init(void) | 543 | static int __init omap3pandora_i2c_init(void) |
564 | { | 544 | { |
565 | omap_register_i2c_bus(1, 2600, omap3pandora_i2c_boardinfo, | 545 | omap3_pmic_init("tps65950", &omap3pandora_twldata); |
566 | ARRAY_SIZE(omap3pandora_i2c_boardinfo)); | ||
567 | /* i2c2 pins are not connected */ | 546 | /* i2c2 pins are not connected */ |
568 | omap_register_i2c_bus(3, 100, omap3pandora_i2c3_boardinfo, | 547 | omap_register_i2c_bus(3, 100, omap3pandora_i2c3_boardinfo, |
569 | ARRAY_SIZE(omap3pandora_i2c3_boardinfo)); | 548 | ARRAY_SIZE(omap3pandora_i2c3_boardinfo)); |
570 | return 0; | 549 | return 0; |
571 | } | 550 | } |
572 | 551 | ||
573 | static void __init omap3pandora_ads7846_init(void) | ||
574 | { | ||
575 | int gpio = OMAP3_PANDORA_TS_GPIO; | ||
576 | int ret; | ||
577 | |||
578 | ret = gpio_request(gpio, "ads7846_pen_down"); | ||
579 | if (ret < 0) { | ||
580 | printk(KERN_ERR "Failed to request GPIO %d for " | ||
581 | "ads7846 pen down IRQ\n", gpio); | ||
582 | return; | ||
583 | } | ||
584 | |||
585 | gpio_direction_input(gpio); | ||
586 | } | ||
587 | |||
588 | static int ads7846_get_pendown_state(void) | ||
589 | { | ||
590 | return !gpio_get_value(OMAP3_PANDORA_TS_GPIO); | ||
591 | } | ||
592 | |||
593 | static struct ads7846_platform_data ads7846_config = { | ||
594 | .x_max = 0x0fff, | ||
595 | .y_max = 0x0fff, | ||
596 | .x_plate_ohms = 180, | ||
597 | .pressure_max = 255, | ||
598 | .debounce_max = 10, | ||
599 | .debounce_tol = 3, | ||
600 | .debounce_rep = 1, | ||
601 | .get_pendown_state = ads7846_get_pendown_state, | ||
602 | .keep_vref_on = 1, | ||
603 | }; | ||
604 | |||
605 | static struct omap2_mcspi_device_config ads7846_mcspi_config = { | ||
606 | .turbo_mode = 0, | ||
607 | .single_channel = 1, /* 0: slave, 1: master */ | ||
608 | }; | ||
609 | |||
610 | static struct spi_board_info omap3pandora_spi_board_info[] __initdata = { | 552 | static struct spi_board_info omap3pandora_spi_board_info[] __initdata = { |
611 | { | 553 | { |
612 | .modalias = "ads7846", | ||
613 | .bus_num = 1, | ||
614 | .chip_select = 0, | ||
615 | .max_speed_hz = 1500000, | ||
616 | .controller_data = &ads7846_mcspi_config, | ||
617 | .irq = OMAP_GPIO_IRQ(OMAP3_PANDORA_TS_GPIO), | ||
618 | .platform_data = &ads7846_config, | ||
619 | }, { | ||
620 | .modalias = "tpo_td043mtea1_panel_spi", | 554 | .modalias = "tpo_td043mtea1_panel_spi", |
621 | .bus_num = 1, | 555 | .bus_num = 1, |
622 | .chip_select = 1, | 556 | .chip_select = 1, |
@@ -639,14 +573,10 @@ static void __init pandora_wl1251_init(void) | |||
639 | 573 | ||
640 | memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata)); | 574 | memset(&pandora_wl1251_pdata, 0, sizeof(pandora_wl1251_pdata)); |
641 | 575 | ||
642 | ret = gpio_request(PANDORA_WIFI_IRQ_GPIO, "wl1251 irq"); | 576 | ret = gpio_request_one(PANDORA_WIFI_IRQ_GPIO, GPIOF_IN, "wl1251 irq"); |
643 | if (ret < 0) | 577 | if (ret < 0) |
644 | goto fail; | 578 | goto fail; |
645 | 579 | ||
646 | ret = gpio_direction_input(PANDORA_WIFI_IRQ_GPIO); | ||
647 | if (ret < 0) | ||
648 | goto fail_irq; | ||
649 | |||
650 | pandora_wl1251_pdata.irq = gpio_to_irq(PANDORA_WIFI_IRQ_GPIO); | 580 | pandora_wl1251_pdata.irq = gpio_to_irq(PANDORA_WIFI_IRQ_GPIO); |
651 | if (pandora_wl1251_pdata.irq < 0) | 581 | if (pandora_wl1251_pdata.irq < 0) |
652 | goto fail_irq; | 582 | goto fail_irq; |
@@ -688,12 +618,6 @@ static struct omap_board_mux board_mux[] __initdata = { | |||
688 | }; | 618 | }; |
689 | #endif | 619 | #endif |
690 | 620 | ||
691 | static struct omap_musb_board_data musb_board_data = { | ||
692 | .interface_type = MUSB_INTERFACE_ULPI, | ||
693 | .mode = MUSB_OTG, | ||
694 | .power = 100, | ||
695 | }; | ||
696 | |||
697 | static void __init omap3pandora_init(void) | 621 | static void __init omap3pandora_init(void) |
698 | { | 622 | { |
699 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); | 623 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); |
@@ -705,9 +629,9 @@ static void __init omap3pandora_init(void) | |||
705 | omap_serial_init(); | 629 | omap_serial_init(); |
706 | spi_register_board_info(omap3pandora_spi_board_info, | 630 | spi_register_board_info(omap3pandora_spi_board_info, |
707 | ARRAY_SIZE(omap3pandora_spi_board_info)); | 631 | ARRAY_SIZE(omap3pandora_spi_board_info)); |
708 | omap3pandora_ads7846_init(); | 632 | omap_ads7846_init(1, OMAP3_PANDORA_TS_GPIO, 0, NULL); |
709 | usbhs_init(&usbhs_bdata); | 633 | usbhs_init(&usbhs_bdata); |
710 | usb_musb_init(&musb_board_data); | 634 | usb_musb_init(NULL); |
711 | gpmc_nand_init(&pandora_nand_data); | 635 | gpmc_nand_init(&pandora_nand_data); |
712 | 636 | ||
713 | /* Ensure SDRC pins are mux'd for self-refresh */ | 637 | /* Ensure SDRC pins are mux'd for self-refresh */ |
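The Pandora hunks also swap gpio_request() + gpio_direction_*() pairs for gpio_request_one(), which requests the pin and applies its direction and initial level in one call, so the hand-rolled fail/fail_direction unwind labels go away. A minimal sketch of the resulting pattern, assuming only <linux/gpio.h> (pin number and function name here are illustrative):

        #include <linux/kernel.h>
        #include <linux/gpio.h>

        /* Illustrative pin; the board uses PANDORA_WIFI_IRQ_GPIO (21). */
        #define WIFI_IRQ_GPIO   21

        static int wifi_irq;

        static int __init wifi_irq_setup(void)
        {
                int ret;

                /* Request + configure as input in a single call. */
                ret = gpio_request_one(WIFI_IRQ_GPIO, GPIOF_IN, "wl1251 irq");
                if (ret < 0)
                        return ret;

                wifi_irq = gpio_to_irq(WIFI_IRQ_GPIO);
                if (wifi_irq < 0) {
                        gpio_free(WIFI_IRQ_GPIO);       /* only unwind step left */
                        return wifi_irq;
                }

                return 0;
        }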
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c index a72c90a08c8a..0c108a212ea2 100644 --- a/arch/arm/mach-omap2/board-omap3stalker.c +++ b/arch/arm/mach-omap2/board-omap3stalker.c | |||
@@ -45,7 +45,6 @@ | |||
45 | #include <plat/mcspi.h> | 45 | #include <plat/mcspi.h> |
46 | #include <linux/input/matrix_keypad.h> | 46 | #include <linux/input/matrix_keypad.h> |
47 | #include <linux/spi/spi.h> | 47 | #include <linux/spi/spi.h> |
48 | #include <linux/spi/ads7846.h> | ||
49 | #include <linux/interrupt.h> | 48 | #include <linux/interrupt.h> |
50 | #include <linux/smsc911x.h> | 49 | #include <linux/smsc911x.h> |
51 | #include <linux/i2c/at24.h> | 50 | #include <linux/i2c/at24.h> |
@@ -54,52 +53,28 @@ | |||
54 | #include "mux.h" | 53 | #include "mux.h" |
55 | #include "hsmmc.h" | 54 | #include "hsmmc.h" |
56 | #include "timer-gp.h" | 55 | #include "timer-gp.h" |
56 | #include "common-board-devices.h" | ||
57 | 57 | ||
58 | #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) | 58 | #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) |
59 | #include <plat/gpmc-smsc911x.h> | ||
60 | |||
59 | #define OMAP3STALKER_ETHR_START 0x2c000000 | 61 | #define OMAP3STALKER_ETHR_START 0x2c000000 |
60 | #define OMAP3STALKER_ETHR_SIZE 1024 | 62 | #define OMAP3STALKER_ETHR_SIZE 1024 |
61 | #define OMAP3STALKER_ETHR_GPIO_IRQ 19 | 63 | #define OMAP3STALKER_ETHR_GPIO_IRQ 19 |
62 | #define OMAP3STALKER_SMC911X_CS 5 | 64 | #define OMAP3STALKER_SMC911X_CS 5 |
63 | 65 | ||
64 | static struct resource omap3stalker_smsc911x_resources[] = { | 66 | static struct omap_smsc911x_platform_data smsc911x_cfg = { |
65 | [0] = { | 67 | .cs = OMAP3STALKER_SMC911X_CS, |
66 | .start = OMAP3STALKER_ETHR_START, | 68 | .gpio_irq = OMAP3STALKER_ETHR_GPIO_IRQ, |
67 | .end = | 69 | .gpio_reset = -EINVAL, |
68 | (OMAP3STALKER_ETHR_START + OMAP3STALKER_ETHR_SIZE - 1), | ||
69 | .flags = IORESOURCE_MEM, | ||
70 | }, | ||
71 | [1] = { | ||
72 | .start = OMAP_GPIO_IRQ(OMAP3STALKER_ETHR_GPIO_IRQ), | ||
73 | .end = OMAP_GPIO_IRQ(OMAP3STALKER_ETHR_GPIO_IRQ), | ||
74 | .flags = (IORESOURCE_IRQ | IRQF_TRIGGER_LOW), | ||
75 | }, | ||
76 | }; | ||
77 | |||
78 | static struct smsc911x_platform_config smsc911x_config = { | ||
79 | .phy_interface = PHY_INTERFACE_MODE_MII, | ||
80 | .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, | ||
81 | .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, | ||
82 | .flags = (SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS), | 70 | .flags = (SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS), |
83 | }; | 71 | }; |
84 | 72 | ||
85 | static struct platform_device omap3stalker_smsc911x_device = { | ||
86 | .name = "smsc911x", | ||
87 | .id = -1, | ||
88 | .num_resources = ARRAY_SIZE(omap3stalker_smsc911x_resources), | ||
89 | .resource = &omap3stalker_smsc911x_resources[0], | ||
90 | .dev = { | ||
91 | .platform_data = &smsc911x_config, | ||
92 | }, | ||
93 | }; | ||
94 | |||
95 | static inline void __init omap3stalker_init_eth(void) | 73 | static inline void __init omap3stalker_init_eth(void) |
96 | { | 74 | { |
97 | int eth_cs; | ||
98 | struct clk *l3ck; | 75 | struct clk *l3ck; |
99 | unsigned int rate; | 76 | unsigned int rate; |
100 | 77 | ||
101 | eth_cs = OMAP3STALKER_SMC911X_CS; | ||
102 | |||
103 | l3ck = clk_get(NULL, "l3_ck"); | 78 | l3ck = clk_get(NULL, "l3_ck"); |
104 | if (IS_ERR(l3ck)) | 79 | if (IS_ERR(l3ck)) |
105 | rate = 100000000; | 80 | rate = 100000000; |
@@ -107,16 +82,7 @@ static inline void __init omap3stalker_init_eth(void) | |||
107 | rate = clk_get_rate(l3ck); | 82 | rate = clk_get_rate(l3ck); |
108 | 83 | ||
109 | omap_mux_init_gpio(19, OMAP_PIN_INPUT_PULLUP); | 84 | omap_mux_init_gpio(19, OMAP_PIN_INPUT_PULLUP); |
110 | if (gpio_request(OMAP3STALKER_ETHR_GPIO_IRQ, "SMC911x irq") < 0) { | 85 | gpmc_smsc911x_init(&smsc911x_cfg); |
111 | printk(KERN_ERR | ||
112 | "Failed to request GPIO%d for smc911x IRQ\n", | ||
113 | OMAP3STALKER_ETHR_GPIO_IRQ); | ||
114 | return; | ||
115 | } | ||
116 | |||
117 | gpio_direction_input(OMAP3STALKER_ETHR_GPIO_IRQ); | ||
118 | |||
119 | platform_device_register(&omap3stalker_smsc911x_device); | ||
120 | } | 86 | } |
121 | 87 | ||
122 | #else | 88 | #else |
@@ -365,12 +331,11 @@ omap3stalker_twl_gpio_setup(struct device *dev, | |||
365 | */ | 331 | */ |
366 | 332 | ||
367 | /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */ | 333 | /* TWL4030_GPIO_MAX + 0 == ledA, LCD Backlight control */ |
368 | gpio_request(gpio + TWL4030_GPIO_MAX, "EN_LCD_BKL"); | 334 | gpio_request_one(gpio + TWL4030_GPIO_MAX, GPIOF_OUT_INIT_LOW, |
369 | gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0); | 335 | "EN_LCD_BKL"); |
370 | 336 | ||
371 | /* gpio + 7 == DVI Enable */ | 337 | /* gpio + 7 == DVI Enable */ |
372 | gpio_request(gpio + 7, "EN_DVI"); | 338 | gpio_request_one(gpio + 7, GPIOF_OUT_INIT_LOW, "EN_DVI"); |
373 | gpio_direction_output(gpio + 7, 0); | ||
374 | 339 | ||
375 | /* TWL4030_GPIO_MAX + 1 == ledB (out, mmc0) */ | 340 | /* TWL4030_GPIO_MAX + 1 == ledB (out, mmc0) */ |
376 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; | 341 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; |
@@ -489,15 +454,8 @@ static struct twl4030_platform_data omap3stalker_twldata = { | |||
489 | .codec = &omap3stalker_codec_data, | 454 | .codec = &omap3stalker_codec_data, |
490 | .vdac = &omap3_stalker_vdac, | 455 | .vdac = &omap3_stalker_vdac, |
491 | .vpll2 = &omap3_stalker_vpll2, | 456 | .vpll2 = &omap3_stalker_vpll2, |
492 | }; | 457 | .vmmc1 = &omap3stalker_vmmc1, |
493 | 458 | .vsim = &omap3stalker_vsim, | |
494 | static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo[] = { | ||
495 | { | ||
496 | I2C_BOARD_INFO("twl4030", 0x48), | ||
497 | .flags = I2C_CLIENT_WAKE, | ||
498 | .irq = INT_34XX_SYS_NIRQ, | ||
499 | .platform_data = &omap3stalker_twldata, | ||
500 | }, | ||
501 | }; | 459 | }; |
502 | 460 | ||
503 | static struct at24_platform_data fram_info = { | 461 | static struct at24_platform_data fram_info = { |
@@ -516,15 +474,7 @@ static struct i2c_board_info __initdata omap3stalker_i2c_boardinfo3[] = { | |||
516 | 474 | ||
517 | static int __init omap3_stalker_i2c_init(void) | 475 | static int __init omap3_stalker_i2c_init(void) |
518 | { | 476 | { |
519 | /* | 477 | omap3_pmic_init("twl4030", &omap3stalker_twldata); |
520 | * REVISIT: These entries can be set in omap3evm_twl_data | ||
521 | * after a merge with MFD tree | ||
522 | */ | ||
523 | omap3stalker_twldata.vmmc1 = &omap3stalker_vmmc1; | ||
524 | omap3stalker_twldata.vsim = &omap3stalker_vsim; | ||
525 | |||
526 | omap_register_i2c_bus(1, 2600, omap3stalker_i2c_boardinfo, | ||
527 | ARRAY_SIZE(omap3stalker_i2c_boardinfo)); | ||
528 | omap_register_i2c_bus(2, 400, NULL, 0); | 478 | omap_register_i2c_bus(2, 400, NULL, 0); |
529 | omap_register_i2c_bus(3, 400, omap3stalker_i2c_boardinfo3, | 479 | omap_register_i2c_bus(3, 400, omap3stalker_i2c_boardinfo3, |
530 | ARRAY_SIZE(omap3stalker_i2c_boardinfo3)); | 480 | ARRAY_SIZE(omap3stalker_i2c_boardinfo3)); |
@@ -532,49 +482,6 @@ static int __init omap3_stalker_i2c_init(void) | |||
532 | } | 482 | } |
533 | 483 | ||
534 | #define OMAP3_STALKER_TS_GPIO 175 | 484 | #define OMAP3_STALKER_TS_GPIO 175 |
535 | static void ads7846_dev_init(void) | ||
536 | { | ||
537 | if (gpio_request(OMAP3_STALKER_TS_GPIO, "ADS7846 pendown") < 0) | ||
538 | printk(KERN_ERR "can't get ads7846 pen down GPIO\n"); | ||
539 | |||
540 | gpio_direction_input(OMAP3_STALKER_TS_GPIO); | ||
541 | gpio_set_debounce(OMAP3_STALKER_TS_GPIO, 310); | ||
542 | } | ||
543 | |||
544 | static int ads7846_get_pendown_state(void) | ||
545 | { | ||
546 | return !gpio_get_value(OMAP3_STALKER_TS_GPIO); | ||
547 | } | ||
548 | |||
549 | static struct ads7846_platform_data ads7846_config = { | ||
550 | .x_max = 0x0fff, | ||
551 | .y_max = 0x0fff, | ||
552 | .x_plate_ohms = 180, | ||
553 | .pressure_max = 255, | ||
554 | .debounce_max = 10, | ||
555 | .debounce_tol = 3, | ||
556 | .debounce_rep = 1, | ||
557 | .get_pendown_state = ads7846_get_pendown_state, | ||
558 | .keep_vref_on = 1, | ||
559 | .settle_delay_usecs = 150, | ||
560 | }; | ||
561 | |||
562 | static struct omap2_mcspi_device_config ads7846_mcspi_config = { | ||
563 | .turbo_mode = 0, | ||
564 | .single_channel = 1, /* 0: slave, 1: master */ | ||
565 | }; | ||
566 | |||
567 | static struct spi_board_info omap3stalker_spi_board_info[] = { | ||
568 | [0] = { | ||
569 | .modalias = "ads7846", | ||
570 | .bus_num = 1, | ||
571 | .chip_select = 0, | ||
572 | .max_speed_hz = 1500000, | ||
573 | .controller_data = &ads7846_mcspi_config, | ||
574 | .irq = OMAP_GPIO_IRQ(OMAP3_STALKER_TS_GPIO), | ||
575 | .platform_data = &ads7846_config, | ||
576 | }, | ||
577 | }; | ||
578 | 485 | ||
579 | static struct omap_board_config_kernel omap3_stalker_config[] __initdata = { | 486 | static struct omap_board_config_kernel omap3_stalker_config[] __initdata = { |
580 | }; | 487 | }; |
@@ -618,12 +525,6 @@ static struct omap_board_mux board_mux[] __initdata = { | |||
618 | }; | 525 | }; |
619 | #endif | 526 | #endif |
620 | 527 | ||
621 | static struct omap_musb_board_data musb_board_data = { | ||
622 | .interface_type = MUSB_INTERFACE_ULPI, | ||
623 | .mode = MUSB_OTG, | ||
624 | .power = 100, | ||
625 | }; | ||
626 | |||
627 | static void __init omap3_stalker_init(void) | 528 | static void __init omap3_stalker_init(void) |
628 | { | 529 | { |
629 | omap3_mux_init(board_mux, OMAP_PACKAGE_CUS); | 530 | omap3_mux_init(board_mux, OMAP_PACKAGE_CUS); |
@@ -636,13 +537,11 @@ static void __init omap3_stalker_init(void) | |||
636 | ARRAY_SIZE(omap3_stalker_devices)); | 537 | ARRAY_SIZE(omap3_stalker_devices)); |
637 | 538 | ||
638 | omap_display_init(&omap3_stalker_dss_data); | 539 | omap_display_init(&omap3_stalker_dss_data); |
639 | spi_register_board_info(omap3stalker_spi_board_info, | ||
640 | ARRAY_SIZE(omap3stalker_spi_board_info)); | ||
641 | 540 | ||
642 | omap_serial_init(); | 541 | omap_serial_init(); |
643 | usb_musb_init(&musb_board_data); | 542 | usb_musb_init(NULL); |
644 | usbhs_init(&usbhs_bdata); | 543 | usbhs_init(&usbhs_bdata); |
645 | ads7846_dev_init(); | 544 | omap_ads7846_init(1, OMAP3_STALKER_TS_GPIO, 310, NULL); |
646 | 545 | ||
647 | omap_mux_init_gpio(21, OMAP_PIN_OUTPUT); | 546 | omap_mux_init_gpio(21, OMAP_PIN_OUTPUT); |
648 | omap_mux_init_gpio(18, OMAP_PIN_INPUT_PULLUP); | 547 | omap_mux_init_gpio(18, OMAP_PIN_INPUT_PULLUP); |
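Open-coded smsc911x resources, smsc911x_platform_config and platform_device registration give way to gpmc_smsc911x_init(), driven by a small omap_smsc911x_platform_data that names the GPMC chip select and the IRQ/reset GPIOs. Judging from the sequence it replaces, the helper claims the chip select, requests the IRQ GPIO and registers the smsc911x device itself. A usage sketch with the stalker's values (board_eth_init is an illustrative name; -EINVAL marks a reset line that isn't wired):

        #include <linux/errno.h>
        #include <linux/smsc911x.h>
        #include <plat/gpmc-smsc911x.h>

        #define BOARD_SMSC911X_CS       5
        #define BOARD_SMSC911X_GPIO_IRQ 19

        static struct omap_smsc911x_platform_data smsc911x_cfg = {
                .cs         = BOARD_SMSC911X_CS,
                .gpio_irq   = BOARD_SMSC911X_GPIO_IRQ,
                .gpio_reset = -EINVAL,          /* no reset GPIO wired */
                .flags      = SMSC911X_USE_32BIT | SMSC911X_SAVE_MAC_ADDRESS,
        };

        static void __init board_eth_init(void)
        {
                gpmc_smsc911x_init(&smsc911x_cfg);
        }

Boards with two controllers, like overo below, simply register two such structures with distinct .id, .cs and .gpio_irq values.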
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c index 127cb1752bdd..82872d7d313b 100644 --- a/arch/arm/mach-omap2/board-omap3touchbook.c +++ b/arch/arm/mach-omap2/board-omap3touchbook.c | |||
@@ -52,6 +52,7 @@ | |||
52 | #include "mux.h" | 52 | #include "mux.h" |
53 | #include "hsmmc.h" | 53 | #include "hsmmc.h" |
54 | #include "timer-gp.h" | 54 | #include "timer-gp.h" |
55 | #include "common-board-devices.h" | ||
55 | 56 | ||
56 | #include <asm/setup.h> | 57 | #include <asm/setup.h> |
57 | 58 | ||
@@ -95,15 +96,6 @@ static struct mtd_partition omap3touchbook_nand_partitions[] = { | |||
95 | }, | 96 | }, |
96 | }; | 97 | }; |
97 | 98 | ||
98 | static struct omap_nand_platform_data omap3touchbook_nand_data = { | ||
99 | .options = NAND_BUSWIDTH_16, | ||
100 | .parts = omap3touchbook_nand_partitions, | ||
101 | .nr_parts = ARRAY_SIZE(omap3touchbook_nand_partitions), | ||
102 | .dma_channel = -1, /* disable DMA in OMAP NAND driver */ | ||
103 | .nand_setup = NULL, | ||
104 | .dev_ready = NULL, | ||
105 | }; | ||
106 | |||
107 | #include "sdram-micron-mt46h32m32lf-6.h" | 99 | #include "sdram-micron-mt46h32m32lf-6.h" |
108 | 100 | ||
109 | static struct omap2_hsmmc_info mmc[] = { | 101 | static struct omap2_hsmmc_info mmc[] = { |
@@ -154,13 +146,11 @@ static int touchbook_twl_gpio_setup(struct device *dev, | |||
154 | /* REVISIT: need ehci-omap hooks for external VBUS | 146 | /* REVISIT: need ehci-omap hooks for external VBUS |
155 | * power switch and overcurrent detect | 147 | * power switch and overcurrent detect |
156 | */ | 148 | */ |
157 | 149 | gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC"); | |
158 | gpio_request(gpio + 1, "EHCI_nOC"); | ||
159 | gpio_direction_input(gpio + 1); | ||
160 | 150 | ||
161 | /* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */ | 151 | /* TWL4030_GPIO_MAX + 0 == ledA, EHCI nEN_USB_PWR (out, active low) */ |
162 | gpio_request(gpio + TWL4030_GPIO_MAX, "nEN_USB_PWR"); | 152 | gpio_request_one(gpio + TWL4030_GPIO_MAX, GPIOF_OUT_INIT_LOW, |
163 | gpio_direction_output(gpio + TWL4030_GPIO_MAX, 0); | 153 | "nEN_USB_PWR"); |
164 | 154 | ||
165 | /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ | 155 | /* TWL4030_GPIO_MAX + 1 == ledB, PMU_STAT (out, active low LED) */ |
166 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; | 156 | gpio_leds[2].gpio = gpio + TWL4030_GPIO_MAX + 1; |
@@ -273,15 +263,6 @@ static struct twl4030_platform_data touchbook_twldata = { | |||
273 | .vpll2 = &touchbook_vpll2, | 263 | .vpll2 = &touchbook_vpll2, |
274 | }; | 264 | }; |
275 | 265 | ||
276 | static struct i2c_board_info __initdata touchbook_i2c_boardinfo[] = { | ||
277 | { | ||
278 | I2C_BOARD_INFO("twl4030", 0x48), | ||
279 | .flags = I2C_CLIENT_WAKE, | ||
280 | .irq = INT_34XX_SYS_NIRQ, | ||
281 | .platform_data = &touchbook_twldata, | ||
282 | }, | ||
283 | }; | ||
284 | |||
285 | static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = { | 266 | static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = { |
286 | { | 267 | { |
287 | I2C_BOARD_INFO("bq27200", 0x55), | 268 | I2C_BOARD_INFO("bq27200", 0x55), |
@@ -291,8 +272,7 @@ static struct i2c_board_info __initdata touchBook_i2c_boardinfo[] = { | |||
291 | static int __init omap3_touchbook_i2c_init(void) | 272 | static int __init omap3_touchbook_i2c_init(void) |
292 | { | 273 | { |
293 | /* Standard TouchBook bus */ | 274 | /* Standard TouchBook bus */ |
294 | omap_register_i2c_bus(1, 2600, touchbook_i2c_boardinfo, | 275 | omap3_pmic_init("twl4030", &touchbook_twldata); |
295 | ARRAY_SIZE(touchbook_i2c_boardinfo)); | ||
296 | 276 | ||
297 | /* Additional TouchBook bus */ | 277 | /* Additional TouchBook bus */ |
298 | omap_register_i2c_bus(3, 100, touchBook_i2c_boardinfo, | 278 | omap_register_i2c_bus(3, 100, touchBook_i2c_boardinfo, |
@@ -301,19 +281,7 @@ static int __init omap3_touchbook_i2c_init(void) | |||
301 | return 0; | 281 | return 0; |
302 | } | 282 | } |
303 | 283 | ||
304 | static void __init omap3_ads7846_init(void) | 284 | static struct ads7846_platform_data ads7846_pdata = { |
305 | { | ||
306 | if (gpio_request(OMAP3_TS_GPIO, "ads7846_pen_down")) { | ||
307 | printk(KERN_ERR "Failed to request GPIO %d for " | ||
308 | "ads7846 pen down IRQ\n", OMAP3_TS_GPIO); | ||
309 | return; | ||
310 | } | ||
311 | |||
312 | gpio_direction_input(OMAP3_TS_GPIO); | ||
313 | gpio_set_debounce(OMAP3_TS_GPIO, 310); | ||
314 | } | ||
315 | |||
316 | static struct ads7846_platform_data ads7846_config = { | ||
317 | .x_min = 100, | 285 | .x_min = 100, |
318 | .y_min = 265, | 286 | .y_min = 265, |
319 | .x_max = 3950, | 287 | .x_max = 3950, |
@@ -327,23 +295,6 @@ static struct ads7846_platform_data ads7846_config = { | |||
327 | .keep_vref_on = 1, | 295 | .keep_vref_on = 1, |
328 | }; | 296 | }; |
329 | 297 | ||
330 | static struct omap2_mcspi_device_config ads7846_mcspi_config = { | ||
331 | .turbo_mode = 0, | ||
332 | .single_channel = 1, /* 0: slave, 1: master */ | ||
333 | }; | ||
334 | |||
335 | static struct spi_board_info omap3_ads7846_spi_board_info[] __initdata = { | ||
336 | { | ||
337 | .modalias = "ads7846", | ||
338 | .bus_num = 4, | ||
339 | .chip_select = 0, | ||
340 | .max_speed_hz = 1500000, | ||
341 | .controller_data = &ads7846_mcspi_config, | ||
342 | .irq = OMAP_GPIO_IRQ(OMAP3_TS_GPIO), | ||
343 | .platform_data = &ads7846_config, | ||
344 | } | ||
345 | }; | ||
346 | |||
347 | static struct gpio_led gpio_leds[] = { | 298 | static struct gpio_led gpio_leds[] = { |
348 | { | 299 | { |
349 | .name = "touchbook::usr0", | 300 | .name = "touchbook::usr0", |
@@ -434,39 +385,6 @@ static struct platform_device *omap3_touchbook_devices[] __initdata = { | |||
434 | &keys_gpio, | 385 | &keys_gpio, |
435 | }; | 386 | }; |
436 | 387 | ||
437 | static void __init omap3touchbook_flash_init(void) | ||
438 | { | ||
439 | u8 cs = 0; | ||
440 | u8 nandcs = GPMC_CS_NUM + 1; | ||
441 | |||
442 | /* find out the chip-select on which NAND exists */ | ||
443 | while (cs < GPMC_CS_NUM) { | ||
444 | u32 ret = 0; | ||
445 | ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); | ||
446 | |||
447 | if ((ret & 0xC00) == 0x800) { | ||
448 | printk(KERN_INFO "Found NAND on CS%d\n", cs); | ||
449 | if (nandcs > GPMC_CS_NUM) | ||
450 | nandcs = cs; | ||
451 | } | ||
452 | cs++; | ||
453 | } | ||
454 | |||
455 | if (nandcs > GPMC_CS_NUM) { | ||
456 | printk(KERN_INFO "NAND: Unable to find configuration " | ||
457 | "in GPMC\n "); | ||
458 | return; | ||
459 | } | ||
460 | |||
461 | if (nandcs < GPMC_CS_NUM) { | ||
462 | omap3touchbook_nand_data.cs = nandcs; | ||
463 | |||
464 | printk(KERN_INFO "Registering NAND on CS%d\n", nandcs); | ||
465 | if (gpmc_nand_init(&omap3touchbook_nand_data) < 0) | ||
466 | printk(KERN_ERR "Unable to register NAND device\n"); | ||
467 | } | ||
468 | } | ||
469 | |||
470 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { | 388 | static const struct usbhs_omap_board_data usbhs_bdata __initconst = { |
471 | 389 | ||
472 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, | 390 | .port_mode[0] = OMAP_EHCI_PORT_MODE_PHY, |
@@ -481,15 +399,10 @@ static const struct usbhs_omap_board_data usbhs_bdata __initconst = { | |||
481 | 399 | ||
482 | static void omap3_touchbook_poweroff(void) | 400 | static void omap3_touchbook_poweroff(void) |
483 | { | 401 | { |
484 | int r; | 402 | int pwr_off = TB_KILL_POWER_GPIO; |
485 | 403 | ||
486 | r = gpio_request(TB_KILL_POWER_GPIO, "DVI reset"); | 404 | if (gpio_request_one(pwr_off, GPIOF_OUT_INIT_LOW, "DVI reset") < 0) |
487 | if (r < 0) { | ||
488 | printk(KERN_ERR "Unable to get kill power GPIO\n"); | 405 | printk(KERN_ERR "Unable to get kill power GPIO\n"); |
489 | return; | ||
490 | } | ||
491 | |||
492 | gpio_direction_output(TB_KILL_POWER_GPIO, 0); | ||
493 | } | 406 | } |
494 | 407 | ||
495 | static int __init early_touchbook_revision(char *p) | 408 | static int __init early_touchbook_revision(char *p) |
@@ -501,12 +414,6 @@ static int __init early_touchbook_revision(char *p) | |||
501 | } | 414 | } |
502 | early_param("tbr", early_touchbook_revision); | 415 | early_param("tbr", early_touchbook_revision); |
503 | 416 | ||
504 | static struct omap_musb_board_data musb_board_data = { | ||
505 | .interface_type = MUSB_INTERFACE_ULPI, | ||
506 | .mode = MUSB_OTG, | ||
507 | .power = 100, | ||
508 | }; | ||
509 | |||
510 | static void __init omap3_touchbook_init(void) | 417 | static void __init omap3_touchbook_init(void) |
511 | { | 418 | { |
512 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); | 419 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); |
@@ -521,17 +428,15 @@ static void __init omap3_touchbook_init(void) | |||
521 | omap_serial_init(); | 428 | omap_serial_init(); |
522 | 429 | ||
523 | omap_mux_init_gpio(170, OMAP_PIN_INPUT); | 430 | omap_mux_init_gpio(170, OMAP_PIN_INPUT); |
524 | gpio_request(176, "DVI_nPD"); | ||
525 | /* REVISIT leave DVI powered down until it's needed ... */ | 431 | /* REVISIT leave DVI powered down until it's needed ... */ |
526 | gpio_direction_output(176, true); | 432 | gpio_request_one(176, GPIOF_OUT_INIT_HIGH, "DVI_nPD"); |
527 | 433 | ||
528 | /* Touchscreen and accelerometer */ | 434 | /* Touchscreen and accelerometer */ |
529 | spi_register_board_info(omap3_ads7846_spi_board_info, | 435 | omap_ads7846_init(4, OMAP3_TS_GPIO, 310, &ads7846_pdata); |
530 | ARRAY_SIZE(omap3_ads7846_spi_board_info)); | 436 | usb_musb_init(NULL); |
531 | omap3_ads7846_init(); | ||
532 | usb_musb_init(&musb_board_data); | ||
533 | usbhs_init(&usbhs_bdata); | 437 | usbhs_init(&usbhs_bdata); |
534 | omap3touchbook_flash_init(); | 438 | omap_nand_flash_init(NAND_BUSWIDTH_16, omap3touchbook_nand_partitions, |
439 | ARRAY_SIZE(omap3touchbook_nand_partitions)); | ||
535 | 440 | ||
536 | /* Ensure SDRC pins are mux'd for self-refresh */ | 441 | /* Ensure SDRC pins are mux'd for self-refresh */ |
537 | omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); | 442 | omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); |
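The per-board GPMC chip-select probe loop for NAND becomes a one-liner as well: omap_nand_flash_init(options, partitions, nr_partitions) scans the GPMC chip selects and registers the NAND device, judging from the code it replaces. A sketch of a caller, with a hypothetical two-entry partition table (real boards define their own layouts):

        #include <linux/kernel.h>
        #include <linux/mtd/mtd.h>
        #include <linux/mtd/partitions.h>
        #include <linux/mtd/nand.h>     /* NAND_BUSWIDTH_16 */

        #include "common-board-devices.h"

        static struct mtd_partition board_nand_partitions[] = {
                {
                        .name       = "X-Loader",
                        .offset     = 0,
                        .size       = 4 * (64 * 2048),  /* 512 KiB, arbitrary */
                        .mask_flags = MTD_WRITEABLE,    /* force read-only */
                },
                {
                        .name       = "File System",
                        .offset     = MTDPART_OFS_APPEND,
                        .size       = MTDPART_SIZ_FULL,
                },
        };

        static void __init board_flash_init(void)
        {
                /* 16-bit bus width, as on the touchbook; overo passes 0. */
                omap_nand_flash_init(NAND_BUSWIDTH_16, board_nand_partitions,
                                     ARRAY_SIZE(board_nand_partitions));
        }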
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c index e4973ac77cbc..90485fced973 100644 --- a/arch/arm/mach-omap2/board-omap4panda.c +++ b/arch/arm/mach-omap2/board-omap4panda.c | |||
@@ -46,6 +46,7 @@ | |||
46 | #include "hsmmc.h" | 46 | #include "hsmmc.h" |
47 | #include "control.h" | 47 | #include "control.h" |
48 | #include "mux.h" | 48 | #include "mux.h" |
49 | #include "common-board-devices.h" | ||
49 | 50 | ||
50 | #define GPIO_HUB_POWER 1 | 51 | #define GPIO_HUB_POWER 1 |
51 | #define GPIO_HUB_NRESET 62 | 52 | #define GPIO_HUB_NRESET 62 |
@@ -111,6 +112,11 @@ static const struct usbhs_omap_board_data usbhs_bdata __initconst = { | |||
111 | .reset_gpio_port[2] = -EINVAL | 112 | .reset_gpio_port[2] = -EINVAL |
112 | }; | 113 | }; |
113 | 114 | ||
115 | static struct gpio panda_ehci_gpios[] __initdata = { | ||
116 | { GPIO_HUB_POWER, GPIOF_OUT_INIT_LOW, "hub_power" }, | ||
117 | { GPIO_HUB_NRESET, GPIOF_OUT_INIT_LOW, "hub_nreset" }, | ||
118 | }; | ||
119 | |||
114 | static void __init omap4_ehci_init(void) | 120 | static void __init omap4_ehci_init(void) |
115 | { | 121 | { |
116 | int ret; | 122 | int ret; |
@@ -120,44 +126,27 @@ static void __init omap4_ehci_init(void) | |||
120 | phy_ref_clk = clk_get(NULL, "auxclk3_ck"); | 126 | phy_ref_clk = clk_get(NULL, "auxclk3_ck"); |
121 | if (IS_ERR(phy_ref_clk)) { | 127 | if (IS_ERR(phy_ref_clk)) { |
122 | pr_err("Cannot request auxclk3\n"); | 128 | pr_err("Cannot request auxclk3\n"); |
123 | goto error1; | 129 | return; |
124 | } | 130 | } |
125 | clk_set_rate(phy_ref_clk, 19200000); | 131 | clk_set_rate(phy_ref_clk, 19200000); |
126 | clk_enable(phy_ref_clk); | 132 | clk_enable(phy_ref_clk); |
127 | 133 | ||
128 | /* disable the power to the usb hub prior to init */ | 134 | /* disable the power to the usb hub prior to init and reset phy+hub */ |
129 | ret = gpio_request(GPIO_HUB_POWER, "hub_power"); | 135 | ret = gpio_request_array(panda_ehci_gpios, |
136 | ARRAY_SIZE(panda_ehci_gpios)); | ||
130 | if (ret) { | 137 | if (ret) { |
131 | pr_err("Cannot request GPIO %d\n", GPIO_HUB_POWER); | 138 | pr_err("Unable to initialize EHCI power/reset\n"); |
132 | goto error1; | 139 | return; |
133 | } | 140 | } |
134 | gpio_export(GPIO_HUB_POWER, 0); | ||
135 | gpio_direction_output(GPIO_HUB_POWER, 0); | ||
136 | gpio_set_value(GPIO_HUB_POWER, 0); | ||
137 | 141 | ||
138 | /* reset phy+hub */ | 142 | gpio_export(GPIO_HUB_POWER, 0); |
139 | ret = gpio_request(GPIO_HUB_NRESET, "hub_nreset"); | ||
140 | if (ret) { | ||
141 | pr_err("Cannot request GPIO %d\n", GPIO_HUB_NRESET); | ||
142 | goto error2; | ||
143 | } | ||
144 | gpio_export(GPIO_HUB_NRESET, 0); | 143 | gpio_export(GPIO_HUB_NRESET, 0); |
145 | gpio_direction_output(GPIO_HUB_NRESET, 0); | ||
146 | gpio_set_value(GPIO_HUB_NRESET, 0); | ||
147 | gpio_set_value(GPIO_HUB_NRESET, 1); | 144 | gpio_set_value(GPIO_HUB_NRESET, 1); |
148 | 145 | ||
149 | usbhs_init(&usbhs_bdata); | 146 | usbhs_init(&usbhs_bdata); |
150 | 147 | ||
151 | /* enable power to hub */ | 148 | /* enable power to hub */ |
152 | gpio_set_value(GPIO_HUB_POWER, 1); | 149 | gpio_set_value(GPIO_HUB_POWER, 1); |
153 | return; | ||
154 | |||
155 | error2: | ||
156 | gpio_free(GPIO_HUB_POWER); | ||
157 | error1: | ||
158 | pr_err("Unable to initialize EHCI power/reset\n"); | ||
159 | return; | ||
160 | |||
161 | } | 150 | } |
162 | 151 | ||
163 | static struct omap_musb_board_data musb_board_data = { | 152 | static struct omap_musb_board_data musb_board_data = { |
@@ -408,15 +397,6 @@ static struct twl4030_platform_data omap4_panda_twldata = { | |||
408 | .usb = &omap4_usbphy_data, | 397 | .usb = &omap4_usbphy_data, |
409 | }; | 398 | }; |
410 | 399 | ||
411 | static struct i2c_board_info __initdata omap4_panda_i2c_boardinfo[] = { | ||
412 | { | ||
413 | I2C_BOARD_INFO("twl6030", 0x48), | ||
414 | .flags = I2C_CLIENT_WAKE, | ||
415 | .irq = OMAP44XX_IRQ_SYS_1N, | ||
416 | .platform_data = &omap4_panda_twldata, | ||
417 | }, | ||
418 | }; | ||
419 | |||
420 | /* | 400 | /* |
421 | * Display monitor features are burnt in their EEPROM as EDID data. The EEPROM | 401 | * Display monitor features are burnt in their EEPROM as EDID data. The EEPROM |
422 | * is connected as I2C slave device, and can be accessed at address 0x50 | 402 | * is connected as I2C slave device, and can be accessed at address 0x50 |
@@ -429,12 +409,7 @@ static struct i2c_board_info __initdata panda_i2c_eeprom[] = { | |||
429 | 409 | ||
430 | static int __init omap4_panda_i2c_init(void) | 410 | static int __init omap4_panda_i2c_init(void) |
431 | { | 411 | { |
432 | /* | 412 | omap4_pmic_init("twl6030", &omap4_panda_twldata); |
433 | * Phoenix Audio IC needs I2C1 to | ||
434 | * start with 400 KHz or less | ||
435 | */ | ||
436 | omap_register_i2c_bus(1, 400, omap4_panda_i2c_boardinfo, | ||
437 | ARRAY_SIZE(omap4_panda_i2c_boardinfo)); | ||
438 | omap_register_i2c_bus(2, 400, NULL, 0); | 413 | omap_register_i2c_bus(2, 400, NULL, 0); |
439 | /* | 414 | /* |
440 | * Bus 3 is attached to the DVI port where devices like the pico DLP | 415 | * Bus 3 is attached to the DVI port where devices like the pico DLP |
@@ -651,27 +626,19 @@ static void omap4_panda_hdmi_mux_init(void) | |||
651 | OMAP_PIN_INPUT_PULLUP); | 626 | OMAP_PIN_INPUT_PULLUP); |
652 | } | 627 | } |
653 | 628 | ||
629 | static struct gpio panda_hdmi_gpios[] = { | ||
630 | { HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd" }, | ||
631 | { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" }, | ||
632 | }; | ||
633 | |||
654 | static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev) | 634 | static int omap4_panda_panel_enable_hdmi(struct omap_dss_device *dssdev) |
655 | { | 635 | { |
656 | int status; | 636 | int status; |
657 | 637 | ||
658 | status = gpio_request_one(HDMI_GPIO_HPD, GPIOF_OUT_INIT_HIGH, | 638 | status = gpio_request_array(panda_hdmi_gpios, |
659 | "hdmi_gpio_hpd"); | 639 | ARRAY_SIZE(panda_hdmi_gpios)); |
660 | if (status) { | 640 | if (status) |
661 | pr_err("Cannot request GPIO %d\n", HDMI_GPIO_HPD); | 641 | pr_err("Cannot request HDMI GPIOs\n"); |
662 | return status; | ||
663 | } | ||
664 | status = gpio_request_one(HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, | ||
665 | "hdmi_gpio_ls_oe"); | ||
666 | if (status) { | ||
667 | pr_err("Cannot request GPIO %d\n", HDMI_GPIO_LS_OE); | ||
668 | goto error1; | ||
669 | } | ||
670 | |||
671 | return 0; | ||
672 | |||
673 | error1: | ||
674 | gpio_free(HDMI_GPIO_HPD); | ||
675 | 642 | ||
676 | return status; | 643 | return status; |
677 | } | 644 | } |
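As on the EVM, the Panda's EHCI and HDMI GPIO bring-up becomes table-driven through struct gpio and gpio_request_array(). The natural teardown counterpart is gpio_free_array(); the enable/disable pair below is a hedged sketch of that idiom, not the board's actual callbacks, and the pin numbers are illustrative:

        #include <linux/kernel.h>
        #include <linux/gpio.h>

        #define HDMI_GPIO_HPD   60      /* illustrative numbers */
        #define HDMI_GPIO_LS_OE 41

        static struct gpio hdmi_gpios[] = {
                { HDMI_GPIO_HPD,   GPIOF_OUT_INIT_HIGH, "hdmi_gpio_hpd"   },
                { HDMI_GPIO_LS_OE, GPIOF_OUT_INIT_HIGH, "hdmi_gpio_ls_oe" },
        };

        static int panel_enable_hdmi(void)
        {
                /* Requests every pin with its initial level; on failure the
                 * pins already claimed are released before returning. */
                int status = gpio_request_array(hdmi_gpios,
                                                ARRAY_SIZE(hdmi_gpios));

                if (status)
                        pr_err("Cannot request HDMI GPIOs\n");
                return status;
        }

        static void panel_disable_hdmi(void)
        {
                /* Releases every pin claimed by the matching request. */
                gpio_free_array(hdmi_gpios, ARRAY_SIZE(hdmi_gpios));
        }

One request call per table replaces a request/direction/set sequence per pin, and the goto-based error unwinding in omap4_ehci_init() and the HDMI enable path above disappears with it.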
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c index 9d192ff3b9ac..1555918e3ffa 100644 --- a/arch/arm/mach-omap2/board-overo.c +++ b/arch/arm/mach-omap2/board-overo.c | |||
@@ -56,6 +56,7 @@ | |||
56 | #include "mux.h" | 56 | #include "mux.h" |
57 | #include "sdram-micron-mt46h32m32lf-6.h" | 57 | #include "sdram-micron-mt46h32m32lf-6.h" |
58 | #include "hsmmc.h" | 58 | #include "hsmmc.h" |
59 | #include "common-board-devices.h" | ||
59 | 60 | ||
60 | #define OVERO_GPIO_BT_XGATE 15 | 61 | #define OVERO_GPIO_BT_XGATE 15 |
61 | #define OVERO_GPIO_W2W_NRESET 16 | 62 | #define OVERO_GPIO_W2W_NRESET 16 |
@@ -74,30 +75,6 @@ | |||
74 | #if defined(CONFIG_TOUCHSCREEN_ADS7846) || \ | 75 | #if defined(CONFIG_TOUCHSCREEN_ADS7846) || \ |
75 | defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) | 76 | defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) |
76 | 77 | ||
77 | #include <linux/spi/ads7846.h> | ||
78 | |||
79 | static struct omap2_mcspi_device_config ads7846_mcspi_config = { | ||
80 | .turbo_mode = 0, | ||
81 | .single_channel = 1, /* 0: slave, 1: master */ | ||
82 | }; | ||
83 | |||
84 | static int ads7846_get_pendown_state(void) | ||
85 | { | ||
86 | return !gpio_get_value(OVERO_GPIO_PENDOWN); | ||
87 | } | ||
88 | |||
89 | static struct ads7846_platform_data ads7846_config = { | ||
90 | .x_max = 0x0fff, | ||
91 | .y_max = 0x0fff, | ||
92 | .x_plate_ohms = 180, | ||
93 | .pressure_max = 255, | ||
94 | .debounce_max = 10, | ||
95 | .debounce_tol = 3, | ||
96 | .debounce_rep = 1, | ||
97 | .get_pendown_state = ads7846_get_pendown_state, | ||
98 | .keep_vref_on = 1, | ||
99 | }; | ||
100 | |||
101 | /* fixed regulator for ads7846 */ | 78 | /* fixed regulator for ads7846 */ |
102 | static struct regulator_consumer_supply ads7846_supply = | 79 | static struct regulator_consumer_supply ads7846_supply = |
103 | REGULATOR_SUPPLY("vcc", "spi1.0"); | 80 | REGULATOR_SUPPLY("vcc", "spi1.0"); |
@@ -128,14 +105,7 @@ static struct platform_device vads7846_device = { | |||
128 | 105 | ||
129 | static void __init overo_ads7846_init(void) | 106 | static void __init overo_ads7846_init(void) |
130 | { | 107 | { |
131 | if ((gpio_request(OVERO_GPIO_PENDOWN, "ADS7846_PENDOWN") == 0) && | 108 | omap_ads7846_init(1, OVERO_GPIO_PENDOWN, 0, NULL); |
132 | (gpio_direction_input(OVERO_GPIO_PENDOWN) == 0)) { | ||
133 | gpio_export(OVERO_GPIO_PENDOWN, 0); | ||
134 | } else { | ||
135 | printk(KERN_ERR "could not obtain gpio for ADS7846_PENDOWN\n"); | ||
136 | return; | ||
137 | } | ||
138 | |||
139 | platform_device_register(&vads7846_device); | 109 | platform_device_register(&vads7846_device); |
140 | } | 110 | } |
141 | 111 | ||
@@ -146,106 +116,28 @@ static inline void __init overo_ads7846_init(void) { return; } | |||
146 | #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) | 116 | #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) |
147 | 117 | ||
148 | #include <linux/smsc911x.h> | 118 | #include <linux/smsc911x.h> |
119 | #include <plat/gpmc-smsc911x.h> | ||
149 | 120 | ||
150 | static struct resource overo_smsc911x_resources[] = { | 121 | static struct omap_smsc911x_platform_data smsc911x_cfg = { |
151 | { | ||
152 | .name = "smsc911x-memory", | ||
153 | .flags = IORESOURCE_MEM, | ||
154 | }, | ||
155 | { | ||
156 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, | ||
157 | }, | ||
158 | }; | ||
159 | |||
160 | static struct resource overo_smsc911x2_resources[] = { | ||
161 | { | ||
162 | .name = "smsc911x2-memory", | ||
163 | .flags = IORESOURCE_MEM, | ||
164 | }, | ||
165 | { | ||
166 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, | ||
167 | }, | ||
168 | }; | ||
169 | |||
170 | static struct smsc911x_platform_config overo_smsc911x_config = { | ||
171 | .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, | ||
172 | .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, | ||
173 | .flags = SMSC911X_USE_32BIT , | ||
174 | .phy_interface = PHY_INTERFACE_MODE_MII, | ||
175 | }; | ||
176 | |||
177 | static struct platform_device overo_smsc911x_device = { | ||
178 | .name = "smsc911x", | ||
179 | .id = 0, | 122 | .id = 0, |
180 | .num_resources = ARRAY_SIZE(overo_smsc911x_resources), | 123 | .cs = OVERO_SMSC911X_CS, |
181 | .resource = overo_smsc911x_resources, | 124 | .gpio_irq = OVERO_SMSC911X_GPIO, |
182 | .dev = { | 125 | .gpio_reset = -EINVAL, |
183 | .platform_data = &overo_smsc911x_config, | 126 | .flags = SMSC911X_USE_32BIT, |
184 | }, | ||
185 | }; | 127 | }; |
186 | 128 | ||
187 | static struct platform_device overo_smsc911x2_device = { | 129 | static struct omap_smsc911x_platform_data smsc911x2_cfg = { |
188 | .name = "smsc911x", | ||
189 | .id = 1, | 130 | .id = 1, |
190 | .num_resources = ARRAY_SIZE(overo_smsc911x2_resources), | 131 | .cs = OVERO_SMSC911X2_CS, |
191 | .resource = overo_smsc911x2_resources, | 132 | .gpio_irq = OVERO_SMSC911X2_GPIO, |
192 | .dev = { | 133 | .gpio_reset = -EINVAL, |
193 | .platform_data = &overo_smsc911x_config, | 134 | .flags = SMSC911X_USE_32BIT, |
194 | }, | ||
195 | }; | 135 | }; |
196 | 136 | ||
197 | static struct platform_device *smsc911x_devices[] = { | 137 | static void __init overo_init_smsc911x(void) |
198 | &overo_smsc911x_device, | ||
199 | &overo_smsc911x2_device, | ||
200 | }; | ||
201 | |||
202 | static inline void __init overo_init_smsc911x(void) | ||
203 | { | 138 | { |
204 | unsigned long cs_mem_base, cs_mem_base2; | 139 | gpmc_smsc911x_init(&smsc911x_cfg); |
205 | 140 | gpmc_smsc911x_init(&smsc911x2_cfg); | |
206 | /* set up first smsc911x chip */ | ||
207 | |||
208 | if (gpmc_cs_request(OVERO_SMSC911X_CS, SZ_16M, &cs_mem_base) < 0) { | ||
209 | printk(KERN_ERR "Failed request for GPMC mem for smsc911x\n"); | ||
210 | return; | ||
211 | } | ||
212 | |||
213 | overo_smsc911x_resources[0].start = cs_mem_base + 0x0; | ||
214 | overo_smsc911x_resources[0].end = cs_mem_base + 0xff; | ||
215 | |||
216 | if ((gpio_request(OVERO_SMSC911X_GPIO, "SMSC911X IRQ") == 0) && | ||
217 | (gpio_direction_input(OVERO_SMSC911X_GPIO) == 0)) { | ||
218 | gpio_export(OVERO_SMSC911X_GPIO, 0); | ||
219 | } else { | ||
220 | printk(KERN_ERR "could not obtain gpio for SMSC911X IRQ\n"); | ||
221 | return; | ||
222 | } | ||
223 | |||
224 | overo_smsc911x_resources[1].start = OMAP_GPIO_IRQ(OVERO_SMSC911X_GPIO); | ||
225 | overo_smsc911x_resources[1].end = 0; | ||
226 | |||
227 | /* set up second smsc911x chip */ | ||
228 | |||
229 | if (gpmc_cs_request(OVERO_SMSC911X2_CS, SZ_16M, &cs_mem_base2) < 0) { | ||
230 | printk(KERN_ERR "Failed request for GPMC mem for smsc911x2\n"); | ||
231 | return; | ||
232 | } | ||
233 | |||
234 | overo_smsc911x2_resources[0].start = cs_mem_base2 + 0x0; | ||
235 | overo_smsc911x2_resources[0].end = cs_mem_base2 + 0xff; | ||
236 | |||
237 | if ((gpio_request(OVERO_SMSC911X2_GPIO, "SMSC911X2 IRQ") == 0) && | ||
238 | (gpio_direction_input(OVERO_SMSC911X2_GPIO) == 0)) { | ||
239 | gpio_export(OVERO_SMSC911X2_GPIO, 0); | ||
240 | } else { | ||
241 | printk(KERN_ERR "could not obtain gpio for SMSC911X2 IRQ\n"); | ||
242 | return; | ||
243 | } | ||
244 | |||
245 | overo_smsc911x2_resources[1].start = OMAP_GPIO_IRQ(OVERO_SMSC911X2_GPIO); | ||
246 | overo_smsc911x2_resources[1].end = 0; | ||
247 | |||
248 | platform_add_devices(smsc911x_devices, ARRAY_SIZE(smsc911x_devices)); | ||
249 | } | 141 | } |
250 | 142 | ||
251 | #else | 143 | #else |
@@ -259,21 +151,20 @@ static int dvi_enabled; | |||
259 | #define OVERO_GPIO_LCD_EN 144 | 151 | #define OVERO_GPIO_LCD_EN 144 |
260 | #define OVERO_GPIO_LCD_BL 145 | 152 | #define OVERO_GPIO_LCD_BL 145 |
261 | 153 | ||
154 | static struct gpio overo_dss_gpios[] __initdata = { | ||
155 | { OVERO_GPIO_LCD_EN, GPIOF_OUT_INIT_HIGH, "OVERO_GPIO_LCD_EN" }, | ||
156 | { OVERO_GPIO_LCD_BL, GPIOF_OUT_INIT_HIGH, "OVERO_GPIO_LCD_BL" }, | ||
157 | }; | ||
158 | |||
262 | static void __init overo_display_init(void) | 159 | static void __init overo_display_init(void) |
263 | { | 160 | { |
264 | if ((gpio_request(OVERO_GPIO_LCD_EN, "OVERO_GPIO_LCD_EN") == 0) && | 161 | if (gpio_request_array(overo_dss_gpios, ARRAY_SIZE(overo_dss_gpios))) { |
265 | (gpio_direction_output(OVERO_GPIO_LCD_EN, 1) == 0)) | 162 | printk(KERN_ERR "could not obtain DSS control GPIOs\n"); |
266 | gpio_export(OVERO_GPIO_LCD_EN, 0); | 163 | return; |
267 | else | 164 | } |
268 | printk(KERN_ERR "could not obtain gpio for " | ||
269 | "OVERO_GPIO_LCD_EN\n"); | ||
270 | 165 | ||
271 | if ((gpio_request(OVERO_GPIO_LCD_BL, "OVERO_GPIO_LCD_BL") == 0) && | 166 | gpio_export(OVERO_GPIO_LCD_EN, 0); |
272 | (gpio_direction_output(OVERO_GPIO_LCD_BL, 1) == 0)) | 167 | gpio_export(OVERO_GPIO_LCD_BL, 0); |
273 | gpio_export(OVERO_GPIO_LCD_BL, 0); | ||
274 | else | ||
275 | printk(KERN_ERR "could not obtain gpio for " | ||
276 | "OVERO_GPIO_LCD_BL\n"); | ||
277 | } | 168 | } |
278 | 169 | ||
279 | static int overo_panel_enable_dvi(struct omap_dss_device *dssdev) | 170 | static int overo_panel_enable_dvi(struct omap_dss_device *dssdev) |
@@ -412,45 +303,6 @@ static struct mtd_partition overo_nand_partitions[] = { | |||
412 | }, | 303 | }, |
413 | }; | 304 | }; |
414 | 305 | ||
415 | static struct omap_nand_platform_data overo_nand_data = { | ||
416 | .parts = overo_nand_partitions, | ||
417 | .nr_parts = ARRAY_SIZE(overo_nand_partitions), | ||
418 | .dma_channel = -1, /* disable DMA in OMAP NAND driver */ | ||
419 | }; | ||
420 | |||
421 | static void __init overo_flash_init(void) | ||
422 | { | ||
423 | u8 cs = 0; | ||
424 | u8 nandcs = GPMC_CS_NUM + 1; | ||
425 | |||
426 | /* find out the chip-select on which NAND exists */ | ||
427 | while (cs < GPMC_CS_NUM) { | ||
428 | u32 ret = 0; | ||
429 | ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); | ||
430 | |||
431 | if ((ret & 0xC00) == 0x800) { | ||
432 | printk(KERN_INFO "Found NAND on CS%d\n", cs); | ||
433 | if (nandcs > GPMC_CS_NUM) | ||
434 | nandcs = cs; | ||
435 | } | ||
436 | cs++; | ||
437 | } | ||
438 | |||
439 | if (nandcs > GPMC_CS_NUM) { | ||
440 | printk(KERN_INFO "NAND: Unable to find configuration " | ||
441 | "in GPMC\n "); | ||
442 | return; | ||
443 | } | ||
444 | |||
445 | if (nandcs < GPMC_CS_NUM) { | ||
446 | overo_nand_data.cs = nandcs; | ||
447 | |||
448 | printk(KERN_INFO "Registering NAND on CS%d\n", nandcs); | ||
449 | if (gpmc_nand_init(&overo_nand_data) < 0) | ||
450 | printk(KERN_ERR "Unable to register NAND device\n"); | ||
451 | } | ||
452 | } | ||
453 | |||
454 | static struct omap2_hsmmc_info mmc[] = { | 306 | static struct omap2_hsmmc_info mmc[] = { |
455 | { | 307 | { |
456 | .mmc = 1, | 308 | .mmc = 1, |
@@ -648,37 +500,15 @@ static struct twl4030_platform_data overo_twldata = { | |||
648 | .vpll2 = &overo_vpll2, | 500 | .vpll2 = &overo_vpll2, |
649 | }; | 501 | }; |
650 | 502 | ||
651 | static struct i2c_board_info __initdata overo_i2c_boardinfo[] = { | ||
652 | { | ||
653 | I2C_BOARD_INFO("tps65950", 0x48), | ||
654 | .flags = I2C_CLIENT_WAKE, | ||
655 | .irq = INT_34XX_SYS_NIRQ, | ||
656 | .platform_data = &overo_twldata, | ||
657 | }, | ||
658 | }; | ||
659 | |||
660 | static int __init overo_i2c_init(void) | 503 | static int __init overo_i2c_init(void) |
661 | { | 504 | { |
662 | omap_register_i2c_bus(1, 2600, overo_i2c_boardinfo, | 505 | omap3_pmic_init("tps65950", &overo_twldata); |
663 | ARRAY_SIZE(overo_i2c_boardinfo)); | ||
664 | /* i2c2 pins are used for gpio */ | 506 | /* i2c2 pins are used for gpio */ |
665 | omap_register_i2c_bus(3, 400, NULL, 0); | 507 | omap_register_i2c_bus(3, 400, NULL, 0); |
666 | return 0; | 508 | return 0; |
667 | } | 509 | } |
668 | 510 | ||
669 | static struct spi_board_info overo_spi_board_info[] __initdata = { | 511 | static struct spi_board_info overo_spi_board_info[] __initdata = { |
670 | #if defined(CONFIG_TOUCHSCREEN_ADS7846) || \ | ||
671 | defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) | ||
672 | { | ||
673 | .modalias = "ads7846", | ||
674 | .bus_num = 1, | ||
675 | .chip_select = 0, | ||
676 | .max_speed_hz = 1500000, | ||
677 | .controller_data = &ads7846_mcspi_config, | ||
678 | .irq = OMAP_GPIO_IRQ(OVERO_GPIO_PENDOWN), | ||
679 | .platform_data = &ads7846_config, | ||
680 | }, | ||
681 | #endif | ||
682 | #if defined(CONFIG_PANEL_LGPHILIPS_LB035Q02) || \ | 512 | #if defined(CONFIG_PANEL_LGPHILIPS_LB035Q02) || \ |
683 | defined(CONFIG_PANEL_LGPHILIPS_LB035Q02_MODULE) | 513 | defined(CONFIG_PANEL_LGPHILIPS_LB035Q02_MODULE) |
684 | { | 514 | { |
@@ -722,20 +552,22 @@ static struct omap_board_mux board_mux[] __initdata = { | |||
722 | }; | 552 | }; |
723 | #endif | 553 | #endif |
724 | 554 | ||
725 | static struct omap_musb_board_data musb_board_data = { | 555 | static struct gpio overo_bt_gpios[] __initdata = { |
726 | .interface_type = MUSB_INTERFACE_ULPI, | 556 | { OVERO_GPIO_BT_XGATE, GPIOF_OUT_INIT_LOW, "bt_xgate" }, |
727 | .mode = MUSB_OTG, | 557 | { OVERO_GPIO_BT_NRESET, GPIOF_OUT_INIT_HIGH, "bt_nreset" }, |
728 | .power = 100, | ||
729 | }; | 558 | }; |
730 | 559 | ||
731 | static void __init overo_init(void) | 560 | static void __init overo_init(void) |
732 | { | 561 | { |
562 | int ret; | ||
563 | |||
733 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); | 564 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); |
734 | overo_i2c_init(); | 565 | overo_i2c_init(); |
735 | omap_display_init(&overo_dss_data); | 566 | omap_display_init(&overo_dss_data); |
736 | omap_serial_init(); | 567 | omap_serial_init(); |
737 | overo_flash_init(); | 568 | omap_nand_flash_init(0, overo_nand_partitions, |
738 | usb_musb_init(&musb_board_data); | 569 | ARRAY_SIZE(overo_nand_partitions)); |
570 | usb_musb_init(NULL); | ||
739 | usbhs_init(&usbhs_bdata); | 571 | usbhs_init(&usbhs_bdata); |
740 | overo_spi_init(); | 572 | overo_spi_init(); |
741 | overo_ads7846_init(); | 573 | overo_ads7846_init(); |
@@ -748,9 +580,9 @@ static void __init overo_init(void) | |||
748 | omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); | 580 | omap_mux_init_signal("sdrc_cke0", OMAP_PIN_OUTPUT); |
749 | omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT); | 581 | omap_mux_init_signal("sdrc_cke1", OMAP_PIN_OUTPUT); |
750 | 582 | ||
751 | if ((gpio_request(OVERO_GPIO_W2W_NRESET, | 583 | ret = gpio_request_one(OVERO_GPIO_W2W_NRESET, GPIOF_OUT_INIT_HIGH, |
752 | "OVERO_GPIO_W2W_NRESET") == 0) && | 584 | "OVERO_GPIO_W2W_NRESET"); |
753 | (gpio_direction_output(OVERO_GPIO_W2W_NRESET, 1) == 0)) { | 585 | if (ret == 0) { |
754 | gpio_export(OVERO_GPIO_W2W_NRESET, 0); | 586 | gpio_export(OVERO_GPIO_W2W_NRESET, 0); |
755 | gpio_set_value(OVERO_GPIO_W2W_NRESET, 0); | 587 | gpio_set_value(OVERO_GPIO_W2W_NRESET, 0); |
756 | udelay(10); | 588 | udelay(10); |
@@ -760,25 +592,20 @@ static void __init overo_init(void) | |||
760 | "OVERO_GPIO_W2W_NRESET\n"); | 592 | "OVERO_GPIO_W2W_NRESET\n"); |
761 | } | 593 | } |
762 | 594 | ||
763 | if ((gpio_request(OVERO_GPIO_BT_XGATE, "OVERO_GPIO_BT_XGATE") == 0) && | 595 | ret = gpio_request_array(overo_bt_gpios, ARRAY_SIZE(overo_bt_gpios)); |
764 | (gpio_direction_output(OVERO_GPIO_BT_XGATE, 0) == 0)) | 596 | if (ret) { |
597 | pr_err("%s: could not obtain BT gpios\n", __func__); | ||
598 | } else { | ||
765 | gpio_export(OVERO_GPIO_BT_XGATE, 0); | 599 | gpio_export(OVERO_GPIO_BT_XGATE, 0); |
766 | else | ||
767 | printk(KERN_ERR "could not obtain gpio for OVERO_GPIO_BT_XGATE\n"); | ||
768 | |||
769 | if ((gpio_request(OVERO_GPIO_BT_NRESET, "OVERO_GPIO_BT_NRESET") == 0) && | ||
770 | (gpio_direction_output(OVERO_GPIO_BT_NRESET, 1) == 0)) { | ||
771 | gpio_export(OVERO_GPIO_BT_NRESET, 0); | 600 | gpio_export(OVERO_GPIO_BT_NRESET, 0); |
772 | gpio_set_value(OVERO_GPIO_BT_NRESET, 0); | 601 | gpio_set_value(OVERO_GPIO_BT_NRESET, 0); |
773 | mdelay(6); | 602 | mdelay(6); |
774 | gpio_set_value(OVERO_GPIO_BT_NRESET, 1); | 603 | gpio_set_value(OVERO_GPIO_BT_NRESET, 1); |
775 | } else { | ||
776 | printk(KERN_ERR "could not obtain gpio for " | ||
777 | "OVERO_GPIO_BT_NRESET\n"); | ||
778 | } | 604 | } |
779 | 605 | ||
780 | if ((gpio_request(OVERO_GPIO_USBH_CPEN, "OVERO_GPIO_USBH_CPEN") == 0) && | 606 | ret = gpio_request_one(OVERO_GPIO_USBH_CPEN, GPIOF_OUT_INIT_HIGH, |
781 | (gpio_direction_output(OVERO_GPIO_USBH_CPEN, 1) == 0)) | 607 | "OVERO_GPIO_USBH_CPEN"); |
608 | if (ret == 0) | ||
782 | gpio_export(OVERO_GPIO_USBH_CPEN, 0); | 609 | gpio_export(OVERO_GPIO_USBH_CPEN, 0); |
783 | else | 610 | else |
784 | printk(KERN_ERR "could not obtain gpio for " | 611 | printk(KERN_ERR "could not obtain gpio for " |
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c index 2af8b05e786d..42d10b12da3c 100644 --- a/arch/arm/mach-omap2/board-rm680.c +++ b/arch/arm/mach-omap2/board-rm680.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include "mux.h" | 31 | #include "mux.h" |
32 | #include "hsmmc.h" | 32 | #include "hsmmc.h" |
33 | #include "sdram-nokia.h" | 33 | #include "sdram-nokia.h" |
34 | #include "common-board-devices.h" | ||
34 | 35 | ||
35 | static struct regulator_consumer_supply rm680_vemmc_consumers[] = { | 36 | static struct regulator_consumer_supply rm680_vemmc_consumers[] = { |
36 | REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"), | 37 | REGULATOR_SUPPLY("vmmc", "omap_hsmmc.1"), |
@@ -90,19 +91,9 @@ static struct twl4030_platform_data rm680_twl_data = { | |||
90 | /* add rest of the children here */ | 91 | /* add rest of the children here */ |
91 | }; | 92 | }; |
92 | 93 | ||
93 | static struct i2c_board_info __initdata rm680_twl_i2c_board_info[] = { | ||
94 | { | ||
95 | I2C_BOARD_INFO("twl5031", 0x48), | ||
96 | .flags = I2C_CLIENT_WAKE, | ||
97 | .irq = INT_34XX_SYS_NIRQ, | ||
98 | .platform_data = &rm680_twl_data, | ||
99 | }, | ||
100 | }; | ||
101 | |||
102 | static void __init rm680_i2c_init(void) | 94 | static void __init rm680_i2c_init(void) |
103 | { | 95 | { |
104 | omap_register_i2c_bus(1, 2900, rm680_twl_i2c_board_info, | 96 | omap_pmic_init(1, 2900, "twl5031", INT_34XX_SYS_NIRQ, &rm680_twl_data); |
105 | ARRAY_SIZE(rm680_twl_i2c_board_info)); | ||
106 | omap_register_i2c_bus(2, 400, NULL, 0); | 97 | omap_register_i2c_bus(2, 400, NULL, 0); |
107 | omap_register_i2c_bus(3, 400, NULL, 0); | 98 | omap_register_i2c_bus(3, 400, NULL, 0); |
108 | } | 99 | } |
@@ -153,17 +144,11 @@ static struct omap_board_mux board_mux[] __initdata = { | |||
153 | }; | 144 | }; |
154 | #endif | 145 | #endif |
155 | 146 | ||
156 | static struct omap_musb_board_data rm680_musb_data = { | ||
157 | .interface_type = MUSB_INTERFACE_ULPI, | ||
158 | .mode = MUSB_PERIPHERAL, | ||
159 | .power = 100, | ||
160 | }; | ||
161 | |||
162 | static void __init rm680_init(void) | 147 | static void __init rm680_init(void) |
163 | { | 148 | { |
164 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); | 149 | omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); |
165 | omap_serial_init(); | 150 | omap_serial_init(); |
166 | usb_musb_init(&rm680_musb_data); | 151 | usb_musb_init(NULL); |
167 | rm680_peripherals_init(); | 152 | rm680_peripherals_init(); |
168 | } | 153 | } |
169 | 154 | ||
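The rm680 conversion above relies on the omap_pmic_init() helper that this patch introduces in common-board-devices.c further down: the per-board struct i2c_board_info for the TWL PMIC is gone, and only the bus number, bus speed, chip name, IRQ and platform data stay in the board file. A hypothetical board would now register its PMIC like this (board name and platform data contents are illustrative; the usual board-file includes are assumed):

	#include <linux/init.h>
	#include <linux/i2c/twl.h>
	#include <plat/i2c.h>

	#include "common-board-devices.h"

	static struct twl4030_platform_data myboard_twl_data = {
		/* regulators, GPIO, keypad etc. for this board go here */
	};

	static void __init myboard_i2c_init(void)
	{
		/* bus 1 at 2600 kHz, TWL5030 PMIC wired to the SYS_NIRQ line */
		omap_pmic_init(1, 2600, "twl5030", INT_34XX_SYS_NIRQ,
			       &myboard_twl_data);
		omap_register_i2c_bus(2, 400, NULL, 0);
	}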
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index bbcb6775a6a3..f6247e71a194 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/gpio.h> | 23 | #include <linux/gpio.h> |
24 | #include <linux/gpio_keys.h> | 24 | #include <linux/gpio_keys.h> |
25 | #include <linux/mmc/host.h> | 25 | #include <linux/mmc/host.h> |
26 | #include <linux/power/isp1704_charger.h> | ||
26 | 27 | ||
27 | #include <plat/mcspi.h> | 28 | #include <plat/mcspi.h> |
28 | #include <plat/board.h> | 29 | #include <plat/board.h> |
@@ -43,6 +44,7 @@ | |||
43 | 44 | ||
44 | #include "mux.h" | 45 | #include "mux.h" |
45 | #include "hsmmc.h" | 46 | #include "hsmmc.h" |
47 | #include "common-board-devices.h" | ||
46 | 48 | ||
47 | #define SYSTEM_REV_B_USES_VAUX3 0x1699 | 49 | #define SYSTEM_REV_B_USES_VAUX3 0x1699 |
48 | #define SYSTEM_REV_S_USES_VAUX3 0x8 | 50 | #define SYSTEM_REV_S_USES_VAUX3 0x8 |
@@ -52,6 +54,8 @@ | |||
52 | #define RX51_FMTX_RESET_GPIO 163 | 54 | #define RX51_FMTX_RESET_GPIO 163 |
53 | #define RX51_FMTX_IRQ 53 | 55 | #define RX51_FMTX_IRQ 53 |
54 | 56 | ||
57 | #define RX51_USB_TRANSCEIVER_RST_GPIO 67 | ||
58 | |||
55 | /* list all spi devices here */ | 59 | /* list all spi devices here */ |
56 | enum { | 60 | enum { |
57 | RX51_SPI_WL1251, | 61 | RX51_SPI_WL1251, |
@@ -110,10 +114,30 @@ static struct spi_board_info rx51_peripherals_spi_board_info[] __initdata = { | |||
110 | }, | 114 | }, |
111 | }; | 115 | }; |
112 | 116 | ||
117 | static void rx51_charger_set_power(bool on) | ||
118 | { | ||
119 | gpio_set_value(RX51_USB_TRANSCEIVER_RST_GPIO, on); | ||
120 | } | ||
121 | |||
122 | static struct isp1704_charger_data rx51_charger_data = { | ||
123 | .set_power = rx51_charger_set_power, | ||
124 | }; | ||
125 | |||
113 | static struct platform_device rx51_charger_device = { | 126 | static struct platform_device rx51_charger_device = { |
114 | .name = "isp1704_charger", | 127 | .name = "isp1704_charger", |
128 | .dev = { | ||
129 | .platform_data = &rx51_charger_data, | ||
130 | }, | ||
115 | }; | 131 | }; |
116 | 132 | ||
133 | static void __init rx51_charger_init(void) | ||
134 | { | ||
135 | WARN_ON(gpio_request_one(RX51_USB_TRANSCEIVER_RST_GPIO, | ||
136 | GPIOF_OUT_INIT_LOW, "isp1704_reset")); | ||
137 | |||
138 | platform_device_register(&rx51_charger_device); | ||
139 | } | ||
140 | |||
117 | #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) | 141 | #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) |
118 | 142 | ||
119 | #define RX51_GPIO_CAMERA_LENS_COVER 110 | 143 | #define RX51_GPIO_CAMERA_LENS_COVER 110 |
@@ -557,10 +581,8 @@ static __init void rx51_init_si4713(void) | |||
557 | static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n) | 581 | static int rx51_twlgpio_setup(struct device *dev, unsigned gpio, unsigned n) |
558 | { | 582 | { |
559 | /* FIXME this gpio setup is just a placeholder for now */ | 583 | /* FIXME this gpio setup is just a placeholder for now */ |
560 | gpio_request(gpio + 6, "backlight_pwm"); | 584 | gpio_request_one(gpio + 6, GPIOF_OUT_INIT_LOW, "backlight_pwm"); |
561 | gpio_direction_output(gpio + 6, 0); | 585 | gpio_request_one(gpio + 7, GPIOF_OUT_INIT_HIGH, "speaker_en"); |
562 | gpio_request(gpio + 7, "speaker_en"); | ||
563 | gpio_direction_output(gpio + 7, 1); | ||
564 | 586 | ||
565 | return 0; | 587 | return 0; |
566 | } | 588 | } |
@@ -730,7 +752,7 @@ static struct twl4030_resconfig twl4030_rconfig[] __initdata = { | |||
730 | { .resource = RES_RESET, .devgroup = -1, | 752 | { .resource = RES_RESET, .devgroup = -1, |
731 | .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1 | 753 | .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1 |
732 | }, | 754 | }, |
733 | { .resource = RES_Main_Ref, .devgroup = -1, | 755 | { .resource = RES_MAIN_REF, .devgroup = -1, |
734 | .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1 | 756 | .type = 1, .type2 = -1, .remap_off = -1, .remap_sleep = -1 |
735 | }, | 757 | }, |
736 | { 0, 0}, | 758 | { 0, 0}, |
@@ -777,15 +799,6 @@ static struct tpa6130a2_platform_data rx51_tpa6130a2_data __initdata_or_module = | |||
777 | .power_gpio = 98, | 799 | .power_gpio = 98, |
778 | }; | 800 | }; |
779 | 801 | ||
780 | static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_1[] = { | ||
781 | { | ||
782 | I2C_BOARD_INFO("twl5030", 0x48), | ||
783 | .flags = I2C_CLIENT_WAKE, | ||
784 | .irq = INT_34XX_SYS_NIRQ, | ||
785 | .platform_data = &rx51_twldata, | ||
786 | }, | ||
787 | }; | ||
788 | |||
789 | /* Audio setup data */ | 802 | /* Audio setup data */ |
790 | static struct aic3x_setup_data rx51_aic34_setup = { | 803 | static struct aic3x_setup_data rx51_aic34_setup = { |
791 | .gpio_func[0] = AIC3X_GPIO1_FUNC_DISABLED, | 804 | .gpio_func[0] = AIC3X_GPIO1_FUNC_DISABLED, |
@@ -833,8 +846,7 @@ static int __init rx51_i2c_init(void) | |||
833 | rx51_twldata.vaux3 = &rx51_vaux3_cam; | 846 | rx51_twldata.vaux3 = &rx51_vaux3_cam; |
834 | } | 847 | } |
835 | rx51_twldata.vmmc2 = &rx51_vmmc2; | 848 | rx51_twldata.vmmc2 = &rx51_vmmc2; |
836 | omap_register_i2c_bus(1, 2200, rx51_peripherals_i2c_board_info_1, | 849 | omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata); |
837 | ARRAY_SIZE(rx51_peripherals_i2c_board_info_1)); | ||
838 | omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2, | 850 | omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2, |
839 | ARRAY_SIZE(rx51_peripherals_i2c_board_info_2)); | 851 | ARRAY_SIZE(rx51_peripherals_i2c_board_info_2)); |
840 | omap_register_i2c_bus(3, 400, NULL, 0); | 852 | omap_register_i2c_bus(3, 400, NULL, 0); |
@@ -921,26 +933,20 @@ static void rx51_wl1251_set_power(bool enable) | |||
921 | gpio_set_value(RX51_WL1251_POWER_GPIO, enable); | 933 | gpio_set_value(RX51_WL1251_POWER_GPIO, enable); |
922 | } | 934 | } |
923 | 935 | ||
936 | static struct gpio rx51_wl1251_gpios[] __initdata = { | ||
937 | { RX51_WL1251_POWER_GPIO, GPIOF_OUT_INIT_LOW, "wl1251 power" }, | ||
938 | { RX51_WL1251_IRQ_GPIO, GPIOF_IN, "wl1251 irq" }, | ||
939 | }; | ||
940 | |||
924 | static void __init rx51_init_wl1251(void) | 941 | static void __init rx51_init_wl1251(void) |
925 | { | 942 | { |
926 | int irq, ret; | 943 | int irq, ret; |
927 | 944 | ||
928 | ret = gpio_request(RX51_WL1251_POWER_GPIO, "wl1251 power"); | 945 | ret = gpio_request_array(rx51_wl1251_gpios, |
946 | ARRAY_SIZE(rx51_wl1251_gpios)); | ||
929 | if (ret < 0) | 947 | if (ret < 0) |
930 | goto error; | 948 | goto error; |
931 | 949 | ||
932 | ret = gpio_direction_output(RX51_WL1251_POWER_GPIO, 0); | ||
933 | if (ret < 0) | ||
934 | goto err_power; | ||
935 | |||
936 | ret = gpio_request(RX51_WL1251_IRQ_GPIO, "wl1251 irq"); | ||
937 | if (ret < 0) | ||
938 | goto err_power; | ||
939 | |||
940 | ret = gpio_direction_input(RX51_WL1251_IRQ_GPIO); | ||
941 | if (ret < 0) | ||
942 | goto err_irq; | ||
943 | |||
944 | irq = gpio_to_irq(RX51_WL1251_IRQ_GPIO); | 950 | irq = gpio_to_irq(RX51_WL1251_IRQ_GPIO); |
945 | if (irq < 0) | 951 | if (irq < 0) |
946 | goto err_irq; | 952 | goto err_irq; |
@@ -952,10 +958,7 @@ static void __init rx51_init_wl1251(void) | |||
952 | 958 | ||
953 | err_irq: | 959 | err_irq: |
954 | gpio_free(RX51_WL1251_IRQ_GPIO); | 960 | gpio_free(RX51_WL1251_IRQ_GPIO); |
955 | |||
956 | err_power: | ||
957 | gpio_free(RX51_WL1251_POWER_GPIO); | 961 | gpio_free(RX51_WL1251_POWER_GPIO); |
958 | |||
959 | error: | 962 | error: |
960 | printk(KERN_ERR "wl1251 board initialisation failed\n"); | 963 | printk(KERN_ERR "wl1251 board initialisation failed\n"); |
961 | wl1251_pdata.set_power = NULL; | 964 | wl1251_pdata.set_power = NULL; |
@@ -981,6 +984,6 @@ void __init rx51_peripherals_init(void) | |||
981 | if (partition) | 984 | if (partition) |
982 | omap2_hsmmc_init(mmc); | 985 | omap2_hsmmc_init(mmc); |
983 | 986 | ||
984 | platform_device_register(&rx51_charger_device); | 987 | rx51_charger_init(); |
985 | } | 988 | } |
986 | 989 | ||
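The RX-51 WL1251 hunk switches to gpio_request_array(), which claims a whole table of struct gpio entries in one call and releases any already-claimed lines if a later entry fails. A standalone sketch of the idiom, with hypothetical GPIO numbers and labels; gpio_free_array() is the matching teardown:

	#include <linux/gpio.h>

	static struct gpio example_gpios[] = {
		{ 10, GPIOF_OUT_INIT_LOW, "example power" },	/* hypothetical */
		{ 11, GPIOF_IN,           "example irq"   },	/* hypothetical */
	};

	static int example_claim_gpios(void)
	{
		int err;

		/* all-or-nothing: partial requests are rolled back on error */
		err = gpio_request_array(example_gpios, ARRAY_SIZE(example_gpios));
		if (err)
			return err;

		/* ... use the lines ... */

		gpio_free_array(example_gpios, ARRAY_SIZE(example_gpios));
		return 0;
	}

This is also why the RX-51 error path above keeps only the err_irq/error labels instead of the extra err_power unwind step it had before.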
diff --git a/arch/arm/mach-omap2/board-rx51-video.c b/arch/arm/mach-omap2/board-rx51-video.c index 2df10b6a5940..2c1289bd5e6a 100644 --- a/arch/arm/mach-omap2/board-rx51-video.c +++ b/arch/arm/mach-omap2/board-rx51-video.c | |||
@@ -76,13 +76,12 @@ static int __init rx51_video_init(void) | |||
76 | return 0; | 76 | return 0; |
77 | } | 77 | } |
78 | 78 | ||
79 | if (gpio_request(RX51_LCD_RESET_GPIO, "LCD ACX565AKM reset")) { | 79 | if (gpio_request_one(RX51_LCD_RESET_GPIO, GPIOF_OUT_INIT_HIGH, |
80 | "LCD ACX565AKM reset")) { | ||
80 | pr_err("%s failed to get LCD Reset GPIO\n", __func__); | 81 | pr_err("%s failed to get LCD Reset GPIO\n", __func__); |
81 | return 0; | 82 | return 0; |
82 | } | 83 | } |
83 | 84 | ||
84 | gpio_direction_output(RX51_LCD_RESET_GPIO, 1); | ||
85 | |||
86 | omap_display_init(&rx51_dss_board_info); | 85 | omap_display_init(&rx51_dss_board_info); |
87 | return 0; | 86 | return 0; |
88 | } | 87 | } |
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c index f8ba20a14e62..fec4cac8fa0a 100644 --- a/arch/arm/mach-omap2/board-rx51.c +++ b/arch/arm/mach-omap2/board-rx51.c | |||
@@ -58,21 +58,25 @@ static struct platform_device leds_gpio = { | |||
58 | }, | 58 | }, |
59 | }; | 59 | }; |
60 | 60 | ||
61 | /* | ||
62 | * cpuidle C-states definition override from the default values. | ||
63 | * The 'exit_latency' field is the sum of sleep and wake-up latencies. | ||
64 | */ | ||
61 | static struct cpuidle_params rx51_cpuidle_params[] = { | 65 | static struct cpuidle_params rx51_cpuidle_params[] = { |
62 | /* C1 */ | 66 | /* C1 */ |
63 | {1, 110, 162, 5}, | 67 | {110 + 162, 5 , 1}, |
64 | /* C2 */ | 68 | /* C2 */ |
65 | {1, 106, 180, 309}, | 69 | {106 + 180, 309, 1}, |
66 | /* C3 */ | 70 | /* C3 */ |
67 | {0, 107, 410, 46057}, | 71 | {107 + 410, 46057, 0}, |
68 | /* C4 */ | 72 | /* C4 */ |
69 | {0, 121, 3374, 46057}, | 73 | {121 + 3374, 46057, 0}, |
70 | /* C5 */ | 74 | /* C5 */ |
71 | {1, 855, 1146, 46057}, | 75 | {855 + 1146, 46057, 1}, |
72 | /* C6 */ | 76 | /* C6 */ |
73 | {0, 7580, 4134, 484329}, | 77 | {7580 + 4134, 484329, 0}, |
74 | /* C7 */ | 78 | /* C7 */ |
75 | {1, 7505, 15274, 484329}, | 79 | {7505 + 15274, 484329, 1}, |
76 | }; | 80 | }; |
77 | 81 | ||
78 | static struct omap_lcd_config rx51_lcd_config = { | 82 | static struct omap_lcd_config rx51_lcd_config = { |
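The RX-51 C-state table above uses the new struct cpuidle_params layout introduced by this series: the former sleep and wake-up latencies are folded into a single exit_latency (hence the "110 + 162" style sums), followed by the target residency and the valid flag. A sketch of the layout and of the C1 row as it reads after the change; the field order is inferred from the initializers and from the copy loop in cpuidle34xx.c, the authoritative definition lives in the OMAP pm header:

	/* inferred layout -- see the OMAP pm header for the real definition */
	struct cpuidle_params {
		u32 exit_latency;	/* sleep + wake-up latency, in us */
		u32 target_residency;	/* minimum useful residency, in us */
		u8  valid;		/* is this state usable on the board? */
	};

	/* C1 on RX-51: 110 us sleep + 162 us wake-up = 272 us exit latency */
	static const struct cpuidle_params rx51_c1_example = {
		.exit_latency		= 110 + 162,
		.target_residency	= 5,
		.valid			= 1,
	};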
diff --git a/arch/arm/mach-omap2/board-zoom-debugboard.c b/arch/arm/mach-omap2/board-zoom-debugboard.c index 007ebdc6c993..6402e781c458 100644 --- a/arch/arm/mach-omap2/board-zoom-debugboard.c +++ b/arch/arm/mach-omap2/board-zoom-debugboard.c | |||
@@ -15,6 +15,7 @@ | |||
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | 16 | ||
17 | #include <plat/gpmc.h> | 17 | #include <plat/gpmc.h> |
18 | #include <plat/gpmc-smsc911x.h> | ||
18 | 19 | ||
19 | #include <mach/board-zoom.h> | 20 | #include <mach/board-zoom.h> |
20 | 21 | ||
@@ -26,60 +27,16 @@ | |||
26 | #define DEBUG_BASE 0x08000000 | 27 | #define DEBUG_BASE 0x08000000 |
27 | #define ZOOM_ETHR_START DEBUG_BASE | 28 | #define ZOOM_ETHR_START DEBUG_BASE |
28 | 29 | ||
29 | static struct resource zoom_smsc911x_resources[] = { | 30 | static struct omap_smsc911x_platform_data zoom_smsc911x_cfg = { |
30 | [0] = { | 31 | .cs = ZOOM_SMSC911X_CS, |
31 | .start = ZOOM_ETHR_START, | 32 | .gpio_irq = ZOOM_SMSC911X_GPIO, |
32 | .end = ZOOM_ETHR_START + SZ_4K, | 33 | .gpio_reset = -EINVAL, |
33 | .flags = IORESOURCE_MEM, | ||
34 | }, | ||
35 | [1] = { | ||
36 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, | ||
37 | }, | ||
38 | }; | ||
39 | |||
40 | static struct smsc911x_platform_config zoom_smsc911x_config = { | ||
41 | .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, | ||
42 | .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, | ||
43 | .flags = SMSC911X_USE_32BIT, | 34 | .flags = SMSC911X_USE_32BIT, |
44 | .phy_interface = PHY_INTERFACE_MODE_MII, | ||
45 | }; | ||
46 | |||
47 | static struct platform_device zoom_smsc911x_device = { | ||
48 | .name = "smsc911x", | ||
49 | .id = -1, | ||
50 | .num_resources = ARRAY_SIZE(zoom_smsc911x_resources), | ||
51 | .resource = zoom_smsc911x_resources, | ||
52 | .dev = { | ||
53 | .platform_data = &zoom_smsc911x_config, | ||
54 | }, | ||
55 | }; | 35 | }; |
56 | 36 | ||
57 | static inline void __init zoom_init_smsc911x(void) | 37 | static inline void __init zoom_init_smsc911x(void) |
58 | { | 38 | { |
59 | int eth_cs; | 39 | gpmc_smsc911x_init(&zoom_smsc911x_cfg); |
60 | unsigned long cs_mem_base; | ||
61 | int eth_gpio = 0; | ||
62 | |||
63 | eth_cs = ZOOM_SMSC911X_CS; | ||
64 | |||
65 | if (gpmc_cs_request(eth_cs, SZ_16M, &cs_mem_base) < 0) { | ||
66 | printk(KERN_ERR "Failed to request GPMC mem for smsc911x\n"); | ||
67 | return; | ||
68 | } | ||
69 | |||
70 | zoom_smsc911x_resources[0].start = cs_mem_base + 0x0; | ||
71 | zoom_smsc911x_resources[0].end = cs_mem_base + 0xff; | ||
72 | |||
73 | eth_gpio = ZOOM_SMSC911X_GPIO; | ||
74 | |||
75 | zoom_smsc911x_resources[1].start = OMAP_GPIO_IRQ(eth_gpio); | ||
76 | |||
77 | if (gpio_request(eth_gpio, "smsc911x irq") < 0) { | ||
78 | printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n", | ||
79 | eth_gpio); | ||
80 | return; | ||
81 | } | ||
82 | gpio_direction_input(eth_gpio); | ||
83 | } | 40 | } |
84 | 41 | ||
85 | static struct plat_serial8250_port serial_platform_data[] = { | 42 | static struct plat_serial8250_port serial_platform_data[] = { |
@@ -120,12 +77,9 @@ static inline void __init zoom_init_quaduart(void) | |||
120 | 77 | ||
121 | quart_gpio = ZOOM_QUADUART_GPIO; | 78 | quart_gpio = ZOOM_QUADUART_GPIO; |
122 | 79 | ||
123 | if (gpio_request(quart_gpio, "TL16CP754C GPIO") < 0) { | 80 | if (gpio_request_one(quart_gpio, GPIOF_IN, "TL16CP754C GPIO") < 0) |
124 | printk(KERN_ERR "Failed to request GPIO%d for TL16CP754C\n", | 81 | printk(KERN_ERR "Failed to request GPIO%d for TL16CP754C\n", |
125 | quart_gpio); | 82 | quart_gpio); |
126 | return; | ||
127 | } | ||
128 | gpio_direction_input(quart_gpio); | ||
129 | } | 83 | } |
130 | 84 | ||
131 | static inline int omap_zoom_debugboard_detect(void) | 85 | static inline int omap_zoom_debugboard_detect(void) |
@@ -135,12 +89,12 @@ static inline int omap_zoom_debugboard_detect(void) | |||
135 | 89 | ||
136 | debug_board_detect = ZOOM_SMSC911X_GPIO; | 90 | debug_board_detect = ZOOM_SMSC911X_GPIO; |
137 | 91 | ||
138 | if (gpio_request(debug_board_detect, "Zoom debug board detect") < 0) { | 92 | if (gpio_request_one(debug_board_detect, GPIOF_IN, |
93 | "Zoom debug board detect") < 0) { | ||
139 | printk(KERN_ERR "Failed to request GPIO%d for Zoom debug" | 94 | printk(KERN_ERR "Failed to request GPIO%d for Zoom debug" |
140 | "board detect\n", debug_board_detect); | 95 | "board detect\n", debug_board_detect); |
141 | return 0; | 96 | return 0; |
142 | } | 97 | } |
143 | gpio_direction_input(debug_board_detect); | ||
144 | 98 | ||
145 | if (!gpio_get_value(debug_board_detect)) { | 99 | if (!gpio_get_value(debug_board_detect)) { |
146 | ret = 0; | 100 | ret = 0; |
@@ -150,7 +104,6 @@ static inline int omap_zoom_debugboard_detect(void) | |||
150 | } | 104 | } |
151 | 105 | ||
152 | static struct platform_device *zoom_devices[] __initdata = { | 106 | static struct platform_device *zoom_devices[] __initdata = { |
153 | &zoom_smsc911x_device, | ||
154 | &zoom_debugboard_serial_device, | 107 | &zoom_debugboard_serial_device, |
155 | }; | 108 | }; |
156 | 109 | ||
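The zoom debug-board file no longer open-codes the GPMC chip-select request, the IRQ GPIO setup and the smsc911x platform device; all of that moves behind gpmc_smsc911x_init(). Another board wanting the same Ethernet controller would now only fill in a small config, sketched here with hypothetical chip-select and GPIO numbers:

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <linux/smsc911x.h>
	#include <plat/gpmc-smsc911x.h>

	static struct omap_smsc911x_platform_data example_smsc911x_cfg = {
		.cs		= 5,		/* hypothetical GPMC chip-select */
		.gpio_irq	= 152,		/* hypothetical IRQ GPIO */
		.gpio_reset	= -EINVAL,	/* no reset line wired up */
		.flags		= SMSC911X_USE_32BIT,
	};

	static void __init example_init_ethernet(void)
	{
		gpmc_smsc911x_init(&example_smsc911x_cfg);
	}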
diff --git a/arch/arm/mach-omap2/board-zoom-display.c b/arch/arm/mach-omap2/board-zoom-display.c index 60e8645db59d..c7c6beb1ec24 100644 --- a/arch/arm/mach-omap2/board-zoom-display.c +++ b/arch/arm/mach-omap2/board-zoom-display.c | |||
@@ -21,34 +21,19 @@ | |||
21 | #define LCD_PANEL_RESET_GPIO_PILOT 55 | 21 | #define LCD_PANEL_RESET_GPIO_PILOT 55 |
22 | #define LCD_PANEL_QVGA_GPIO 56 | 22 | #define LCD_PANEL_QVGA_GPIO 56 |
23 | 23 | ||
24 | static struct gpio zoom_lcd_gpios[] __initdata = { | ||
25 | { -EINVAL, GPIOF_OUT_INIT_HIGH, "lcd reset" }, | ||
26 | { LCD_PANEL_QVGA_GPIO, GPIOF_OUT_INIT_HIGH, "lcd qvga" }, | ||
27 | }; | ||
28 | |||
24 | static void zoom_lcd_panel_init(void) | 29 | static void zoom_lcd_panel_init(void) |
25 | { | 30 | { |
26 | int ret; | 31 | zoom_lcd_gpios[0].gpio = (omap_rev() > OMAP3430_REV_ES3_0) ? |
27 | unsigned char lcd_panel_reset_gpio; | ||
28 | |||
29 | lcd_panel_reset_gpio = (omap_rev() > OMAP3430_REV_ES3_0) ? | ||
30 | LCD_PANEL_RESET_GPIO_PROD : | 32 | LCD_PANEL_RESET_GPIO_PROD : |
31 | LCD_PANEL_RESET_GPIO_PILOT; | 33 | LCD_PANEL_RESET_GPIO_PILOT; |
32 | 34 | ||
33 | ret = gpio_request(lcd_panel_reset_gpio, "lcd reset"); | 35 | if (gpio_request_array(zoom_lcd_gpios, ARRAY_SIZE(zoom_lcd_gpios))) |
34 | if (ret) { | 36 | pr_err("%s: Failed to get LCD GPIOs.\n", __func__); |
35 | pr_err("Failed to get LCD reset GPIO (gpio%d).\n", | ||
36 | lcd_panel_reset_gpio); | ||
37 | return; | ||
38 | } | ||
39 | gpio_direction_output(lcd_panel_reset_gpio, 1); | ||
40 | |||
41 | ret = gpio_request(LCD_PANEL_QVGA_GPIO, "lcd qvga"); | ||
42 | if (ret) { | ||
43 | pr_err("Failed to get LCD_PANEL_QVGA_GPIO (gpio%d).\n", | ||
44 | LCD_PANEL_QVGA_GPIO); | ||
45 | goto err0; | ||
46 | } | ||
47 | gpio_direction_output(LCD_PANEL_QVGA_GPIO, 1); | ||
48 | |||
49 | return; | ||
50 | err0: | ||
51 | gpio_free(lcd_panel_reset_gpio); | ||
52 | } | 37 | } |
53 | 38 | ||
54 | static int zoom_panel_enable_lcd(struct omap_dss_device *dssdev) | 39 | static int zoom_panel_enable_lcd(struct omap_dss_device *dssdev) |
diff --git a/arch/arm/mach-omap2/board-zoom-peripherals.c b/arch/arm/mach-omap2/board-zoom-peripherals.c index 8dee7549fbdf..118c6f53c5eb 100644 --- a/arch/arm/mach-omap2/board-zoom-peripherals.c +++ b/arch/arm/mach-omap2/board-zoom-peripherals.c | |||
@@ -31,6 +31,7 @@ | |||
31 | 31 | ||
32 | #include "mux.h" | 32 | #include "mux.h" |
33 | #include "hsmmc.h" | 33 | #include "hsmmc.h" |
34 | #include "common-board-devices.h" | ||
34 | 35 | ||
35 | #define OMAP_ZOOM_WLAN_PMENA_GPIO (101) | 36 | #define OMAP_ZOOM_WLAN_PMENA_GPIO (101) |
36 | #define OMAP_ZOOM_WLAN_IRQ_GPIO (162) | 37 | #define OMAP_ZOOM_WLAN_IRQ_GPIO (162) |
@@ -276,13 +277,11 @@ static int zoom_twl_gpio_setup(struct device *dev, | |||
276 | zoom_vsim_supply.dev = mmc[0].dev; | 277 | zoom_vsim_supply.dev = mmc[0].dev; |
277 | zoom_vmmc2_supply.dev = mmc[1].dev; | 278 | zoom_vmmc2_supply.dev = mmc[1].dev; |
278 | 279 | ||
279 | ret = gpio_request(LCD_PANEL_ENABLE_GPIO, "lcd enable"); | 280 | ret = gpio_request_one(LCD_PANEL_ENABLE_GPIO, GPIOF_OUT_INIT_LOW, |
280 | if (ret) { | 281 | "lcd enable"); |
282 | if (ret) | ||
281 | pr_err("Failed to get LCD_PANEL_ENABLE_GPIO (gpio%d).\n", | 283 | pr_err("Failed to get LCD_PANEL_ENABLE_GPIO (gpio%d).\n", |
282 | LCD_PANEL_ENABLE_GPIO); | 284 | LCD_PANEL_ENABLE_GPIO); |
283 | return ret; | ||
284 | } | ||
285 | gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 0); | ||
286 | 285 | ||
287 | return ret; | 286 | return ret; |
288 | } | 287 | } |
@@ -349,15 +348,6 @@ static struct twl4030_platform_data zoom_twldata = { | |||
349 | .vdac = &zoom_vdac, | 348 | .vdac = &zoom_vdac, |
350 | }; | 349 | }; |
351 | 350 | ||
352 | static struct i2c_board_info __initdata zoom_i2c_boardinfo[] = { | ||
353 | { | ||
354 | I2C_BOARD_INFO("twl5030", 0x48), | ||
355 | .flags = I2C_CLIENT_WAKE, | ||
356 | .irq = INT_34XX_SYS_NIRQ, | ||
357 | .platform_data = &zoom_twldata, | ||
358 | }, | ||
359 | }; | ||
360 | |||
361 | static int __init omap_i2c_init(void) | 351 | static int __init omap_i2c_init(void) |
362 | { | 352 | { |
363 | if (machine_is_omap_zoom2()) { | 353 | if (machine_is_omap_zoom2()) { |
@@ -365,19 +355,12 @@ static int __init omap_i2c_init(void) | |||
365 | zoom_audio_data.hs_extmute = 1; | 355 | zoom_audio_data.hs_extmute = 1; |
366 | zoom_audio_data.set_hs_extmute = zoom2_set_hs_extmute; | 356 | zoom_audio_data.set_hs_extmute = zoom2_set_hs_extmute; |
367 | } | 357 | } |
368 | omap_register_i2c_bus(1, 2400, zoom_i2c_boardinfo, | 358 | omap_pmic_init(1, 2400, "twl5030", INT_34XX_SYS_NIRQ, &zoom_twldata); |
369 | ARRAY_SIZE(zoom_i2c_boardinfo)); | ||
370 | omap_register_i2c_bus(2, 400, NULL, 0); | 359 | omap_register_i2c_bus(2, 400, NULL, 0); |
371 | omap_register_i2c_bus(3, 400, NULL, 0); | 360 | omap_register_i2c_bus(3, 400, NULL, 0); |
372 | return 0; | 361 | return 0; |
373 | } | 362 | } |
374 | 363 | ||
375 | static struct omap_musb_board_data musb_board_data = { | ||
376 | .interface_type = MUSB_INTERFACE_ULPI, | ||
377 | .mode = MUSB_OTG, | ||
378 | .power = 100, | ||
379 | }; | ||
380 | |||
381 | static void enable_board_wakeup_source(void) | 364 | static void enable_board_wakeup_source(void) |
382 | { | 365 | { |
383 | /* T2 interrupt line (keypad) */ | 366 | /* T2 interrupt line (keypad) */ |
@@ -392,7 +375,7 @@ void __init zoom_peripherals_init(void) | |||
392 | 375 | ||
393 | omap_i2c_init(); | 376 | omap_i2c_init(); |
394 | platform_device_register(&omap_vwlan_device); | 377 | platform_device_register(&omap_vwlan_device); |
395 | usb_musb_init(&musb_board_data); | 378 | usb_musb_init(NULL); |
396 | enable_board_wakeup_source(); | 379 | enable_board_wakeup_source(); |
397 | omap_serial_init(); | 380 | omap_serial_init(); |
398 | } | 381 | } |
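Both the rm680 and zoom conversions in this patch drop their private omap_musb_board_data and call usb_musb_init(NULL) instead. The usb-musb.c side of that change is not part of this excerpt; presumably it falls back to a default board_data when handed NULL, along these lines (purely illustrative, not the actual implementation; the default values are assumed from the structs deleted above):

	#include <linux/init.h>
	#include <plat/usb.h>

	/* assumed default, mirroring the structs removed from the board files */
	static struct omap_musb_board_data default_board_data = {
		.interface_type	= MUSB_INTERFACE_ULPI,
		.mode		= MUSB_OTG,
		.power		= 100,
	};

	void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
	{
		struct omap_musb_board_data *board_data =
			musb_board_data ? musb_board_data : &default_board_data;

		/* ... existing MUSB device registration, using board_data ... */
	}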
diff --git a/arch/arm/mach-omap2/common-board-devices.c b/arch/arm/mach-omap2/common-board-devices.c new file mode 100644 index 000000000000..e94903b2c65b --- /dev/null +++ b/arch/arm/mach-omap2/common-board-devices.c | |||
@@ -0,0 +1,163 @@ | |||
1 | /* | ||
2 | * common-board-devices.c | ||
3 | * | ||
4 | * Copyright (C) 2011 CompuLab, Ltd. | ||
5 | * Author: Mike Rapoport <mike@compulab.co.il> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or | ||
8 | * modify it under the terms of the GNU General Public License | ||
9 | * version 2 as published by the Free Software Foundation. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, but | ||
12 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
14 | * General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
19 | * 02110-1301 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/i2c.h> | ||
24 | #include <linux/i2c/twl.h> | ||
25 | |||
26 | #include <linux/gpio.h> | ||
27 | #include <linux/spi/spi.h> | ||
28 | #include <linux/spi/ads7846.h> | ||
29 | |||
30 | #include <plat/i2c.h> | ||
31 | #include <plat/mcspi.h> | ||
32 | #include <plat/nand.h> | ||
33 | |||
34 | #include "common-board-devices.h" | ||
35 | |||
36 | static struct i2c_board_info __initdata pmic_i2c_board_info = { | ||
37 | .addr = 0x48, | ||
38 | .flags = I2C_CLIENT_WAKE, | ||
39 | }; | ||
40 | |||
41 | void __init omap_pmic_init(int bus, u32 clkrate, | ||
42 | const char *pmic_type, int pmic_irq, | ||
43 | struct twl4030_platform_data *pmic_data) | ||
44 | { | ||
45 | strncpy(pmic_i2c_board_info.type, pmic_type, | ||
46 | sizeof(pmic_i2c_board_info.type)); | ||
47 | pmic_i2c_board_info.irq = pmic_irq; | ||
48 | pmic_i2c_board_info.platform_data = pmic_data; | ||
49 | |||
50 | omap_register_i2c_bus(bus, clkrate, &pmic_i2c_board_info, 1); | ||
51 | } | ||
52 | |||
53 | #if defined(CONFIG_TOUCHSCREEN_ADS7846) || \ | ||
54 | defined(CONFIG_TOUCHSCREEN_ADS7846_MODULE) | ||
55 | static struct omap2_mcspi_device_config ads7846_mcspi_config = { | ||
56 | .turbo_mode = 0, | ||
57 | .single_channel = 1, /* 0: slave, 1: master */ | ||
58 | }; | ||
59 | |||
60 | static struct ads7846_platform_data ads7846_config = { | ||
61 | .x_max = 0x0fff, | ||
62 | .y_max = 0x0fff, | ||
63 | .x_plate_ohms = 180, | ||
64 | .pressure_max = 255, | ||
65 | .debounce_max = 10, | ||
66 | .debounce_tol = 3, | ||
67 | .debounce_rep = 1, | ||
68 | .gpio_pendown = -EINVAL, | ||
69 | .keep_vref_on = 1, | ||
70 | }; | ||
71 | |||
72 | static struct spi_board_info ads7846_spi_board_info __initdata = { | ||
73 | .modalias = "ads7846", | ||
74 | .bus_num = -EINVAL, | ||
75 | .chip_select = 0, | ||
76 | .max_speed_hz = 1500000, | ||
77 | .controller_data = &ads7846_mcspi_config, | ||
78 | .irq = -EINVAL, | ||
79 | .platform_data = &ads7846_config, | ||
80 | }; | ||
81 | |||
82 | void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce, | ||
83 | struct ads7846_platform_data *board_pdata) | ||
84 | { | ||
85 | struct spi_board_info *spi_bi = &ads7846_spi_board_info; | ||
86 | int err; | ||
87 | |||
88 | err = gpio_request(gpio_pendown, "TS PenDown"); | ||
89 | if (err) { | ||
90 | pr_err("Could not obtain gpio for TS PenDown: %d\n", err); | ||
91 | return; | ||
92 | } | ||
93 | |||
94 | gpio_direction_input(gpio_pendown); | ||
95 | gpio_export(gpio_pendown, 0); | ||
96 | |||
97 | if (gpio_debounce) | ||
98 | gpio_set_debounce(gpio_pendown, gpio_debounce); | ||
99 | |||
100 | ads7846_config.gpio_pendown = gpio_pendown; | ||
101 | |||
102 | spi_bi->bus_num = bus_num; | ||
103 | spi_bi->irq = OMAP_GPIO_IRQ(gpio_pendown); | ||
104 | |||
105 | if (board_pdata) | ||
106 | spi_bi->platform_data = board_pdata; | ||
107 | |||
108 | spi_register_board_info(&ads7846_spi_board_info, 1); | ||
109 | } | ||
110 | #else | ||
111 | void __init omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce, | ||
112 | struct ads7846_platform_data *board_pdata) | ||
113 | { | ||
114 | } | ||
115 | #endif | ||
116 | |||
117 | #if defined(CONFIG_MTD_NAND_OMAP2) || defined(CONFIG_MTD_NAND_OMAP2_MODULE) | ||
118 | static struct omap_nand_platform_data nand_data = { | ||
119 | .dma_channel = -1, /* disable DMA in OMAP NAND driver */ | ||
120 | }; | ||
121 | |||
122 | void __init omap_nand_flash_init(int options, struct mtd_partition *parts, | ||
123 | int nr_parts) | ||
124 | { | ||
125 | u8 cs = 0; | ||
126 | u8 nandcs = GPMC_CS_NUM + 1; | ||
127 | |||
128 | /* find out the chip-select on which NAND exists */ | ||
129 | while (cs < GPMC_CS_NUM) { | ||
130 | u32 ret = 0; | ||
131 | ret = gpmc_cs_read_reg(cs, GPMC_CS_CONFIG1); | ||
132 | |||
133 | if ((ret & 0xC00) == 0x800) { | ||
134 | printk(KERN_INFO "Found NAND on CS%d\n", cs); | ||
135 | if (nandcs > GPMC_CS_NUM) | ||
136 | nandcs = cs; | ||
137 | } | ||
138 | cs++; | ||
139 | } | ||
140 | |||
141 | if (nandcs > GPMC_CS_NUM) { | ||
142 | printk(KERN_INFO "NAND: Unable to find configuration " | ||
143 | "in GPMC\n "); | ||
144 | return; | ||
145 | } | ||
146 | |||
147 | if (nandcs < GPMC_CS_NUM) { | ||
148 | nand_data.cs = nandcs; | ||
149 | nand_data.parts = parts; | ||
150 | nand_data.nr_parts = nr_parts; | ||
151 | nand_data.options = options; | ||
152 | |||
153 | printk(KERN_INFO "Registering NAND on CS%d\n", nandcs); | ||
154 | if (gpmc_nand_init(&nand_data) < 0) | ||
155 | printk(KERN_ERR "Unable to register NAND device\n"); | ||
156 | } | ||
157 | } | ||
158 | #else | ||
159 | void __init omap_nand_flash_init(int options, struct mtd_partition *parts, | ||
160 | int nr_parts) | ||
161 | { | ||
162 | } | ||
163 | #endif | ||
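common-board-devices.c gives boards a single call to hook up an ADS7846 touchscreen on a McSPI bus: it claims the pen-down GPIO, optionally sets its debounce, and registers the SPI board info. A hypothetical caller (bus number, GPIO and debounce value are made up for illustration):

	#include <linux/init.h>

	#include "common-board-devices.h"

	#define EXAMPLE_TS_PENDOWN_GPIO	27	/* hypothetical pen-down GPIO */

	static void __init example_ts_init(void)
	{
		/*
		 * SPI bus 1, pen-down IRQ on GPIO 27, 310 us of debounce;
		 * NULL keeps the default ads7846_config from the helper.
		 */
		omap_ads7846_init(1, EXAMPLE_TS_PENDOWN_GPIO, 310, NULL);
	}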
diff --git a/arch/arm/mach-omap2/common-board-devices.h b/arch/arm/mach-omap2/common-board-devices.h new file mode 100644 index 000000000000..eb80b3b0ef47 --- /dev/null +++ b/arch/arm/mach-omap2/common-board-devices.h | |||
@@ -0,0 +1,35 @@ | |||
1 | #ifndef __OMAP_COMMON_BOARD_DEVICES__ | ||
2 | #define __OMAP_COMMON_BOARD_DEVICES__ | ||
3 | |||
4 | struct twl4030_platform_data; | ||
5 | struct mtd_partition; | ||
6 | |||
7 | void omap_pmic_init(int bus, u32 clkrate, const char *pmic_type, int pmic_irq, | ||
8 | struct twl4030_platform_data *pmic_data); | ||
9 | |||
10 | static inline void omap2_pmic_init(const char *pmic_type, | ||
11 | struct twl4030_platform_data *pmic_data) | ||
12 | { | ||
13 | omap_pmic_init(2, 2600, pmic_type, INT_24XX_SYS_NIRQ, pmic_data); | ||
14 | } | ||
15 | |||
16 | static inline void omap3_pmic_init(const char *pmic_type, | ||
17 | struct twl4030_platform_data *pmic_data) | ||
18 | { | ||
19 | omap_pmic_init(1, 2600, pmic_type, INT_34XX_SYS_NIRQ, pmic_data); | ||
20 | } | ||
21 | |||
22 | static inline void omap4_pmic_init(const char *pmic_type, | ||
23 | struct twl4030_platform_data *pmic_data) | ||
24 | { | ||
25 | /* Phoenix Audio IC needs I2C1 to start with 400 KHz or less */ | ||
26 | omap_pmic_init(1, 400, pmic_type, OMAP44XX_IRQ_SYS_1N, pmic_data); | ||
27 | } | ||
28 | |||
29 | struct ads7846_platform_data; | ||
30 | |||
31 | void omap_ads7846_init(int bus_num, int gpio_pendown, int gpio_debounce, | ||
32 | struct ads7846_platform_data *board_pdata); | ||
33 | void omap_nand_flash_init(int opts, struct mtd_partition *parts, int n_parts); | ||
34 | |||
35 | #endif /* __OMAP_COMMON_BOARD_DEVICES__ */ | ||
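Besides the PMIC wrappers, the header exports omap_nand_flash_init(), which scans the GPMC chip-selects for NAND and registers the controller with the given partition table. A hypothetical board with a fixed layout would call it like this (partition names and sizes are illustrative):

	#include <linux/init.h>
	#include <linux/kernel.h>
	#include <linux/mtd/mtd.h>
	#include <linux/mtd/partitions.h>

	#include "common-board-devices.h"

	static struct mtd_partition example_nand_partitions[] = {
		{
			.name	= "bootloader",
			.offset	= 0,
			.size	= 0x80000,		/* 512 KiB, illustrative */
		},
		{
			.name	= "filesystem",
			.offset	= MTDPART_OFS_APPEND,
			.size	= MTDPART_SIZ_FULL,
		},
	};

	static void __init example_flash_init(void)
	{
		/* 0: no special NAND options for this illustrative board */
		omap_nand_flash_init(0, example_nand_partitions,
				     ARRAY_SIZE(example_nand_partitions));
	}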
diff --git a/arch/arm/mach-omap2/cpuidle34xx.c b/arch/arm/mach-omap2/cpuidle34xx.c index 1c240eff3918..4bf6e6e8b100 100644 --- a/arch/arm/mach-omap2/cpuidle34xx.c +++ b/arch/arm/mach-omap2/cpuidle34xx.c | |||
@@ -36,36 +36,6 @@ | |||
36 | 36 | ||
37 | #ifdef CONFIG_CPU_IDLE | 37 | #ifdef CONFIG_CPU_IDLE |
38 | 38 | ||
39 | #define OMAP3_MAX_STATES 7 | ||
40 | #define OMAP3_STATE_C1 0 /* C1 - MPU WFI + Core active */ | ||
41 | #define OMAP3_STATE_C2 1 /* C2 - MPU WFI + Core inactive */ | ||
42 | #define OMAP3_STATE_C3 2 /* C3 - MPU CSWR + Core inactive */ | ||
43 | #define OMAP3_STATE_C4 3 /* C4 - MPU OFF + Core iactive */ | ||
44 | #define OMAP3_STATE_C5 4 /* C5 - MPU RET + Core RET */ | ||
45 | #define OMAP3_STATE_C6 5 /* C6 - MPU OFF + Core RET */ | ||
46 | #define OMAP3_STATE_C7 6 /* C7 - MPU OFF + Core OFF */ | ||
47 | |||
48 | #define OMAP3_STATE_MAX OMAP3_STATE_C7 | ||
49 | |||
50 | #define CPUIDLE_FLAG_CHECK_BM 0x10000 /* use omap3_enter_idle_bm() */ | ||
51 | |||
52 | struct omap3_processor_cx { | ||
53 | u8 valid; | ||
54 | u8 type; | ||
55 | u32 sleep_latency; | ||
56 | u32 wakeup_latency; | ||
57 | u32 mpu_state; | ||
58 | u32 core_state; | ||
59 | u32 threshold; | ||
60 | u32 flags; | ||
61 | const char *desc; | ||
62 | }; | ||
63 | |||
64 | struct omap3_processor_cx omap3_power_states[OMAP3_MAX_STATES]; | ||
65 | struct omap3_processor_cx current_cx_state; | ||
66 | struct powerdomain *mpu_pd, *core_pd, *per_pd; | ||
67 | struct powerdomain *cam_pd; | ||
68 | |||
69 | /* | 39 | /* |
70 | * The latencies/thresholds for various C states have | 40 | * The latencies/thresholds for various C states have |
71 | * to be configured from the respective board files. | 41 | * to be configured from the respective board files. |
@@ -75,27 +45,31 @@ struct powerdomain *cam_pd; | |||
75 | */ | 45 | */ |
76 | static struct cpuidle_params cpuidle_params_table[] = { | 46 | static struct cpuidle_params cpuidle_params_table[] = { |
77 | /* C1 */ | 47 | /* C1 */ |
78 | {1, 2, 2, 5}, | 48 | {2 + 2, 5, 1}, |
79 | /* C2 */ | 49 | /* C2 */ |
80 | {1, 10, 10, 30}, | 50 | {10 + 10, 30, 1}, |
81 | /* C3 */ | 51 | /* C3 */ |
82 | {1, 50, 50, 300}, | 52 | {50 + 50, 300, 1}, |
83 | /* C4 */ | 53 | /* C4 */ |
84 | {1, 1500, 1800, 4000}, | 54 | {1500 + 1800, 4000, 1}, |
85 | /* C5 */ | 55 | /* C5 */ |
86 | {1, 2500, 7500, 12000}, | 56 | {2500 + 7500, 12000, 1}, |
87 | /* C6 */ | 57 | /* C6 */ |
88 | {1, 3000, 8500, 15000}, | 58 | {3000 + 8500, 15000, 1}, |
89 | /* C7 */ | 59 | /* C7 */ |
90 | {1, 10000, 30000, 300000}, | 60 | {10000 + 30000, 300000, 1}, |
91 | }; | 61 | }; |
62 | #define OMAP3_NUM_STATES ARRAY_SIZE(cpuidle_params_table) | ||
92 | 63 | ||
93 | static int omap3_idle_bm_check(void) | 64 | /* Mach specific information to be recorded in the C-state driver_data */ |
94 | { | 65 | struct omap3_idle_statedata { |
95 | if (!omap3_can_sleep()) | 66 | u32 mpu_state; |
96 | return 1; | 67 | u32 core_state; |
97 | return 0; | 68 | u8 valid; |
98 | } | 69 | }; |
70 | struct omap3_idle_statedata omap3_idle_data[OMAP3_NUM_STATES]; | ||
71 | |||
72 | struct powerdomain *mpu_pd, *core_pd, *per_pd, *cam_pd; | ||
99 | 73 | ||
100 | static int _cpuidle_allow_idle(struct powerdomain *pwrdm, | 74 | static int _cpuidle_allow_idle(struct powerdomain *pwrdm, |
101 | struct clockdomain *clkdm) | 75 | struct clockdomain *clkdm) |
@@ -122,12 +96,10 @@ static int _cpuidle_deny_idle(struct powerdomain *pwrdm, | |||
122 | static int omap3_enter_idle(struct cpuidle_device *dev, | 96 | static int omap3_enter_idle(struct cpuidle_device *dev, |
123 | struct cpuidle_state *state) | 97 | struct cpuidle_state *state) |
124 | { | 98 | { |
125 | struct omap3_processor_cx *cx = cpuidle_get_statedata(state); | 99 | struct omap3_idle_statedata *cx = cpuidle_get_statedata(state); |
126 | struct timespec ts_preidle, ts_postidle, ts_idle; | 100 | struct timespec ts_preidle, ts_postidle, ts_idle; |
127 | u32 mpu_state = cx->mpu_state, core_state = cx->core_state; | 101 | u32 mpu_state = cx->mpu_state, core_state = cx->core_state; |
128 | 102 | ||
129 | current_cx_state = *cx; | ||
130 | |||
131 | /* Used to keep track of the total time in idle */ | 103 | /* Used to keep track of the total time in idle */ |
132 | getnstimeofday(&ts_preidle); | 104 | getnstimeofday(&ts_preidle); |
133 | 105 | ||
@@ -140,7 +112,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev, | |||
140 | if (omap_irq_pending() || need_resched()) | 112 | if (omap_irq_pending() || need_resched()) |
141 | goto return_sleep_time; | 113 | goto return_sleep_time; |
142 | 114 | ||
143 | if (cx->type == OMAP3_STATE_C1) { | 115 | /* Deny idle for C1 */ |
116 | if (state == &dev->states[0]) { | ||
144 | pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle); | 117 | pwrdm_for_each_clkdm(mpu_pd, _cpuidle_deny_idle); |
145 | pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); | 118 | pwrdm_for_each_clkdm(core_pd, _cpuidle_deny_idle); |
146 | } | 119 | } |
@@ -148,7 +121,8 @@ static int omap3_enter_idle(struct cpuidle_device *dev, | |||
148 | /* Execute ARM wfi */ | 121 | /* Execute ARM wfi */ |
149 | omap_sram_idle(); | 122 | omap_sram_idle(); |
150 | 123 | ||
151 | if (cx->type == OMAP3_STATE_C1) { | 124 | /* Re-allow idle for C1 */ |
125 | if (state == &dev->states[0]) { | ||
152 | pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle); | 126 | pwrdm_for_each_clkdm(mpu_pd, _cpuidle_allow_idle); |
153 | pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle); | 127 | pwrdm_for_each_clkdm(core_pd, _cpuidle_allow_idle); |
154 | } | 128 | } |
@@ -164,41 +138,53 @@ return_sleep_time: | |||
164 | } | 138 | } |
165 | 139 | ||
166 | /** | 140 | /** |
167 | * next_valid_state - Find next valid c-state | 141 | * next_valid_state - Find next valid C-state |
168 | * @dev: cpuidle device | 142 | * @dev: cpuidle device |
169 | * @state: Currently selected c-state | 143 | * @state: Currently selected C-state |
170 | * | 144 | * |
171 | * If the current state is valid, it is returned back to the caller. | 145 | * If the current state is valid, it is returned back to the caller. |
172 | * Else, this function searches for a lower c-state which is still | 146 | * Else, this function searches for a lower c-state which is still |
173 | * valid (as defined in omap3_power_states[]). | 147 | * valid. |
148 | * | ||
149 | * A state is valid if the 'valid' field is enabled and | ||
150 | * if it satisfies the enable_off_mode condition. | ||
174 | */ | 151 | */ |
175 | static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, | 152 | static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, |
176 | struct cpuidle_state *curr) | 153 | struct cpuidle_state *curr) |
177 | { | 154 | { |
178 | struct cpuidle_state *next = NULL; | 155 | struct cpuidle_state *next = NULL; |
179 | struct omap3_processor_cx *cx; | 156 | struct omap3_idle_statedata *cx = cpuidle_get_statedata(curr); |
157 | u32 mpu_deepest_state = PWRDM_POWER_RET; | ||
158 | u32 core_deepest_state = PWRDM_POWER_RET; | ||
180 | 159 | ||
181 | cx = (struct omap3_processor_cx *)cpuidle_get_statedata(curr); | 160 | if (enable_off_mode) { |
161 | mpu_deepest_state = PWRDM_POWER_OFF; | ||
162 | /* | ||
163 | * Erratum i583: applies to ES rev < ES1.2 on 3630. | ||
164 | * CORE OFF mode is not supported in a stable form, restrict | ||
165 | * instead the CORE state to RET. | ||
166 | */ | ||
167 | if (!IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) | ||
168 | core_deepest_state = PWRDM_POWER_OFF; | ||
169 | } | ||
182 | 170 | ||
183 | /* Check if current state is valid */ | 171 | /* Check if current state is valid */ |
184 | if (cx->valid) { | 172 | if ((cx->valid) && |
173 | (cx->mpu_state >= mpu_deepest_state) && | ||
174 | (cx->core_state >= core_deepest_state)) { | ||
185 | return curr; | 175 | return curr; |
186 | } else { | 176 | } else { |
187 | u8 idx = OMAP3_STATE_MAX; | 177 | int idx = OMAP3_NUM_STATES - 1; |
188 | 178 | ||
189 | /* | 179 | /* Reach the current state starting at highest C-state */ |
190 | * Reach the current state starting at highest C-state | 180 | for (; idx >= 0; idx--) { |
191 | */ | ||
192 | for (; idx >= OMAP3_STATE_C1; idx--) { | ||
193 | if (&dev->states[idx] == curr) { | 181 | if (&dev->states[idx] == curr) { |
194 | next = &dev->states[idx]; | 182 | next = &dev->states[idx]; |
195 | break; | 183 | break; |
196 | } | 184 | } |
197 | } | 185 | } |
198 | 186 | ||
199 | /* | 187 | /* Should never hit this condition */ |
200 | * Should never hit this condition. | ||
201 | */ | ||
202 | WARN_ON(next == NULL); | 188 | WARN_ON(next == NULL); |
203 | 189 | ||
204 | /* | 190 | /* |
@@ -206,17 +192,17 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, | |||
206 | * Start search from the next (lower) state. | 192 | * Start search from the next (lower) state. |
207 | */ | 193 | */ |
208 | idx--; | 194 | idx--; |
209 | for (; idx >= OMAP3_STATE_C1; idx--) { | 195 | for (; idx >= 0; idx--) { |
210 | struct omap3_processor_cx *cx; | ||
211 | |||
212 | cx = cpuidle_get_statedata(&dev->states[idx]); | 196 | cx = cpuidle_get_statedata(&dev->states[idx]); |
213 | if (cx->valid) { | 197 | if ((cx->valid) && |
198 | (cx->mpu_state >= mpu_deepest_state) && | ||
199 | (cx->core_state >= core_deepest_state)) { | ||
214 | next = &dev->states[idx]; | 200 | next = &dev->states[idx]; |
215 | break; | 201 | break; |
216 | } | 202 | } |
217 | } | 203 | } |
218 | /* | 204 | /* |
219 | * C1 and C2 are always valid. | 205 | * C1 is always valid. |
220 | * So, no need to check for 'next==NULL' outside this loop. | 206 | * So, no need to check for 'next==NULL' outside this loop. |
221 | */ | 207 | */ |
222 | } | 208 | } |
@@ -229,36 +215,22 @@ static struct cpuidle_state *next_valid_state(struct cpuidle_device *dev, | |||
229 | * @dev: cpuidle device | 215 | * @dev: cpuidle device |
230 | * @state: The target state to be programmed | 216 | * @state: The target state to be programmed |
231 | * | 217 | * |
232 | * Used for C states with CPUIDLE_FLAG_CHECK_BM flag set. This | 218 | * This function checks for any pending activity and then programs |
233 | * function checks for any pending activity and then programs the | 219 | * the device to the specified or a safer state. |
234 | * device to the specified or a safer state. | ||
235 | */ | 220 | */ |
236 | static int omap3_enter_idle_bm(struct cpuidle_device *dev, | 221 | static int omap3_enter_idle_bm(struct cpuidle_device *dev, |
237 | struct cpuidle_state *state) | 222 | struct cpuidle_state *state) |
238 | { | 223 | { |
239 | struct cpuidle_state *new_state = next_valid_state(dev, state); | 224 | struct cpuidle_state *new_state; |
240 | u32 core_next_state, per_next_state = 0, per_saved_state = 0; | 225 | u32 core_next_state, per_next_state = 0, per_saved_state = 0, cam_state; |
241 | u32 cam_state; | 226 | struct omap3_idle_statedata *cx; |
242 | struct omap3_processor_cx *cx; | ||
243 | int ret; | 227 | int ret; |
244 | 228 | ||
245 | if ((state->flags & CPUIDLE_FLAG_CHECK_BM) && omap3_idle_bm_check()) { | 229 | if (!omap3_can_sleep()) { |
246 | BUG_ON(!dev->safe_state); | ||
247 | new_state = dev->safe_state; | 230 | new_state = dev->safe_state; |
248 | goto select_state; | 231 | goto select_state; |
249 | } | 232 | } |
250 | 233 | ||
251 | cx = cpuidle_get_statedata(state); | ||
252 | core_next_state = cx->core_state; | ||
253 | |||
254 | /* | ||
255 | * FIXME: we currently manage device-specific idle states | ||
256 | * for PER and CORE in combination with CPU-specific | ||
257 | * idle states. This is wrong, and device-specific | ||
258 | * idle management needs to be separated out into | ||
259 | * its own code. | ||
260 | */ | ||
261 | |||
262 | /* | 234 | /* |
263 | * Prevent idle completely if CAM is active. | 235 | * Prevent idle completely if CAM is active. |
264 | * CAM does not have wakeup capability in OMAP3. | 236 | * CAM does not have wakeup capability in OMAP3. |
@@ -270,9 +242,19 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, | |||
270 | } | 242 | } |
271 | 243 | ||
272 | /* | 244 | /* |
245 | * FIXME: we currently manage device-specific idle states | ||
246 | * for PER and CORE in combination with CPU-specific | ||
247 | * idle states. This is wrong, and device-specific | ||
248 | * idle management needs to be separated out into | ||
249 | * its own code. | ||
250 | */ | ||
251 | |||
252 | /* | ||
273 | * Prevent PER off if CORE is not in retention or off as this | 253 | * Prevent PER off if CORE is not in retention or off as this |
274 | * would disable PER wakeups completely. | 254 | * would disable PER wakeups completely. |
275 | */ | 255 | */ |
256 | cx = cpuidle_get_statedata(state); | ||
257 | core_next_state = cx->core_state; | ||
276 | per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd); | 258 | per_next_state = per_saved_state = pwrdm_read_next_pwrst(per_pd); |
277 | if ((per_next_state == PWRDM_POWER_OFF) && | 259 | if ((per_next_state == PWRDM_POWER_OFF) && |
278 | (core_next_state > PWRDM_POWER_RET)) | 260 | (core_next_state > PWRDM_POWER_RET)) |
@@ -282,6 +264,8 @@ static int omap3_enter_idle_bm(struct cpuidle_device *dev, | |||
282 | if (per_next_state != per_saved_state) | 264 | if (per_next_state != per_saved_state) |
283 | pwrdm_set_next_pwrst(per_pd, per_next_state); | 265 | pwrdm_set_next_pwrst(per_pd, per_next_state); |
284 | 266 | ||
267 | new_state = next_valid_state(dev, state); | ||
268 | |||
285 | select_state: | 269 | select_state: |
286 | dev->last_state = new_state; | 270 | dev->last_state = new_state; |
287 | ret = omap3_enter_idle(dev, new_state); | 271 | ret = omap3_enter_idle(dev, new_state); |
@@ -295,31 +279,6 @@ select_state: | |||
295 | 279 | ||
296 | DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev); | 280 | DEFINE_PER_CPU(struct cpuidle_device, omap3_idle_dev); |
297 | 281 | ||
298 | /** | ||
299 | * omap3_cpuidle_update_states() - Update the cpuidle states | ||
300 | * @mpu_deepest_state: Enable states up to and including this for mpu domain | ||
301 | * @core_deepest_state: Enable states up to and including this for core domain | ||
302 | * | ||
303 | * This goes through the list of states available and enables and disables the | ||
304 | * validity of C states based on deepest state that can be achieved for the | ||
305 | * variable domain | ||
306 | */ | ||
307 | void omap3_cpuidle_update_states(u32 mpu_deepest_state, u32 core_deepest_state) | ||
308 | { | ||
309 | int i; | ||
310 | |||
311 | for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) { | ||
312 | struct omap3_processor_cx *cx = &omap3_power_states[i]; | ||
313 | |||
314 | if ((cx->mpu_state >= mpu_deepest_state) && | ||
315 | (cx->core_state >= core_deepest_state)) { | ||
316 | cx->valid = 1; | ||
317 | } else { | ||
318 | cx->valid = 0; | ||
319 | } | ||
320 | } | ||
321 | } | ||
322 | |||
323 | void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params) | 282 | void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params) |
324 | { | 283 | { |
325 | int i; | 284 | int i; |
@@ -327,212 +286,109 @@ void omap3_pm_init_cpuidle(struct cpuidle_params *cpuidle_board_params) | |||
327 | if (!cpuidle_board_params) | 286 | if (!cpuidle_board_params) |
328 | return; | 287 | return; |
329 | 288 | ||
330 | for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) { | 289 | for (i = 0; i < OMAP3_NUM_STATES; i++) { |
331 | cpuidle_params_table[i].valid = | 290 | cpuidle_params_table[i].valid = cpuidle_board_params[i].valid; |
332 | cpuidle_board_params[i].valid; | 291 | cpuidle_params_table[i].exit_latency = |
333 | cpuidle_params_table[i].sleep_latency = | 292 | cpuidle_board_params[i].exit_latency; |
334 | cpuidle_board_params[i].sleep_latency; | 293 | cpuidle_params_table[i].target_residency = |
335 | cpuidle_params_table[i].wake_latency = | 294 | cpuidle_board_params[i].target_residency; |
336 | cpuidle_board_params[i].wake_latency; | ||
337 | cpuidle_params_table[i].threshold = | ||
338 | cpuidle_board_params[i].threshold; | ||
339 | } | 295 | } |
340 | return; | 296 | return; |
341 | } | 297 | } |
342 | 298 | ||
343 | /* omap3_init_power_states - Initialises the OMAP3 specific C states. | ||
344 | * | ||
345 | * Below is the desciption of each C state. | ||
346 | * C1 . MPU WFI + Core active | ||
347 | * C2 . MPU WFI + Core inactive | ||
348 | * C3 . MPU CSWR + Core inactive | ||
349 | * C4 . MPU OFF + Core inactive | ||
350 | * C5 . MPU CSWR + Core CSWR | ||
351 | * C6 . MPU OFF + Core CSWR | ||
352 | * C7 . MPU OFF + Core OFF | ||
353 | */ | ||
354 | void omap_init_power_states(void) | ||
355 | { | ||
356 | /* C1 . MPU WFI + Core active */ | ||
357 | omap3_power_states[OMAP3_STATE_C1].valid = | ||
358 | cpuidle_params_table[OMAP3_STATE_C1].valid; | ||
359 | omap3_power_states[OMAP3_STATE_C1].type = OMAP3_STATE_C1; | ||
360 | omap3_power_states[OMAP3_STATE_C1].sleep_latency = | ||
361 | cpuidle_params_table[OMAP3_STATE_C1].sleep_latency; | ||
362 | omap3_power_states[OMAP3_STATE_C1].wakeup_latency = | ||
363 | cpuidle_params_table[OMAP3_STATE_C1].wake_latency; | ||
364 | omap3_power_states[OMAP3_STATE_C1].threshold = | ||
365 | cpuidle_params_table[OMAP3_STATE_C1].threshold; | ||
366 | omap3_power_states[OMAP3_STATE_C1].mpu_state = PWRDM_POWER_ON; | ||
367 | omap3_power_states[OMAP3_STATE_C1].core_state = PWRDM_POWER_ON; | ||
368 | omap3_power_states[OMAP3_STATE_C1].flags = CPUIDLE_FLAG_TIME_VALID; | ||
369 | omap3_power_states[OMAP3_STATE_C1].desc = "MPU ON + CORE ON"; | ||
370 | |||
371 | /* C2 . MPU WFI + Core inactive */ | ||
372 | omap3_power_states[OMAP3_STATE_C2].valid = | ||
373 | cpuidle_params_table[OMAP3_STATE_C2].valid; | ||
374 | omap3_power_states[OMAP3_STATE_C2].type = OMAP3_STATE_C2; | ||
375 | omap3_power_states[OMAP3_STATE_C2].sleep_latency = | ||
376 | cpuidle_params_table[OMAP3_STATE_C2].sleep_latency; | ||
377 | omap3_power_states[OMAP3_STATE_C2].wakeup_latency = | ||
378 | cpuidle_params_table[OMAP3_STATE_C2].wake_latency; | ||
379 | omap3_power_states[OMAP3_STATE_C2].threshold = | ||
380 | cpuidle_params_table[OMAP3_STATE_C2].threshold; | ||
381 | omap3_power_states[OMAP3_STATE_C2].mpu_state = PWRDM_POWER_ON; | ||
382 | omap3_power_states[OMAP3_STATE_C2].core_state = PWRDM_POWER_ON; | ||
383 | omap3_power_states[OMAP3_STATE_C2].flags = CPUIDLE_FLAG_TIME_VALID | | ||
384 | CPUIDLE_FLAG_CHECK_BM; | ||
385 | omap3_power_states[OMAP3_STATE_C2].desc = "MPU ON + CORE ON"; | ||
386 | |||
387 | /* C3 . MPU CSWR + Core inactive */ | ||
388 | omap3_power_states[OMAP3_STATE_C3].valid = | ||
389 | cpuidle_params_table[OMAP3_STATE_C3].valid; | ||
390 | omap3_power_states[OMAP3_STATE_C3].type = OMAP3_STATE_C3; | ||
391 | omap3_power_states[OMAP3_STATE_C3].sleep_latency = | ||
392 | cpuidle_params_table[OMAP3_STATE_C3].sleep_latency; | ||
393 | omap3_power_states[OMAP3_STATE_C3].wakeup_latency = | ||
394 | cpuidle_params_table[OMAP3_STATE_C3].wake_latency; | ||
395 | omap3_power_states[OMAP3_STATE_C3].threshold = | ||
396 | cpuidle_params_table[OMAP3_STATE_C3].threshold; | ||
397 | omap3_power_states[OMAP3_STATE_C3].mpu_state = PWRDM_POWER_RET; | ||
398 | omap3_power_states[OMAP3_STATE_C3].core_state = PWRDM_POWER_ON; | ||
399 | omap3_power_states[OMAP3_STATE_C3].flags = CPUIDLE_FLAG_TIME_VALID | | ||
400 | CPUIDLE_FLAG_CHECK_BM; | ||
401 | omap3_power_states[OMAP3_STATE_C3].desc = "MPU RET + CORE ON"; | ||
402 | |||
403 | /* C4 . MPU OFF + Core inactive */ | ||
404 | omap3_power_states[OMAP3_STATE_C4].valid = | ||
405 | cpuidle_params_table[OMAP3_STATE_C4].valid; | ||
406 | omap3_power_states[OMAP3_STATE_C4].type = OMAP3_STATE_C4; | ||
407 | omap3_power_states[OMAP3_STATE_C4].sleep_latency = | ||
408 | cpuidle_params_table[OMAP3_STATE_C4].sleep_latency; | ||
409 | omap3_power_states[OMAP3_STATE_C4].wakeup_latency = | ||
410 | cpuidle_params_table[OMAP3_STATE_C4].wake_latency; | ||
411 | omap3_power_states[OMAP3_STATE_C4].threshold = | ||
412 | cpuidle_params_table[OMAP3_STATE_C4].threshold; | ||
413 | omap3_power_states[OMAP3_STATE_C4].mpu_state = PWRDM_POWER_OFF; | ||
414 | omap3_power_states[OMAP3_STATE_C4].core_state = PWRDM_POWER_ON; | ||
415 | omap3_power_states[OMAP3_STATE_C4].flags = CPUIDLE_FLAG_TIME_VALID | | ||
416 | CPUIDLE_FLAG_CHECK_BM; | ||
417 | omap3_power_states[OMAP3_STATE_C4].desc = "MPU OFF + CORE ON"; | ||
418 | |||
419 | /* C5 . MPU CSWR + Core CSWR*/ | ||
420 | omap3_power_states[OMAP3_STATE_C5].valid = | ||
421 | cpuidle_params_table[OMAP3_STATE_C5].valid; | ||
422 | omap3_power_states[OMAP3_STATE_C5].type = OMAP3_STATE_C5; | ||
423 | omap3_power_states[OMAP3_STATE_C5].sleep_latency = | ||
424 | cpuidle_params_table[OMAP3_STATE_C5].sleep_latency; | ||
425 | omap3_power_states[OMAP3_STATE_C5].wakeup_latency = | ||
426 | cpuidle_params_table[OMAP3_STATE_C5].wake_latency; | ||
427 | omap3_power_states[OMAP3_STATE_C5].threshold = | ||
428 | cpuidle_params_table[OMAP3_STATE_C5].threshold; | ||
429 | omap3_power_states[OMAP3_STATE_C5].mpu_state = PWRDM_POWER_RET; | ||
430 | omap3_power_states[OMAP3_STATE_C5].core_state = PWRDM_POWER_RET; | ||
431 | omap3_power_states[OMAP3_STATE_C5].flags = CPUIDLE_FLAG_TIME_VALID | | ||
432 | CPUIDLE_FLAG_CHECK_BM; | ||
433 | omap3_power_states[OMAP3_STATE_C5].desc = "MPU RET + CORE RET"; | ||
434 | |||
435 | /* C6 . MPU OFF + Core CSWR */ | ||
436 | omap3_power_states[OMAP3_STATE_C6].valid = | ||
437 | cpuidle_params_table[OMAP3_STATE_C6].valid; | ||
438 | omap3_power_states[OMAP3_STATE_C6].type = OMAP3_STATE_C6; | ||
439 | omap3_power_states[OMAP3_STATE_C6].sleep_latency = | ||
440 | cpuidle_params_table[OMAP3_STATE_C6].sleep_latency; | ||
441 | omap3_power_states[OMAP3_STATE_C6].wakeup_latency = | ||
442 | cpuidle_params_table[OMAP3_STATE_C6].wake_latency; | ||
443 | omap3_power_states[OMAP3_STATE_C6].threshold = | ||
444 | cpuidle_params_table[OMAP3_STATE_C6].threshold; | ||
445 | omap3_power_states[OMAP3_STATE_C6].mpu_state = PWRDM_POWER_OFF; | ||
446 | omap3_power_states[OMAP3_STATE_C6].core_state = PWRDM_POWER_RET; | ||
447 | omap3_power_states[OMAP3_STATE_C6].flags = CPUIDLE_FLAG_TIME_VALID | | ||
448 | CPUIDLE_FLAG_CHECK_BM; | ||
449 | omap3_power_states[OMAP3_STATE_C6].desc = "MPU OFF + CORE RET"; | ||
450 | |||
451 | /* C7 . MPU OFF + Core OFF */ | ||
452 | omap3_power_states[OMAP3_STATE_C7].valid = | ||
453 | cpuidle_params_table[OMAP3_STATE_C7].valid; | ||
454 | omap3_power_states[OMAP3_STATE_C7].type = OMAP3_STATE_C7; | ||
455 | omap3_power_states[OMAP3_STATE_C7].sleep_latency = | ||
456 | cpuidle_params_table[OMAP3_STATE_C7].sleep_latency; | ||
457 | omap3_power_states[OMAP3_STATE_C7].wakeup_latency = | ||
458 | cpuidle_params_table[OMAP3_STATE_C7].wake_latency; | ||
459 | omap3_power_states[OMAP3_STATE_C7].threshold = | ||
460 | cpuidle_params_table[OMAP3_STATE_C7].threshold; | ||
461 | omap3_power_states[OMAP3_STATE_C7].mpu_state = PWRDM_POWER_OFF; | ||
462 | omap3_power_states[OMAP3_STATE_C7].core_state = PWRDM_POWER_OFF; | ||
463 | omap3_power_states[OMAP3_STATE_C7].flags = CPUIDLE_FLAG_TIME_VALID | | ||
464 | CPUIDLE_FLAG_CHECK_BM; | ||
465 | omap3_power_states[OMAP3_STATE_C7].desc = "MPU OFF + CORE OFF"; | ||
466 | |||
467 | /* | ||
468 | * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot | ||
469 | * enable OFF mode in a stable form for previous revisions. | ||
470 | * we disable C7 state as a result. | ||
471 | */ | ||
472 | if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) { | ||
473 | omap3_power_states[OMAP3_STATE_C7].valid = 0; | ||
474 | cpuidle_params_table[OMAP3_STATE_C7].valid = 0; | ||
475 | pr_warn("%s: core off state C7 disabled due to i583\n", | ||
476 | __func__); | ||
477 | } | ||
478 | } | ||
479 | |||
480 | struct cpuidle_driver omap3_idle_driver = { | 299 | struct cpuidle_driver omap3_idle_driver = { |
481 | .name = "omap3_idle", | 300 | .name = "omap3_idle", |
482 | .owner = THIS_MODULE, | 301 | .owner = THIS_MODULE, |
483 | }; | 302 | }; |
484 | 303 | ||
304 | /* Helper to fill the C-state common data and register the driver_data */ | ||
305 | static inline struct omap3_idle_statedata *_fill_cstate( | ||
306 | struct cpuidle_device *dev, | ||
307 | int idx, const char *descr) | ||
308 | { | ||
309 | struct omap3_idle_statedata *cx = &omap3_idle_data[idx]; | ||
310 | struct cpuidle_state *state = &dev->states[idx]; | ||
311 | |||
312 | state->exit_latency = cpuidle_params_table[idx].exit_latency; | ||
313 | state->target_residency = cpuidle_params_table[idx].target_residency; | ||
314 | state->flags = CPUIDLE_FLAG_TIME_VALID; | ||
315 | state->enter = omap3_enter_idle_bm; | ||
316 | cx->valid = cpuidle_params_table[idx].valid; | ||
317 | sprintf(state->name, "C%d", idx + 1); | ||
318 | strncpy(state->desc, descr, CPUIDLE_DESC_LEN); | ||
319 | cpuidle_set_statedata(state, cx); | ||
320 | |||
321 | return cx; | ||
322 | } | ||
323 | |||
485 | /** | 324 | /** |
486 | * omap3_idle_init - Init routine for OMAP3 idle | 325 | * omap3_idle_init - Init routine for OMAP3 idle |
487 | * | 326 | * |
488 | * Registers the OMAP3 specific cpuidle driver with the cpuidle | 327 | * Registers the OMAP3 specific cpuidle driver to the cpuidle |
489 | * framework with the valid set of states. | 328 | * framework with the valid set of states. |
490 | */ | 329 | */ |
491 | int __init omap3_idle_init(void) | 330 | int __init omap3_idle_init(void) |
492 | { | 331 | { |
493 | int i, count = 0; | ||
494 | struct omap3_processor_cx *cx; | ||
495 | struct cpuidle_state *state; | ||
496 | struct cpuidle_device *dev; | 332 | struct cpuidle_device *dev; |
333 | struct omap3_idle_statedata *cx; | ||
497 | 334 | ||
498 | mpu_pd = pwrdm_lookup("mpu_pwrdm"); | 335 | mpu_pd = pwrdm_lookup("mpu_pwrdm"); |
499 | core_pd = pwrdm_lookup("core_pwrdm"); | 336 | core_pd = pwrdm_lookup("core_pwrdm"); |
500 | per_pd = pwrdm_lookup("per_pwrdm"); | 337 | per_pd = pwrdm_lookup("per_pwrdm"); |
501 | cam_pd = pwrdm_lookup("cam_pwrdm"); | 338 | cam_pd = pwrdm_lookup("cam_pwrdm"); |
502 | 339 | ||
503 | omap_init_power_states(); | ||
504 | cpuidle_register_driver(&omap3_idle_driver); | 340 | cpuidle_register_driver(&omap3_idle_driver); |
505 | |||
506 | dev = &per_cpu(omap3_idle_dev, smp_processor_id()); | 341 | dev = &per_cpu(omap3_idle_dev, smp_processor_id()); |
507 | 342 | ||
508 | for (i = OMAP3_STATE_C1; i < OMAP3_MAX_STATES; i++) { | 343 | /* C1 . MPU WFI + Core active */ |
509 | cx = &omap3_power_states[i]; | 344 | cx = _fill_cstate(dev, 0, "MPU ON + CORE ON"); |
510 | state = &dev->states[count]; | 345 | (&dev->states[0])->enter = omap3_enter_idle; |
511 | 346 | dev->safe_state = &dev->states[0]; | |
512 | if (!cx->valid) | 347 | cx->valid = 1; /* C1 is always valid */ |
513 | continue; | 348 | cx->mpu_state = PWRDM_POWER_ON; |
514 | cpuidle_set_statedata(state, cx); | 349 | cx->core_state = PWRDM_POWER_ON; |
515 | state->exit_latency = cx->sleep_latency + cx->wakeup_latency; | ||
516 | state->target_residency = cx->threshold; | ||
517 | state->flags = cx->flags; | ||
518 | state->enter = (state->flags & CPUIDLE_FLAG_CHECK_BM) ? | ||
519 | omap3_enter_idle_bm : omap3_enter_idle; | ||
520 | if (cx->type == OMAP3_STATE_C1) | ||
521 | dev->safe_state = state; | ||
522 | sprintf(state->name, "C%d", count+1); | ||
523 | strncpy(state->desc, cx->desc, CPUIDLE_DESC_LEN); | ||
524 | count++; | ||
525 | } | ||
526 | 350 | ||
527 | if (!count) | 351 | /* C2 . MPU WFI + Core inactive */ |
528 | return -EINVAL; | 352 | cx = _fill_cstate(dev, 1, "MPU ON + CORE ON"); |
529 | dev->state_count = count; | 353 | cx->mpu_state = PWRDM_POWER_ON; |
354 | cx->core_state = PWRDM_POWER_ON; | ||
355 | |||
356 | /* C3 . MPU CSWR + Core inactive */ | ||
357 | cx = _fill_cstate(dev, 2, "MPU RET + CORE ON"); | ||
358 | cx->mpu_state = PWRDM_POWER_RET; | ||
359 | cx->core_state = PWRDM_POWER_ON; | ||
530 | 360 | ||
531 | if (enable_off_mode) | 361 | /* C4 . MPU OFF + Core inactive */ |
532 | omap3_cpuidle_update_states(PWRDM_POWER_OFF, PWRDM_POWER_OFF); | 362 | cx = _fill_cstate(dev, 3, "MPU OFF + CORE ON"); |
533 | else | 363 | cx->mpu_state = PWRDM_POWER_OFF; |
534 | omap3_cpuidle_update_states(PWRDM_POWER_RET, PWRDM_POWER_RET); | 364 | cx->core_state = PWRDM_POWER_ON; |
365 | |||
366 | /* C5 . MPU RET + Core RET */ | ||
367 | cx = _fill_cstate(dev, 4, "MPU RET + CORE RET"); | ||
368 | cx->mpu_state = PWRDM_POWER_RET; | ||
369 | cx->core_state = PWRDM_POWER_RET; | ||
370 | |||
371 | /* C6 . MPU OFF + Core RET */ | ||
372 | cx = _fill_cstate(dev, 5, "MPU OFF + CORE RET"); | ||
373 | cx->mpu_state = PWRDM_POWER_OFF; | ||
374 | cx->core_state = PWRDM_POWER_RET; | ||
375 | |||
376 | /* C7 . MPU OFF + Core OFF */ | ||
377 | cx = _fill_cstate(dev, 6, "MPU OFF + CORE OFF"); | ||
378 | /* | ||
379 | * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot | ||
380 | * enable OFF mode in a stable form for previous revisions. | ||
381 | * We disable C7 state as a result. | ||
382 | */ | ||
383 | if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) { | ||
384 | cx->valid = 0; | ||
385 | pr_warn("%s: core off state C7 disabled due to i583\n", | ||
386 | __func__); | ||
387 | } | ||
388 | cx->mpu_state = PWRDM_POWER_OFF; | ||
389 | cx->core_state = PWRDM_POWER_OFF; | ||
535 | 390 | ||
391 | dev->state_count = OMAP3_NUM_STATES; | ||
536 | if (cpuidle_register_device(dev)) { | 392 | if (cpuidle_register_device(dev)) { |
537 | printk(KERN_ERR "%s: CPUidle register device failed\n", | 393 | printk(KERN_ERR "%s: CPUidle register device failed\n", |
538 | __func__); | 394 | __func__); |
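Note on the hunk above: the open-coded state loop in omap3_idle_init() is replaced by per-state calls to a _fill_cstate() helper, whose tail is visible at the top of this hunk. A sketch of how that helper plausibly reads in full, reconstructed around the visible lines (the omap3_idle_data array name is an assumption, it is not shown in this diff):

	static struct omap3_idle_statedata *_fill_cstate(struct cpuidle_device *dev,
							 int idx, const char *descr)
	{
		struct omap3_idle_statedata *cx = &omap3_idle_data[idx]; /* assumed name */
		struct cpuidle_state *state = &dev->states[idx];

		/* timings come from the (possibly board-overridden) params table */
		state->exit_latency = cpuidle_params_table[idx].exit_latency;
		state->target_residency = cpuidle_params_table[idx].target_residency;
		state->flags = CPUIDLE_FLAG_TIME_VALID;
		state->enter = omap3_enter_idle_bm;
		cx->valid = cpuidle_params_table[idx].valid;
		sprintf(state->name, "C%d", idx + 1);
		strncpy(state->desc, descr, CPUIDLE_DESC_LEN);
		cpuidle_set_statedata(state, cx);

		return cx;
	}
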
diff --git a/arch/arm/mach-omap2/gpmc-smc91x.c b/arch/arm/mach-omap2/gpmc-smc91x.c index 877c6f5807b7..ba10c24f3d8d 100644 --- a/arch/arm/mach-omap2/gpmc-smc91x.c +++ b/arch/arm/mach-omap2/gpmc-smc91x.c | |||
@@ -147,25 +147,24 @@ void __init gpmc_smc91x_init(struct omap_smc91x_platform_data *board_data) | |||
147 | goto free1; | 147 | goto free1; |
148 | } | 148 | } |
149 | 149 | ||
150 | if (gpio_request(gpmc_cfg->gpio_irq, "SMC91X irq") < 0) | 150 | if (gpio_request_one(gpmc_cfg->gpio_irq, GPIOF_IN, "SMC91X irq") < 0) |
151 | goto free1; | 151 | goto free1; |
152 | 152 | ||
153 | gpio_direction_input(gpmc_cfg->gpio_irq); | ||
154 | gpmc_smc91x_resources[1].start = gpio_to_irq(gpmc_cfg->gpio_irq); | 153 | gpmc_smc91x_resources[1].start = gpio_to_irq(gpmc_cfg->gpio_irq); |
155 | 154 | ||
156 | if (gpmc_cfg->gpio_pwrdwn) { | 155 | if (gpmc_cfg->gpio_pwrdwn) { |
157 | ret = gpio_request(gpmc_cfg->gpio_pwrdwn, "SMC91X powerdown"); | 156 | ret = gpio_request_one(gpmc_cfg->gpio_pwrdwn, |
157 | GPIOF_OUT_INIT_LOW, "SMC91X powerdown"); | ||
158 | if (ret) | 158 | if (ret) |
159 | goto free2; | 159 | goto free2; |
160 | gpio_direction_output(gpmc_cfg->gpio_pwrdwn, 0); | ||
161 | } | 160 | } |
162 | 161 | ||
163 | if (gpmc_cfg->gpio_reset) { | 162 | if (gpmc_cfg->gpio_reset) { |
164 | ret = gpio_request(gpmc_cfg->gpio_reset, "SMC91X reset"); | 163 | ret = gpio_request_one(gpmc_cfg->gpio_reset, |
164 | GPIOF_OUT_INIT_LOW, "SMC91X reset"); | ||
165 | if (ret) | 165 | if (ret) |
166 | goto free3; | 166 | goto free3; |
167 | 167 | ||
168 | gpio_direction_output(gpmc_cfg->gpio_reset, 0); | ||
169 | gpio_set_value(gpmc_cfg->gpio_reset, 1); | 168 | gpio_set_value(gpmc_cfg->gpio_reset, 1); |
170 | msleep(100); | 169 | msleep(100); |
171 | gpio_set_value(gpmc_cfg->gpio_reset, 0); | 170 | gpio_set_value(gpmc_cfg->gpio_reset, 0); |
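The SMC91X change above folds each gpio_request()/gpio_direction_*() pair into a single gpio_request_one() call, where the GPIOF_* flag selects the direction and, for outputs, the initial level. A minimal before/after sketch with a hypothetical GPIO number:

	/* before: request the line, then configure it in a second call */
	ret = gpio_request(gpio, "example powerdown");
	if (ret)
		return ret;
	gpio_direction_output(gpio, 0);

	/* after: one call requests the line and drives it low */
	ret = gpio_request_one(gpio, GPIOF_OUT_INIT_LOW, "example powerdown");
	if (ret)
		return ret;
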
diff --git a/arch/arm/mach-omap2/gpmc-smsc911x.c b/arch/arm/mach-omap2/gpmc-smsc911x.c index 703f150dd01d..997033129d26 100644 --- a/arch/arm/mach-omap2/gpmc-smsc911x.c +++ b/arch/arm/mach-omap2/gpmc-smsc911x.c | |||
@@ -10,6 +10,7 @@ | |||
10 | * it under the terms of the GNU General Public License version 2 as | 10 | * it under the terms of the GNU General Public License version 2 as |
11 | * published by the Free Software Foundation. | 11 | * published by the Free Software Foundation. |
12 | */ | 12 | */ |
13 | #define pr_fmt(fmt) "%s: " fmt, __func__ | ||
13 | 14 | ||
14 | #include <linux/kernel.h> | 15 | #include <linux/kernel.h> |
15 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
@@ -30,7 +31,7 @@ static struct resource gpmc_smsc911x_resources[] = { | |||
30 | .flags = IORESOURCE_MEM, | 31 | .flags = IORESOURCE_MEM, |
31 | }, | 32 | }, |
32 | [1] = { | 33 | [1] = { |
33 | .flags = IORESOURCE_IRQ, | 34 | .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, |
34 | }, | 35 | }, |
35 | }; | 36 | }; |
36 | 37 | ||
@@ -41,16 +42,6 @@ static struct smsc911x_platform_config gpmc_smsc911x_config = { | |||
41 | .flags = SMSC911X_USE_16BIT, | 42 | .flags = SMSC911X_USE_16BIT, |
42 | }; | 43 | }; |
43 | 44 | ||
44 | static struct platform_device gpmc_smsc911x_device = { | ||
45 | .name = "smsc911x", | ||
46 | .id = -1, | ||
47 | .num_resources = ARRAY_SIZE(gpmc_smsc911x_resources), | ||
48 | .resource = gpmc_smsc911x_resources, | ||
49 | .dev = { | ||
50 | .platform_data = &gpmc_smsc911x_config, | ||
51 | }, | ||
52 | }; | ||
53 | |||
54 | /* | 45 | /* |
55 | * Initialize smsc911x device connected to the GPMC. Note that we | 46 | * Initialize smsc911x device connected to the GPMC. Note that we |
56 | * assume that pin multiplexing is done in the board-*.c file, | 47 | * assume that pin multiplexing is done in the board-*.c file, |
@@ -58,46 +49,49 @@ static struct platform_device gpmc_smsc911x_device = { | |||
58 | */ | 49 | */ |
59 | void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *board_data) | 50 | void __init gpmc_smsc911x_init(struct omap_smsc911x_platform_data *board_data) |
60 | { | 51 | { |
52 | struct platform_device *pdev; | ||
61 | unsigned long cs_mem_base; | 53 | unsigned long cs_mem_base; |
62 | int ret; | 54 | int ret; |
63 | 55 | ||
64 | gpmc_cfg = board_data; | 56 | gpmc_cfg = board_data; |
65 | 57 | ||
66 | if (gpmc_cs_request(gpmc_cfg->cs, SZ_16M, &cs_mem_base) < 0) { | 58 | if (gpmc_cs_request(gpmc_cfg->cs, SZ_16M, &cs_mem_base) < 0) { |
67 | printk(KERN_ERR "Failed to request GPMC mem for smsc911x\n"); | 59 | pr_err("Failed to request GPMC mem region\n"); |
68 | return; | 60 | return; |
69 | } | 61 | } |
70 | 62 | ||
71 | gpmc_smsc911x_resources[0].start = cs_mem_base + 0x0; | 63 | gpmc_smsc911x_resources[0].start = cs_mem_base + 0x0; |
72 | gpmc_smsc911x_resources[0].end = cs_mem_base + 0xff; | 64 | gpmc_smsc911x_resources[0].end = cs_mem_base + 0xff; |
73 | 65 | ||
74 | if (gpio_request(gpmc_cfg->gpio_irq, "smsc911x irq") < 0) { | 66 | if (gpio_request_one(gpmc_cfg->gpio_irq, GPIOF_IN, "smsc911x irq")) { |
75 | printk(KERN_ERR "Failed to request GPIO%d for smsc911x IRQ\n", | 67 | pr_err("Failed to request IRQ GPIO%d\n", gpmc_cfg->gpio_irq); |
76 | gpmc_cfg->gpio_irq); | ||
77 | goto free1; | 68 | goto free1; |
78 | } | 69 | } |
79 | 70 | ||
80 | gpio_direction_input(gpmc_cfg->gpio_irq); | ||
81 | gpmc_smsc911x_resources[1].start = gpio_to_irq(gpmc_cfg->gpio_irq); | 71 | gpmc_smsc911x_resources[1].start = gpio_to_irq(gpmc_cfg->gpio_irq); |
82 | gpmc_smsc911x_resources[1].flags |= | ||
83 | (gpmc_cfg->flags & IRQF_TRIGGER_MASK); | ||
84 | 72 | ||
85 | if (gpio_is_valid(gpmc_cfg->gpio_reset)) { | 73 | if (gpio_is_valid(gpmc_cfg->gpio_reset)) { |
86 | ret = gpio_request(gpmc_cfg->gpio_reset, "smsc911x reset"); | 74 | ret = gpio_request_one(gpmc_cfg->gpio_reset, |
75 | GPIOF_OUT_INIT_HIGH, "smsc911x reset"); | ||
87 | if (ret) { | 76 | if (ret) { |
88 | printk(KERN_ERR "Failed to request GPIO%d for smsc911x reset\n", | 77 | pr_err("Failed to request reset GPIO%d\n", |
89 | gpmc_cfg->gpio_reset); | 78 | gpmc_cfg->gpio_reset); |
90 | goto free2; | 79 | goto free2; |
91 | } | 80 | } |
92 | 81 | ||
93 | gpio_direction_output(gpmc_cfg->gpio_reset, 1); | ||
94 | gpio_set_value(gpmc_cfg->gpio_reset, 0); | 82 | gpio_set_value(gpmc_cfg->gpio_reset, 0); |
95 | msleep(100); | 83 | msleep(100); |
96 | gpio_set_value(gpmc_cfg->gpio_reset, 1); | 84 | gpio_set_value(gpmc_cfg->gpio_reset, 1); |
97 | } | 85 | } |
98 | 86 | ||
99 | if (platform_device_register(&gpmc_smsc911x_device) < 0) { | 87 | if (gpmc_cfg->flags) |
100 | printk(KERN_ERR "Unable to register smsc911x device\n"); | 88 | gpmc_smsc911x_config.flags = gpmc_cfg->flags; |
89 | |||
90 | pdev = platform_device_register_resndata(NULL, "smsc911x", gpmc_cfg->id, | ||
91 | gpmc_smsc911x_resources, ARRAY_SIZE(gpmc_smsc911x_resources), | ||
92 | &gpmc_smsc911x_config, sizeof(gpmc_smsc911x_config)); | ||
93 | if (!pdev) { | ||
94 | pr_err("Unable to register platform device\n"); | ||
101 | gpio_free(gpmc_cfg->gpio_reset); | 95 | gpio_free(gpmc_cfg->gpio_reset); |
102 | goto free2; | 96 | goto free2; |
103 | } | 97 | } |
@@ -109,5 +103,5 @@ free2: | |||
109 | free1: | 103 | free1: |
110 | gpmc_cs_free(gpmc_cfg->cs); | 104 | gpmc_cs_free(gpmc_cfg->cs); |
111 | 105 | ||
112 | printk(KERN_ERR "Could not initialize smsc911x\n"); | 106 | pr_err("Could not initialize smsc911x device\n"); |
113 | } | 107 | } |
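The smsc911x rework above drops the file-scope struct platform_device and registers the device at init time instead. A minimal sketch of that registration pattern with hypothetical resources and platform data; platform_device_register_resndata() takes the parent, name, id, the resource array, and a copy of the platform data, and reports failure via an error pointer:

	struct platform_device *pdev;

	pdev = platform_device_register_resndata(NULL, "example-dev", -1,
			example_resources, ARRAY_SIZE(example_resources),
			&example_pdata, sizeof(example_pdata));
	if (IS_ERR(pdev))
		return PTR_ERR(pdev);
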
diff --git a/arch/arm/mach-omap2/omap_l3_noc.c b/arch/arm/mach-omap2/omap_l3_noc.c index 82632c24076f..7b9f1909ddb2 100644 --- a/arch/arm/mach-omap2/omap_l3_noc.c +++ b/arch/arm/mach-omap2/omap_l3_noc.c | |||
@@ -63,10 +63,7 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3) | |||
63 | char *source_name; | 63 | char *source_name; |
64 | 64 | ||
65 | /* Get the Type of interrupt */ | 65 | /* Get the Type of interrupt */ |
66 | if (irq == l3->app_irq) | 66 | inttype = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR; |
67 | inttype = L3_APPLICATION_ERROR; | ||
68 | else | ||
69 | inttype = L3_DEBUG_ERROR; | ||
70 | 67 | ||
71 | for (i = 0; i < L3_MODULES; i++) { | 68 | for (i = 0; i < L3_MODULES; i++) { |
72 | /* | 69 | /* |
@@ -84,10 +81,10 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3) | |||
84 | 81 | ||
85 | err_src = j; | 82 | err_src = j; |
86 | /* Read the stderrlog_main_source from clk domain */ | 83 | /* Read the stderrlog_main_source from clk domain */ |
87 | std_err_main_addr = base + (*(l3_targ[i] + err_src)); | 84 | std_err_main_addr = base + *(l3_targ[i] + err_src); |
88 | std_err_main = readl(std_err_main_addr); | 85 | std_err_main = readl(std_err_main_addr); |
89 | 86 | ||
90 | switch ((std_err_main & CUSTOM_ERROR)) { | 87 | switch (std_err_main & CUSTOM_ERROR) { |
91 | case STANDARD_ERROR: | 88 | case STANDARD_ERROR: |
92 | source_name = | 89 | source_name = |
93 | l3_targ_stderrlog_main_name[i][err_src]; | 90 | l3_targ_stderrlog_main_name[i][err_src]; |
@@ -132,49 +129,49 @@ static int __init omap4_l3_probe(struct platform_device *pdev) | |||
132 | 129 | ||
133 | l3 = kzalloc(sizeof(*l3), GFP_KERNEL); | 130 | l3 = kzalloc(sizeof(*l3), GFP_KERNEL); |
134 | if (!l3) | 131 | if (!l3) |
135 | ret = -ENOMEM; | 132 | return -ENOMEM; |
136 | 133 | ||
137 | platform_set_drvdata(pdev, l3); | 134 | platform_set_drvdata(pdev, l3); |
138 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 135 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
139 | if (!res) { | 136 | if (!res) { |
140 | dev_err(&pdev->dev, "couldn't find resource 0\n"); | 137 | dev_err(&pdev->dev, "couldn't find resource 0\n"); |
141 | ret = -ENODEV; | 138 | ret = -ENODEV; |
142 | goto err1; | 139 | goto err0; |
143 | } | 140 | } |
144 | 141 | ||
145 | l3->l3_base[0] = ioremap(res->start, resource_size(res)); | 142 | l3->l3_base[0] = ioremap(res->start, resource_size(res)); |
146 | if (!(l3->l3_base[0])) { | 143 | if (!l3->l3_base[0]) { |
147 | dev_err(&pdev->dev, "ioremap failed\n"); | 144 | dev_err(&pdev->dev, "ioremap failed\n"); |
148 | ret = -ENOMEM; | 145 | ret = -ENOMEM; |
149 | goto err2; | 146 | goto err0; |
150 | } | 147 | } |
151 | 148 | ||
152 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); | 149 | res = platform_get_resource(pdev, IORESOURCE_MEM, 1); |
153 | if (!res) { | 150 | if (!res) { |
154 | dev_err(&pdev->dev, "couldn't find resource 1\n"); | 151 | dev_err(&pdev->dev, "couldn't find resource 1\n"); |
155 | ret = -ENODEV; | 152 | ret = -ENODEV; |
156 | goto err3; | 153 | goto err1; |
157 | } | 154 | } |
158 | 155 | ||
159 | l3->l3_base[1] = ioremap(res->start, resource_size(res)); | 156 | l3->l3_base[1] = ioremap(res->start, resource_size(res)); |
160 | if (!(l3->l3_base[1])) { | 157 | if (!l3->l3_base[1]) { |
161 | dev_err(&pdev->dev, "ioremap failed\n"); | 158 | dev_err(&pdev->dev, "ioremap failed\n"); |
162 | ret = -ENOMEM; | 159 | ret = -ENOMEM; |
163 | goto err4; | 160 | goto err1; |
164 | } | 161 | } |
165 | 162 | ||
166 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); | 163 | res = platform_get_resource(pdev, IORESOURCE_MEM, 2); |
167 | if (!res) { | 164 | if (!res) { |
168 | dev_err(&pdev->dev, "couldn't find resource 2\n"); | 165 | dev_err(&pdev->dev, "couldn't find resource 2\n"); |
169 | ret = -ENODEV; | 166 | ret = -ENODEV; |
170 | goto err5; | 167 | goto err2; |
171 | } | 168 | } |
172 | 169 | ||
173 | l3->l3_base[2] = ioremap(res->start, resource_size(res)); | 170 | l3->l3_base[2] = ioremap(res->start, resource_size(res)); |
174 | if (!(l3->l3_base[2])) { | 171 | if (!l3->l3_base[2]) { |
175 | dev_err(&pdev->dev, "ioremap failed\n"); | 172 | dev_err(&pdev->dev, "ioremap failed\n"); |
176 | ret = -ENOMEM; | 173 | ret = -ENOMEM; |
177 | goto err6; | 174 | goto err2; |
178 | } | 175 | } |
179 | 176 | ||
180 | /* | 177 | /* |
@@ -187,7 +184,7 @@ static int __init omap4_l3_probe(struct platform_device *pdev) | |||
187 | if (ret) { | 184 | if (ret) { |
188 | pr_crit("L3: request_irq failed to register for 0x%x\n", | 185 | pr_crit("L3: request_irq failed to register for 0x%x\n", |
189 | OMAP44XX_IRQ_L3_DBG); | 186 | OMAP44XX_IRQ_L3_DBG); |
190 | goto err7; | 187 | goto err3; |
191 | } | 188 | } |
192 | l3->debug_irq = irq; | 189 | l3->debug_irq = irq; |
193 | 190 | ||
@@ -198,24 +195,22 @@ static int __init omap4_l3_probe(struct platform_device *pdev) | |||
198 | if (ret) { | 195 | if (ret) { |
199 | pr_crit("L3: request_irq failed to register for 0x%x\n", | 196 | pr_crit("L3: request_irq failed to register for 0x%x\n", |
200 | OMAP44XX_IRQ_L3_APP); | 197 | OMAP44XX_IRQ_L3_APP); |
201 | goto err8; | 198 | goto err4; |
202 | } | 199 | } |
203 | l3->app_irq = irq; | 200 | l3->app_irq = irq; |
204 | 201 | ||
205 | goto err0; | 202 | return 0; |
206 | err8: | 203 | |
207 | err7: | ||
208 | iounmap(l3->l3_base[2]); | ||
209 | err6: | ||
210 | err5: | ||
211 | iounmap(l3->l3_base[1]); | ||
212 | err4: | 204 | err4: |
205 | free_irq(l3->debug_irq, l3); | ||
213 | err3: | 206 | err3: |
214 | iounmap(l3->l3_base[0]); | 207 | iounmap(l3->l3_base[2]); |
215 | err2: | 208 | err2: |
209 | iounmap(l3->l3_base[1]); | ||
216 | err1: | 210 | err1: |
217 | kfree(l3); | 211 | iounmap(l3->l3_base[0]); |
218 | err0: | 212 | err0: |
213 | kfree(l3); | ||
219 | return ret; | 214 | return ret; |
220 | } | 215 | } |
221 | 216 | ||
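The probe cleanup above also straightens the error handling into the usual unwind ladder: return directly while nothing is held, and order the labels so each one releases only what was acquired before the failing step. The general shape, with hypothetical names:

	base = ioremap(start, size);
	if (!base) {
		ret = -ENOMEM;
		goto err0;		/* only the context was allocated so far */
	}

	ret = request_irq(irq, example_handler, 0, "example", ctx);
	if (ret)
		goto err1;		/* undo the mapping as well */

	return 0;

	err1:
		iounmap(base);
	err0:
		kfree(ctx);
		return ret;
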
diff --git a/arch/arm/mach-omap2/omap_l3_smx.c b/arch/arm/mach-omap2/omap_l3_smx.c index 4321e7938929..873c0e33b512 100644 --- a/arch/arm/mach-omap2/omap_l3_smx.c +++ b/arch/arm/mach-omap2/omap_l3_smx.c | |||
@@ -155,7 +155,7 @@ static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3, | |||
155 | u8 multi = error & L3_ERROR_LOG_MULTI; | 155 | u8 multi = error & L3_ERROR_LOG_MULTI; |
156 | u32 address = omap3_l3_decode_addr(error_addr); | 156 | u32 address = omap3_l3_decode_addr(error_addr); |
157 | 157 | ||
158 | WARN(true, "%s Error seen by %s %s at address %x\n", | 158 | WARN(true, "%s seen by %s %s at address %x\n", |
159 | omap3_l3_code_string(code), | 159 | omap3_l3_code_string(code), |
160 | omap3_l3_initiator_string(initid), | 160 | omap3_l3_initiator_string(initid), |
161 | multi ? "Multiple Errors" : "", | 161 | multi ? "Multiple Errors" : "", |
@@ -167,21 +167,15 @@ static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3, | |||
167 | static irqreturn_t omap3_l3_app_irq(int irq, void *_l3) | 167 | static irqreturn_t omap3_l3_app_irq(int irq, void *_l3) |
168 | { | 168 | { |
169 | struct omap3_l3 *l3 = _l3; | 169 | struct omap3_l3 *l3 = _l3; |
170 | |||
171 | u64 status, clear; | 170 | u64 status, clear; |
172 | u64 error; | 171 | u64 error; |
173 | u64 error_addr; | 172 | u64 error_addr; |
174 | u64 err_source = 0; | 173 | u64 err_source = 0; |
175 | void __iomem *base; | 174 | void __iomem *base; |
176 | int int_type; | 175 | int int_type; |
177 | |||
178 | irqreturn_t ret = IRQ_NONE; | 176 | irqreturn_t ret = IRQ_NONE; |
179 | 177 | ||
180 | if (irq == l3->app_irq) | 178 | int_type = irq == l3->app_irq ? L3_APPLICATION_ERROR : L3_DEBUG_ERROR; |
181 | int_type = L3_APPLICATION_ERROR; | ||
182 | else | ||
183 | int_type = L3_DEBUG_ERROR; | ||
184 | |||
185 | if (!int_type) { | 179 | if (!int_type) { |
186 | status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0); | 180 | status = omap3_l3_readll(l3->rt, L3_SI_FLAG_STATUS_0); |
187 | /* | 181 | /* |
@@ -202,7 +196,6 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3) | |||
202 | 196 | ||
203 | base = l3->rt + *(omap3_l3_bases[int_type] + err_source); | 197 | base = l3->rt + *(omap3_l3_bases[int_type] + err_source); |
204 | error = omap3_l3_readll(base, L3_ERROR_LOG); | 198 | error = omap3_l3_readll(base, L3_ERROR_LOG); |
205 | |||
206 | if (error) { | 199 | if (error) { |
207 | error_addr = omap3_l3_readll(base, L3_ERROR_LOG_ADDR); | 200 | error_addr = omap3_l3_readll(base, L3_ERROR_LOG_ADDR); |
208 | 201 | ||
@@ -210,9 +203,8 @@ static irqreturn_t omap3_l3_app_irq(int irq, void *_l3) | |||
210 | } | 203 | } |
211 | 204 | ||
212 | /* Clear the status register */ | 205 | /* Clear the status register */ |
213 | clear = ((L3_AGENT_STATUS_CLEAR_IA << int_type) | | 206 | clear = (L3_AGENT_STATUS_CLEAR_IA << int_type) | |
214 | (L3_AGENT_STATUS_CLEAR_TA)); | 207 | L3_AGENT_STATUS_CLEAR_TA; |
215 | |||
216 | omap3_l3_writell(base, L3_AGENT_STATUS, clear); | 208 | omap3_l3_writell(base, L3_AGENT_STATUS, clear); |
217 | 209 | ||
218 | /* clear the error log register */ | 210 | /* clear the error log register */ |
@@ -228,10 +220,8 @@ static int __init omap3_l3_probe(struct platform_device *pdev) | |||
228 | int ret; | 220 | int ret; |
229 | 221 | ||
230 | l3 = kzalloc(sizeof(*l3), GFP_KERNEL); | 222 | l3 = kzalloc(sizeof(*l3), GFP_KERNEL); |
231 | if (!l3) { | 223 | if (!l3) |
232 | ret = -ENOMEM; | 224 | return -ENOMEM; |
233 | goto err0; | ||
234 | } | ||
235 | 225 | ||
236 | platform_set_drvdata(pdev, l3); | 226 | platform_set_drvdata(pdev, l3); |
237 | 227 | ||
@@ -239,13 +229,13 @@ static int __init omap3_l3_probe(struct platform_device *pdev) | |||
239 | if (!res) { | 229 | if (!res) { |
240 | dev_err(&pdev->dev, "couldn't find resource\n"); | 230 | dev_err(&pdev->dev, "couldn't find resource\n"); |
241 | ret = -ENODEV; | 231 | ret = -ENODEV; |
242 | goto err1; | 232 | goto err0; |
243 | } | 233 | } |
244 | l3->rt = ioremap(res->start, resource_size(res)); | 234 | l3->rt = ioremap(res->start, resource_size(res)); |
245 | if (!(l3->rt)) { | 235 | if (!l3->rt) { |
246 | dev_err(&pdev->dev, "ioremap failed\n"); | 236 | dev_err(&pdev->dev, "ioremap failed\n"); |
247 | ret = -ENOMEM; | 237 | ret = -ENOMEM; |
248 | goto err2; | 238 | goto err0; |
249 | } | 239 | } |
250 | 240 | ||
251 | l3->debug_irq = platform_get_irq(pdev, 0); | 241 | l3->debug_irq = platform_get_irq(pdev, 0); |
@@ -254,28 +244,26 @@ static int __init omap3_l3_probe(struct platform_device *pdev) | |||
254 | "l3-debug-irq", l3); | 244 | "l3-debug-irq", l3); |
255 | if (ret) { | 245 | if (ret) { |
256 | dev_err(&pdev->dev, "couldn't request debug irq\n"); | 246 | dev_err(&pdev->dev, "couldn't request debug irq\n"); |
257 | goto err3; | 247 | goto err1; |
258 | } | 248 | } |
259 | 249 | ||
260 | l3->app_irq = platform_get_irq(pdev, 1); | 250 | l3->app_irq = platform_get_irq(pdev, 1); |
261 | ret = request_irq(l3->app_irq, omap3_l3_app_irq, | 251 | ret = request_irq(l3->app_irq, omap3_l3_app_irq, |
262 | IRQF_DISABLED | IRQF_TRIGGER_RISING, | 252 | IRQF_DISABLED | IRQF_TRIGGER_RISING, |
263 | "l3-app-irq", l3); | 253 | "l3-app-irq", l3); |
264 | |||
265 | if (ret) { | 254 | if (ret) { |
266 | dev_err(&pdev->dev, "couldn't request app irq\n"); | 255 | dev_err(&pdev->dev, "couldn't request app irq\n"); |
267 | goto err4; | 256 | goto err2; |
268 | } | 257 | } |
269 | 258 | ||
270 | goto err0; | 259 | return 0; |
271 | 260 | ||
272 | err4: | ||
273 | err3: | ||
274 | iounmap(l3->rt); | ||
275 | err2: | 261 | err2: |
262 | free_irq(l3->debug_irq, l3); | ||
276 | err1: | 263 | err1: |
277 | kfree(l3); | 264 | iounmap(l3->rt); |
278 | err0: | 265 | err0: |
266 | kfree(l3); | ||
279 | return ret; | 267 | return ret; |
280 | } | 268 | } |
281 | 269 | ||
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c index 05f6abc96b0d..f47813edd951 100644 --- a/arch/arm/mach-omap2/omap_phy_internal.c +++ b/arch/arm/mach-omap2/omap_phy_internal.c | |||
@@ -50,13 +50,16 @@ int omap4430_phy_init(struct device *dev) | |||
50 | { | 50 | { |
51 | ctrl_base = ioremap(OMAP443X_SCM_BASE, SZ_1K); | 51 | ctrl_base = ioremap(OMAP443X_SCM_BASE, SZ_1K); |
52 | if (!ctrl_base) { | 52 | if (!ctrl_base) { |
53 | dev_err(dev, "control module ioremap failed\n"); | 53 | pr_err("control module ioremap failed\n"); |
54 | return -ENOMEM; | 54 | return -ENOMEM; |
55 | } | 55 | } |
56 | /* Power down the phy */ | 56 | /* Power down the phy */ |
57 | __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF); | 57 | __raw_writel(PHY_PD, ctrl_base + CONTROL_DEV_CONF); |
58 | phyclk = clk_get(dev, "ocp2scp_usb_phy_ick"); | ||
59 | 58 | ||
59 | if (!dev) | ||
60 | return 0; | ||
61 | |||
62 | phyclk = clk_get(dev, "ocp2scp_usb_phy_ick"); | ||
60 | if (IS_ERR(phyclk)) { | 63 | if (IS_ERR(phyclk)) { |
61 | dev_err(dev, "cannot clk_get ocp2scp_usb_phy_ick\n"); | 64 | dev_err(dev, "cannot clk_get ocp2scp_usb_phy_ick\n"); |
62 | iounmap(ctrl_base); | 65 | iounmap(ctrl_base); |
@@ -228,7 +231,7 @@ void am35x_musb_clear_irq(void) | |||
228 | regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); | 231 | regval = omap_ctrl_readl(AM35XX_CONTROL_LVL_INTR_CLEAR); |
229 | } | 232 | } |
230 | 233 | ||
231 | void am35x_musb_set_mode(u8 musb_mode) | 234 | void am35x_set_mode(u8 musb_mode) |
232 | { | 235 | { |
233 | u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2); | 236 | u32 devconf2 = omap_ctrl_readl(AM35XX_CONTROL_DEVCONF2); |
234 | 237 | ||
diff --git a/arch/arm/mach-omap2/pm.h b/arch/arm/mach-omap2/pm.h index 797bfd12b643..45bcfce77352 100644 --- a/arch/arm/mach-omap2/pm.h +++ b/arch/arm/mach-omap2/pm.h | |||
@@ -36,11 +36,16 @@ static inline int omap4_opp_init(void) | |||
36 | } | 36 | } |
37 | #endif | 37 | #endif |
38 | 38 | ||
39 | /* | ||
40 | * cpuidle mach specific parameters | ||
41 | * | ||
42 | * The board code can override the default C-states definition using | ||
43 | * omap3_pm_init_cpuidle | ||
44 | */ | ||
39 | struct cpuidle_params { | 45 | struct cpuidle_params { |
40 | u8 valid; | 46 | u32 exit_latency; /* exit_latency = sleep + wake-up latencies */ |
41 | u32 sleep_latency; | 47 | u32 target_residency; |
42 | u32 wake_latency; | 48 | u8 valid; /* validates the C-state */ |
43 | u32 threshold; | ||
44 | }; | 49 | }; |
45 | 50 | ||
46 | #if defined(CONFIG_PM) && defined(CONFIG_CPU_IDLE) | 51 | #if defined(CONFIG_PM) && defined(CONFIG_CPU_IDLE) |
@@ -73,10 +78,6 @@ extern u32 sleep_while_idle; | |||
73 | #define sleep_while_idle 0 | 78 | #define sleep_while_idle 0 |
74 | #endif | 79 | #endif |
75 | 80 | ||
76 | #if defined(CONFIG_CPU_IDLE) | ||
77 | extern void omap3_cpuidle_update_states(u32, u32); | ||
78 | #endif | ||
79 | |||
80 | #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) | 81 | #if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) |
81 | extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev); | 82 | extern void pm_dbg_update_time(struct powerdomain *pwrdm, int prev); |
82 | extern int pm_dbg_regset_save(int reg_set); | 83 | extern int pm_dbg_regset_save(int reg_set); |
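With the reworked struct above, a board file can hand its own C-state timings to omap3_pm_init_cpuidle(). A hypothetical override table (the numbers below are illustrative only, not taken from any real board file):

	static struct cpuidle_params example_cpuidle_params[] = {
		/* exit_latency (us), target_residency (us), valid */
		{   110,   300, 1 },	/* C1 */
		{   200,   400, 1 },	/* C2 */
		{   500,   900, 1 },	/* C3 */
		{  1500,  1800, 1 },	/* C4 */
		{  2500,  7500, 1 },	/* C5 */
		{  3000,  8500, 1 },	/* C6 */
		{ 10000, 30000, 0 },	/* C7 kept disabled on this example board */
	};

	static void __init example_board_pm_init(void)
	{
		omap3_pm_init_cpuidle(example_cpuidle_params);
	}
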
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c index 0c5e3a46a3ad..c155c9d1c82c 100644 --- a/arch/arm/mach-omap2/pm34xx.c +++ b/arch/arm/mach-omap2/pm34xx.c | |||
@@ -779,18 +779,6 @@ void omap3_pm_off_mode_enable(int enable) | |||
779 | else | 779 | else |
780 | state = PWRDM_POWER_RET; | 780 | state = PWRDM_POWER_RET; |
781 | 781 | ||
782 | #ifdef CONFIG_CPU_IDLE | ||
783 | /* | ||
784 | * Erratum i583: implementation for ES rev < Es1.2 on 3630. We cannot | ||
785 | * enable OFF mode in a stable form for previous revisions, restrict | ||
786 | * instead to RET | ||
787 | */ | ||
788 | if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583)) | ||
789 | omap3_cpuidle_update_states(state, PWRDM_POWER_RET); | ||
790 | else | ||
791 | omap3_cpuidle_update_states(state, state); | ||
792 | #endif | ||
793 | |||
794 | list_for_each_entry(pwrst, &pwrst_list, node) { | 782 | list_for_each_entry(pwrst, &pwrst_list, node) { |
795 | if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) && | 783 | if (IS_PM34XX_ERRATUM(PM_SDRC_WAKEUP_ERRATUM_i583) && |
796 | pwrst->pwrdm == core_pwrdm && | 784 | pwrst->pwrdm == core_pwrdm && |
@@ -895,8 +883,6 @@ static int __init omap3_pm_init(void) | |||
895 | 883 | ||
896 | pm_errata_configure(); | 884 | pm_errata_configure(); |
897 | 885 | ||
898 | printk(KERN_ERR "Power Management for TI OMAP3.\n"); | ||
899 | |||
900 | /* XXX prcm_setup_regs needs to be before enabling hw | 886 | /* XXX prcm_setup_regs needs to be before enabling hw |
901 | * supervised mode for powerdomains */ | 887 | * supervised mode for powerdomains */ |
902 | prcm_setup_regs(); | 888 | prcm_setup_regs(); |
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c index 76cfff2db514..59a870be8390 100644 --- a/arch/arm/mach-omap2/pm44xx.c +++ b/arch/arm/mach-omap2/pm44xx.c | |||
@@ -105,13 +105,11 @@ static int __init omap4_pm_init(void) | |||
105 | 105 | ||
106 | pr_err("Power Management for TI OMAP4.\n"); | 106 | pr_err("Power Management for TI OMAP4.\n"); |
107 | 107 | ||
108 | #ifdef CONFIG_PM | ||
109 | ret = pwrdm_for_each(pwrdms_setup, NULL); | 108 | ret = pwrdm_for_each(pwrdms_setup, NULL); |
110 | if (ret) { | 109 | if (ret) { |
111 | pr_err("Failed to setup powerdomains\n"); | 110 | pr_err("Failed to setup powerdomains\n"); |
112 | goto err2; | 111 | goto err2; |
113 | } | 112 | } |
114 | #endif | ||
115 | 113 | ||
116 | #ifdef CONFIG_SUSPEND | 114 | #ifdef CONFIG_SUSPEND |
117 | suspend_set_ops(&omap_pm_ops); | 115 | suspend_set_ops(&omap_pm_ops); |
diff --git a/arch/arm/mach-omap2/smartreflex.c b/arch/arm/mach-omap2/smartreflex.c index 13e24f913dd4..fb7dc52394a8 100644 --- a/arch/arm/mach-omap2/smartreflex.c +++ b/arch/arm/mach-omap2/smartreflex.c | |||
@@ -847,6 +847,14 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
847 | goto err_free_devinfo; | 847 | goto err_free_devinfo; |
848 | } | 848 | } |
849 | 849 | ||
850 | mem = request_mem_region(mem->start, resource_size(mem), | ||
851 | dev_name(&pdev->dev)); | ||
852 | if (!mem) { | ||
853 | dev_err(&pdev->dev, "%s: no mem region\n", __func__); | ||
854 | ret = -EBUSY; | ||
855 | goto err_free_devinfo; | ||
856 | } | ||
857 | |||
850 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 858 | irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
851 | 859 | ||
852 | pm_runtime_enable(&pdev->dev); | 860 | pm_runtime_enable(&pdev->dev); |
@@ -883,7 +891,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
883 | ret = sr_late_init(sr_info); | 891 | ret = sr_late_init(sr_info); |
884 | if (ret) { | 892 | if (ret) { |
885 | pr_warning("%s: Error in SR late init\n", __func__); | 893 | pr_warning("%s: Error in SR late init\n", __func__); |
886 | goto err_release_region; | 894 | return ret; |
887 | } | 895 | } |
888 | } | 896 | } |
889 | 897 | ||
@@ -896,7 +904,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
896 | vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm); | 904 | vdd_dbg_dir = omap_voltage_get_dbgdir(sr_info->voltdm); |
897 | if (!vdd_dbg_dir) { | 905 | if (!vdd_dbg_dir) { |
898 | ret = -EINVAL; | 906 | ret = -EINVAL; |
899 | goto err_release_region; | 907 | goto err_iounmap; |
900 | } | 908 | } |
901 | 909 | ||
902 | sr_info->dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir); | 910 | sr_info->dbg_dir = debugfs_create_dir("smartreflex", vdd_dbg_dir); |
@@ -904,7 +912,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
904 | dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", | 912 | dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", |
905 | __func__); | 913 | __func__); |
906 | ret = PTR_ERR(sr_info->dbg_dir); | 914 | ret = PTR_ERR(sr_info->dbg_dir); |
907 | goto err_release_region; | 915 | goto err_iounmap; |
908 | } | 916 | } |
909 | 917 | ||
910 | (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, | 918 | (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, |
@@ -921,7 +929,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
921 | dev_err(&pdev->dev, "%s: Unable to create debugfs directory" | 929 | dev_err(&pdev->dev, "%s: Unable to create debugfs directory" |
922 | "for n-values\n", __func__); | 930 | "for n-values\n", __func__); |
923 | ret = PTR_ERR(nvalue_dir); | 931 | ret = PTR_ERR(nvalue_dir); |
924 | goto err_release_region; | 932 | goto err_debugfs; |
925 | } | 933 | } |
926 | 934 | ||
927 | omap_voltage_get_volttable(sr_info->voltdm, &volt_data); | 935 | omap_voltage_get_volttable(sr_info->voltdm, &volt_data); |
@@ -931,7 +939,7 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
931 | "entries for n-values\n", | 939 | "entries for n-values\n", |
932 | __func__, sr_info->voltdm->name); | 940 | __func__, sr_info->voltdm->name); |
933 | ret = -ENODATA; | 941 | ret = -ENODATA; |
934 | goto err_release_region; | 942 | goto err_debugfs; |
935 | } | 943 | } |
936 | 944 | ||
937 | for (i = 0; i < sr_info->nvalue_count; i++) { | 945 | for (i = 0; i < sr_info->nvalue_count; i++) { |
@@ -945,6 +953,11 @@ static int __init omap_sr_probe(struct platform_device *pdev) | |||
945 | 953 | ||
946 | return ret; | 954 | return ret; |
947 | 955 | ||
956 | err_debugfs: | ||
957 | debugfs_remove_recursive(sr_info->dbg_dir); | ||
958 | err_iounmap: | ||
959 | list_del(&sr_info->node); | ||
960 | iounmap(sr_info->base); | ||
948 | err_release_region: | 961 | err_release_region: |
949 | release_mem_region(mem->start, resource_size(mem)); | 962 | release_mem_region(mem->start, resource_size(mem)); |
950 | err_free_devinfo: | 963 | err_free_devinfo: |
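The smartreflex probe above now claims its MMIO window with request_mem_region() before mapping it, and the new labels unwind debugfs, the list entry, and the mapping before the region is released. The request/map pairing in isolation, with hypothetical names:

	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!mem)
		return -ENODEV;

	if (!request_mem_region(mem->start, resource_size(mem),
				dev_name(&pdev->dev)))
		return -EBUSY;

	base = ioremap(mem->start, resource_size(mem));
	if (!base) {
		release_mem_region(mem->start, resource_size(mem));
		return -ENOMEM;
	}

	/* teardown runs in reverse: iounmap(base), then release_mem_region() */
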
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c index 35559f77e2de..c7ed540d868d 100644 --- a/arch/arm/mach-omap2/usb-musb.c +++ b/arch/arm/mach-omap2/usb-musb.c | |||
@@ -108,7 +108,13 @@ static void usb_musb_mux_init(struct omap_musb_board_data *board_data) | |||
108 | } | 108 | } |
109 | } | 109 | } |
110 | 110 | ||
111 | void __init usb_musb_init(struct omap_musb_board_data *board_data) | 111 | static struct omap_musb_board_data musb_default_board_data = { |
112 | .interface_type = MUSB_INTERFACE_ULPI, | ||
113 | .mode = MUSB_OTG, | ||
114 | .power = 100, | ||
115 | }; | ||
116 | |||
117 | void __init usb_musb_init(struct omap_musb_board_data *musb_board_data) | ||
112 | { | 118 | { |
113 | struct omap_hwmod *oh; | 119 | struct omap_hwmod *oh; |
114 | struct omap_device *od; | 120 | struct omap_device *od; |
@@ -116,11 +122,12 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data) | |||
116 | struct device *dev; | 122 | struct device *dev; |
117 | int bus_id = -1; | 123 | int bus_id = -1; |
118 | const char *oh_name, *name; | 124 | const char *oh_name, *name; |
125 | struct omap_musb_board_data *board_data; | ||
119 | 126 | ||
120 | if (cpu_is_omap3517() || cpu_is_omap3505()) { | 127 | if (musb_board_data) |
121 | } else if (cpu_is_omap44xx()) { | 128 | board_data = musb_board_data; |
122 | usb_musb_mux_init(board_data); | 129 | else |
123 | } | 130 | board_data = &musb_default_board_data; |
124 | 131 | ||
125 | /* | 132 | /* |
126 | * REVISIT: This line can be removed once all the platforms using | 133 | * REVISIT: This line can be removed once all the platforms using |
@@ -164,10 +171,15 @@ void __init usb_musb_init(struct omap_musb_board_data *board_data) | |||
164 | dev->dma_mask = &musb_dmamask; | 171 | dev->dma_mask = &musb_dmamask; |
165 | dev->coherent_dma_mask = musb_dmamask; | 172 | dev->coherent_dma_mask = musb_dmamask; |
166 | put_device(dev); | 173 | put_device(dev); |
174 | |||
175 | if (cpu_is_omap44xx()) | ||
176 | omap4430_phy_init(dev); | ||
167 | } | 177 | } |
168 | 178 | ||
169 | #else | 179 | #else |
170 | void __init usb_musb_init(struct omap_musb_board_data *board_data) | 180 | void __init usb_musb_init(struct omap_musb_board_data *board_data) |
171 | { | 181 | { |
182 | if (cpu_is_omap44xx()) | ||
183 | omap4430_phy_init(NULL); | ||
172 | } | 184 | } |
173 | #endif /* CONFIG_USB_MUSB_SOC */ | 185 | #endif /* CONFIG_USB_MUSB_SOC */ |
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c index 8a3c05f3c1d6..8dd26b765b7d 100644 --- a/arch/arm/mach-omap2/usb-tusb6010.c +++ b/arch/arm/mach-omap2/usb-tusb6010.c | |||
@@ -293,12 +293,11 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data, | |||
293 | ); | 293 | ); |
294 | 294 | ||
295 | /* IRQ */ | 295 | /* IRQ */ |
296 | status = gpio_request(irq, "TUSB6010 irq"); | 296 | status = gpio_request_one(irq, GPIOF_IN, "TUSB6010 irq"); |
297 | if (status < 0) { | 297 | if (status < 0) { |
298 | printk(error, 3, status); | 298 | printk(error, 3, status); |
299 | return status; | 299 | return status; |
300 | } | 300 | } |
301 | gpio_direction_input(irq); | ||
302 | tusb_resources[2].start = irq + IH_GPIO_BASE; | 301 | tusb_resources[2].start = irq + IH_GPIO_BASE; |
303 | 302 | ||
304 | /* set up memory timings ... can speed them up later */ | 303 | /* set up memory timings ... can speed them up later */ |
diff --git a/arch/arm/mach-omap2/voltage.c b/arch/arm/mach-omap2/voltage.c index 0c1552d9d995..9ef3789ded4b 100644 --- a/arch/arm/mach-omap2/voltage.c +++ b/arch/arm/mach-omap2/voltage.c | |||
@@ -148,7 +148,6 @@ static int vp_volt_debug_get(void *data, u64 *val) | |||
148 | } | 148 | } |
149 | 149 | ||
150 | vsel = vdd->read_reg(prm_mod_offs, vdd->vp_data->voltage); | 150 | vsel = vdd->read_reg(prm_mod_offs, vdd->vp_data->voltage); |
151 | pr_notice("curr_vsel = %x\n", vsel); | ||
152 | 151 | ||
153 | if (!vdd->pmic_info->vsel_to_uv) { | 152 | if (!vdd->pmic_info->vsel_to_uv) { |
154 | pr_warning("PMIC function to convert vsel to voltage" | 153 | pr_warning("PMIC function to convert vsel to voltage" |
diff --git a/arch/arm/plat-omap/include/plat/gpmc-smsc911x.h b/arch/arm/plat-omap/include/plat/gpmc-smsc911x.h index 872de0bf1e6b..ea6c9c88c725 100644 --- a/arch/arm/plat-omap/include/plat/gpmc-smsc911x.h +++ b/arch/arm/plat-omap/include/plat/gpmc-smsc911x.h | |||
@@ -14,14 +14,14 @@ | |||
14 | #ifndef __ASM_ARCH_OMAP_GPMC_SMSC911X_H__ | 14 | #ifndef __ASM_ARCH_OMAP_GPMC_SMSC911X_H__ |
15 | 15 | ||
16 | struct omap_smsc911x_platform_data { | 16 | struct omap_smsc911x_platform_data { |
17 | int id; | ||
17 | int cs; | 18 | int cs; |
18 | int gpio_irq; | 19 | int gpio_irq; |
19 | int gpio_reset; | 20 | int gpio_reset; |
20 | u32 flags; | 21 | u32 flags; |
21 | }; | 22 | }; |
22 | 23 | ||
23 | #if defined(CONFIG_SMSC911X) || \ | 24 | #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) |
24 | defined(CONFIG_SMSC911X_MODULE) | ||
25 | 25 | ||
26 | extern void gpmc_smsc911x_init(struct omap_smsc911x_platform_data *d); | 26 | extern void gpmc_smsc911x_init(struct omap_smsc911x_platform_data *d); |
27 | 27 | ||
diff --git a/arch/arm/plat-omap/include/plat/uncompress.h b/arch/arm/plat-omap/include/plat/uncompress.h index 565d2664f5a7..ac4b60d9aa29 100644 --- a/arch/arm/plat-omap/include/plat/uncompress.h +++ b/arch/arm/plat-omap/include/plat/uncompress.h | |||
@@ -129,7 +129,6 @@ static inline void __arch_decomp_setup(unsigned long arch_id) | |||
129 | DEBUG_LL_OMAP1(3, sx1); | 129 | DEBUG_LL_OMAP1(3, sx1); |
130 | 130 | ||
131 | /* omap2 based boards using UART1 */ | 131 | /* omap2 based boards using UART1 */ |
132 | DEBUG_LL_OMAP2(1, omap2evm); | ||
133 | DEBUG_LL_OMAP2(1, omap_2430sdp); | 132 | DEBUG_LL_OMAP2(1, omap_2430sdp); |
134 | DEBUG_LL_OMAP2(1, omap_apollon); | 133 | DEBUG_LL_OMAP2(1, omap_apollon); |
135 | DEBUG_LL_OMAP2(1, omap_h4); | 134 | DEBUG_LL_OMAP2(1, omap_h4); |
diff --git a/arch/arm/plat-omap/include/plat/usb.h b/arch/arm/plat-omap/include/plat/usb.h index 02b96c8f6a17..17d3c939775c 100644 --- a/arch/arm/plat-omap/include/plat/usb.h +++ b/arch/arm/plat-omap/include/plat/usb.h | |||
@@ -113,7 +113,7 @@ extern int omap4430_phy_suspend(struct device *dev, int suspend); | |||
113 | extern void am35x_musb_reset(void); | 113 | extern void am35x_musb_reset(void); |
114 | extern void am35x_musb_phy_power(u8 on); | 114 | extern void am35x_musb_phy_power(u8 on); |
115 | extern void am35x_musb_clear_irq(void); | 115 | extern void am35x_musb_clear_irq(void); |
116 | extern void am35x_musb_set_mode(u8 musb_mode); | 116 | extern void am35x_set_mode(u8 musb_mode); |
117 | 117 | ||
118 | /* | 118 | /* |
119 | * FIXME correct answer depends on hmc_mode, | 119 | * FIXME correct answer depends on hmc_mode, |
diff --git a/arch/avr32/include/asm/bitops.h b/arch/avr32/include/asm/bitops.h index 72444d97f80c..b70c19bab63a 100644 --- a/arch/avr32/include/asm/bitops.h +++ b/arch/avr32/include/asm/bitops.h | |||
@@ -270,14 +270,21 @@ static inline int __fls(unsigned long word) | |||
270 | 270 | ||
271 | unsigned long find_first_zero_bit(const unsigned long *addr, | 271 | unsigned long find_first_zero_bit(const unsigned long *addr, |
272 | unsigned long size); | 272 | unsigned long size); |
273 | #define find_first_zero_bit find_first_zero_bit | ||
274 | |||
273 | unsigned long find_next_zero_bit(const unsigned long *addr, | 275 | unsigned long find_next_zero_bit(const unsigned long *addr, |
274 | unsigned long size, | 276 | unsigned long size, |
275 | unsigned long offset); | 277 | unsigned long offset); |
278 | #define find_next_zero_bit find_next_zero_bit | ||
279 | |||
276 | unsigned long find_first_bit(const unsigned long *addr, | 280 | unsigned long find_first_bit(const unsigned long *addr, |
277 | unsigned long size); | 281 | unsigned long size); |
282 | #define find_first_bit find_first_bit | ||
283 | |||
278 | unsigned long find_next_bit(const unsigned long *addr, | 284 | unsigned long find_next_bit(const unsigned long *addr, |
279 | unsigned long size, | 285 | unsigned long size, |
280 | unsigned long offset); | 286 | unsigned long offset); |
287 | #define find_next_bit find_next_bit | ||
281 | 288 | ||
282 | /* | 289 | /* |
283 | * ffs: find first bit set. This is defined the same way as | 290 | * ffs: find first bit set. This is defined the same way as |
@@ -299,6 +306,14 @@ static inline int ffs(unsigned long word) | |||
299 | #include <asm-generic/bitops/hweight.h> | 306 | #include <asm-generic/bitops/hweight.h> |
300 | #include <asm-generic/bitops/lock.h> | 307 | #include <asm-generic/bitops/lock.h> |
301 | 308 | ||
309 | extern unsigned long find_next_zero_bit_le(const void *addr, | ||
310 | unsigned long size, unsigned long offset); | ||
311 | #define find_next_zero_bit_le find_next_zero_bit_le | ||
312 | |||
313 | extern unsigned long find_next_bit_le(const void *addr, | ||
314 | unsigned long size, unsigned long offset); | ||
315 | #define find_next_bit_le find_next_bit_le | ||
316 | |||
302 | #include <asm-generic/bitops/le.h> | 317 | #include <asm-generic/bitops/le.h> |
303 | #include <asm-generic/bitops/ext2-atomic.h> | 318 | #include <asm-generic/bitops/ext2-atomic.h> |
304 | 319 | ||
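The avr32 additions above define each optimized find_* helper as a macro of the same name; presumably this is the convention the generic headers use to decide whether a fallback is needed, roughly along these lines (a sketch of the consuming side, not avr32 code):

	/* in a generic bitops header: only declare the helper if the
	 * architecture has not already claimed it via "#define name name" */
	#ifndef find_next_bit
	extern unsigned long find_next_bit(const unsigned long *addr,
					   unsigned long size,
					   unsigned long offset);
	#endif
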
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig index a18180f2d007..d619b17c4413 100644 --- a/arch/blackfin/Kconfig +++ b/arch/blackfin/Kconfig | |||
@@ -47,9 +47,6 @@ config GENERIC_BUG | |||
47 | config ZONE_DMA | 47 | config ZONE_DMA |
48 | def_bool y | 48 | def_bool y |
49 | 49 | ||
50 | config GENERIC_FIND_NEXT_BIT | ||
51 | def_bool y | ||
52 | |||
53 | config GENERIC_GPIO | 50 | config GENERIC_GPIO |
54 | def_bool y | 51 | def_bool y |
55 | 52 | ||
diff --git a/arch/blackfin/include/asm/kgdb.h b/arch/blackfin/include/asm/kgdb.h index 3ac0c72e9fee..aaf884591b07 100644 --- a/arch/blackfin/include/asm/kgdb.h +++ b/arch/blackfin/include/asm/kgdb.h | |||
@@ -108,6 +108,7 @@ static inline void arch_kgdb_breakpoint(void) | |||
108 | #else | 108 | #else |
109 | # define CACHE_FLUSH_IS_SAFE 1 | 109 | # define CACHE_FLUSH_IS_SAFE 1 |
110 | #endif | 110 | #endif |
111 | #define GDB_ADJUSTS_BREAK_OFFSET | ||
111 | #define HW_INST_WATCHPOINT_NUM 6 | 112 | #define HW_INST_WATCHPOINT_NUM 6 |
112 | #define HW_WATCHPOINT_NUM 8 | 113 | #define HW_WATCHPOINT_NUM 8 |
113 | #define TYPE_INST_WATCHPOINT 0 | 114 | #define TYPE_INST_WATCHPOINT 0 |
diff --git a/arch/blackfin/include/asm/ptrace.h b/arch/blackfin/include/asm/ptrace.h index 1066d63e62b5..7854d4367c15 100644 --- a/arch/blackfin/include/asm/ptrace.h +++ b/arch/blackfin/include/asm/ptrace.h | |||
@@ -102,9 +102,6 @@ struct pt_regs { | |||
102 | /* user_mode returns true if only one bit is set in IPEND, other than the | 102 | /* user_mode returns true if only one bit is set in IPEND, other than the |
103 | master interrupt enable. */ | 103 | master interrupt enable. */ |
104 | #define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1))) | 104 | #define user_mode(regs) (!(((regs)->ipend & ~0x10) & (((regs)->ipend & ~0x10) - 1))) |
105 | #define instruction_pointer(regs) ((regs)->pc) | ||
106 | #define user_stack_pointer(regs) ((regs)->usp) | ||
107 | #define profile_pc(regs) instruction_pointer(regs) | ||
108 | extern void show_regs(struct pt_regs *); | 105 | extern void show_regs(struct pt_regs *); |
109 | 106 | ||
110 | #define arch_has_single_step() (1) | 107 | #define arch_has_single_step() (1) |
@@ -128,6 +125,8 @@ extern int is_user_addr_valid(struct task_struct *child, | |||
128 | ((unsigned long)task_stack_page(task) + \ | 125 | ((unsigned long)task_stack_page(task) + \ |
129 | (THREAD_SIZE - sizeof(struct pt_regs))) | 126 | (THREAD_SIZE - sizeof(struct pt_regs))) |
130 | 127 | ||
128 | #include <asm-generic/ptrace.h> | ||
129 | |||
131 | #endif /* __KERNEL__ */ | 130 | #endif /* __KERNEL__ */ |
132 | 131 | ||
133 | #endif /* __ASSEMBLY__ */ | 132 | #endif /* __ASSEMBLY__ */ |
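The Blackfin ptrace change above drops the locally defined instruction_pointer()/user_stack_pointer()/profile_pc macros and pulls in asm-generic/ptrace.h instead. As a rough, simplified sketch of what such a generic header provides (assumed, not quoted from the tree), it builds the common accessors from a few arch-overridable macros:

	#ifndef GET_IP
	#define GET_IP(regs)	((regs)->pc)
	#endif

	static inline unsigned long instruction_pointer(struct pt_regs *regs)
	{
		return GET_IP(regs);
	}

	#ifndef profile_pc
	#define profile_pc(regs)	instruction_pointer(regs)
	#endif
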
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig index a6d03069d0ff..b6b94a27d276 100644 --- a/arch/cris/Kconfig +++ b/arch/cris/Kconfig | |||
@@ -31,10 +31,6 @@ config ARCH_HAS_ILOG2_U64 | |||
31 | bool | 31 | bool |
32 | default n | 32 | default n |
33 | 33 | ||
34 | config GENERIC_FIND_NEXT_BIT | ||
35 | bool | ||
36 | default y | ||
37 | |||
38 | config GENERIC_HWEIGHT | 34 | config GENERIC_HWEIGHT |
39 | bool | 35 | bool |
40 | default y | 36 | default y |
diff --git a/arch/frv/Kconfig b/arch/frv/Kconfig index 064f62196745..cb884e489425 100644 --- a/arch/frv/Kconfig +++ b/arch/frv/Kconfig | |||
@@ -19,14 +19,6 @@ config RWSEM_GENERIC_SPINLOCK | |||
19 | config RWSEM_XCHGADD_ALGORITHM | 19 | config RWSEM_XCHGADD_ALGORITHM |
20 | bool | 20 | bool |
21 | 21 | ||
22 | config GENERIC_FIND_NEXT_BIT | ||
23 | bool | ||
24 | default y | ||
25 | |||
26 | config GENERIC_FIND_BIT_LE | ||
27 | bool | ||
28 | default y | ||
29 | |||
30 | config GENERIC_HWEIGHT | 22 | config GENERIC_HWEIGHT |
31 | bool | 23 | bool |
32 | default y | 24 | default y |
diff --git a/arch/frv/include/asm/suspend.h b/arch/frv/include/asm/suspend.h deleted file mode 100644 index 5fa7b5a6ee40..000000000000 --- a/arch/frv/include/asm/suspend.h +++ /dev/null | |||
@@ -1,20 +0,0 @@ | |||
1 | /* suspend.h: suspension stuff | ||
2 | * | ||
3 | * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. | ||
4 | * Written by David Howells (dhowells@redhat.com) | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or | ||
7 | * modify it under the terms of the GNU General Public License | ||
8 | * as published by the Free Software Foundation; either version | ||
9 | * 2 of the License, or (at your option) any later version. | ||
10 | */ | ||
11 | |||
12 | #ifndef _ASM_SUSPEND_H | ||
13 | #define _ASM_SUSPEND_H | ||
14 | |||
15 | static inline int arch_prepare_suspend(void) | ||
16 | { | ||
17 | return 0; | ||
18 | } | ||
19 | |||
20 | #endif /* _ASM_SUSPEND_H */ | ||
diff --git a/arch/h8300/Kconfig b/arch/h8300/Kconfig index e20322ffcaf8..091ed6192ae8 100644 --- a/arch/h8300/Kconfig +++ b/arch/h8300/Kconfig | |||
@@ -41,14 +41,6 @@ config ARCH_HAS_ILOG2_U64 | |||
41 | bool | 41 | bool |
42 | default n | 42 | default n |
43 | 43 | ||
44 | config GENERIC_FIND_NEXT_BIT | ||
45 | bool | ||
46 | default y | ||
47 | |||
48 | config GENERIC_FIND_BIT_LE | ||
49 | bool | ||
50 | default y | ||
51 | |||
52 | config GENERIC_HWEIGHT | 44 | config GENERIC_HWEIGHT |
53 | bool | 45 | bool |
54 | default y | 46 | default y |
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig index e5cc56ae6ce3..38280ef4a2af 100644 --- a/arch/ia64/Kconfig +++ b/arch/ia64/Kconfig | |||
@@ -78,10 +78,6 @@ config HUGETLB_PAGE_SIZE_VARIABLE | |||
78 | depends on HUGETLB_PAGE | 78 | depends on HUGETLB_PAGE |
79 | default y | 79 | default y |
80 | 80 | ||
81 | config GENERIC_FIND_NEXT_BIT | ||
82 | bool | ||
83 | default y | ||
84 | |||
85 | config GENERIC_CALIBRATE_DELAY | 81 | config GENERIC_CALIBRATE_DELAY |
86 | bool | 82 | bool |
87 | default y | 83 | default y |
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c index 04440cc09b40..85118dfe9bb5 100644 --- a/arch/ia64/kernel/time.c +++ b/arch/ia64/kernel/time.c | |||
@@ -36,7 +36,7 @@ | |||
36 | static cycle_t itc_get_cycles(struct clocksource *cs); | 36 | static cycle_t itc_get_cycles(struct clocksource *cs); |
37 | 37 | ||
38 | struct fsyscall_gtod_data_t fsyscall_gtod_data = { | 38 | struct fsyscall_gtod_data_t fsyscall_gtod_data = { |
39 | .lock = SEQLOCK_UNLOCKED, | 39 | .lock = __SEQLOCK_UNLOCKED(fsyscall_gtod_data.lock), |
40 | }; | 40 | }; |
41 | 41 | ||
42 | struct itc_jitter_data_t itc_jitter_data; | 42 | struct itc_jitter_data_t itc_jitter_data; |
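The ia64 line above replaces the old SEQLOCK_UNLOCKED static initializer with __SEQLOCK_UNLOCKED(name), which takes the name of the variable being initialized (presumably so the embedded lock gets a proper per-instance lockdep class). The same pattern applied to a hypothetical structure:

	struct example_gtod_data {
		seqlock_t	lock;
		u64		cycle_last;
	};

	static struct example_gtod_data example_gtod_data = {
		.lock = __SEQLOCK_UNLOCKED(example_gtod_data.lock),
	};
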
diff --git a/arch/m32r/Kconfig b/arch/m32r/Kconfig index 736b808d2291..85b44e858225 100644 --- a/arch/m32r/Kconfig +++ b/arch/m32r/Kconfig | |||
@@ -256,14 +256,6 @@ config ARCH_HAS_ILOG2_U64 | |||
256 | bool | 256 | bool |
257 | default n | 257 | default n |
258 | 258 | ||
259 | config GENERIC_FIND_NEXT_BIT | ||
260 | bool | ||
261 | default y | ||
262 | |||
263 | config GENERIC_FIND_BIT_LE | ||
264 | bool | ||
265 | default y | ||
266 | |||
267 | config GENERIC_HWEIGHT | 259 | config GENERIC_HWEIGHT |
268 | bool | 260 | bool |
269 | default y | 261 | default y |
diff --git a/arch/m32r/include/asm/smp.h b/arch/m32r/include/asm/smp.h index 8accc1bb0263..cf7829a61551 100644 --- a/arch/m32r/include/asm/smp.h +++ b/arch/m32r/include/asm/smp.h | |||
@@ -81,11 +81,11 @@ static __inline__ int cpu_number_map(int cpu) | |||
81 | 81 | ||
82 | static __inline__ unsigned int num_booting_cpus(void) | 82 | static __inline__ unsigned int num_booting_cpus(void) |
83 | { | 83 | { |
84 | return cpus_weight(cpu_callout_map); | 84 | return cpumask_weight(&cpu_callout_map); |
85 | } | 85 | } |
86 | 86 | ||
87 | extern void smp_send_timer(void); | 87 | extern void smp_send_timer(void); |
88 | extern unsigned long send_IPI_mask_phys(cpumask_t, int, int); | 88 | extern unsigned long send_IPI_mask_phys(const cpumask_t*, int, int); |
89 | 89 | ||
90 | extern void arch_send_call_function_single_ipi(int cpu); | 90 | extern void arch_send_call_function_single_ipi(int cpu); |
91 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); | 91 | extern void arch_send_call_function_ipi_mask(const struct cpumask *mask); |
diff --git a/arch/m32r/kernel/smp.c b/arch/m32r/kernel/smp.c index fc10b39893d4..092d40a6708e 100644 --- a/arch/m32r/kernel/smp.c +++ b/arch/m32r/kernel/smp.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include <asm/io.h> | 30 | #include <asm/io.h> |
31 | #include <asm/mmu_context.h> | 31 | #include <asm/mmu_context.h> |
32 | #include <asm/m32r.h> | 32 | #include <asm/m32r.h> |
33 | #include <asm/tlbflush.h> | ||
33 | 34 | ||
34 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | 35 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ |
35 | /* Data structures and variables */ | 36 | /* Data structures and variables */ |
@@ -61,33 +62,22 @@ extern spinlock_t ipi_lock[]; | |||
61 | /* Function Prototypes */ | 62 | /* Function Prototypes */ |
62 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | 63 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ |
63 | 64 | ||
64 | void smp_send_reschedule(int); | ||
65 | void smp_reschedule_interrupt(void); | 65 | void smp_reschedule_interrupt(void); |
66 | |||
67 | void smp_flush_cache_all(void); | ||
68 | void smp_flush_cache_all_interrupt(void); | 66 | void smp_flush_cache_all_interrupt(void); |
69 | 67 | ||
70 | void smp_flush_tlb_all(void); | ||
71 | static void flush_tlb_all_ipi(void *); | 68 | static void flush_tlb_all_ipi(void *); |
72 | |||
73 | void smp_flush_tlb_mm(struct mm_struct *); | ||
74 | void smp_flush_tlb_range(struct vm_area_struct *, unsigned long, \ | ||
75 | unsigned long); | ||
76 | void smp_flush_tlb_page(struct vm_area_struct *, unsigned long); | ||
77 | static void flush_tlb_others(cpumask_t, struct mm_struct *, | 69 | static void flush_tlb_others(cpumask_t, struct mm_struct *, |
78 | struct vm_area_struct *, unsigned long); | 70 | struct vm_area_struct *, unsigned long); |
71 | |||
79 | void smp_invalidate_interrupt(void); | 72 | void smp_invalidate_interrupt(void); |
80 | 73 | ||
81 | void smp_send_stop(void); | ||
82 | static void stop_this_cpu(void *); | 74 | static void stop_this_cpu(void *); |
83 | 75 | ||
84 | void smp_send_timer(void); | ||
85 | void smp_ipi_timer_interrupt(struct pt_regs *); | 76 | void smp_ipi_timer_interrupt(struct pt_regs *); |
86 | void smp_local_timer_interrupt(void); | 77 | void smp_local_timer_interrupt(void); |
87 | 78 | ||
88 | static void send_IPI_allbutself(int, int); | 79 | static void send_IPI_allbutself(int, int); |
89 | static void send_IPI_mask(const struct cpumask *, int, int); | 80 | static void send_IPI_mask(const struct cpumask *, int, int); |
90 | unsigned long send_IPI_mask_phys(cpumask_t, int, int); | ||
91 | 81 | ||
92 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | 82 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ |
93 | /* Rescheduling request Routines */ | 83 | /* Rescheduling request Routines */ |
@@ -162,10 +152,10 @@ void smp_flush_cache_all(void) | |||
162 | unsigned long *mask; | 152 | unsigned long *mask; |
163 | 153 | ||
164 | preempt_disable(); | 154 | preempt_disable(); |
165 | cpumask = cpu_online_map; | 155 | cpumask_copy(&cpumask, cpu_online_mask); |
166 | cpu_clear(smp_processor_id(), cpumask); | 156 | cpumask_clear_cpu(smp_processor_id(), &cpumask); |
167 | spin_lock(&flushcache_lock); | 157 | spin_lock(&flushcache_lock); |
168 | mask=cpus_addr(cpumask); | 158 | mask=cpumask_bits(&cpumask); |
169 | atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); | 159 | atomic_set_mask(*mask, (atomic_t *)&flushcache_cpumask); |
170 | send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0); | 160 | send_IPI_mask(&cpumask, INVALIDATE_CACHE_IPI, 0); |
171 | _flush_cache_copyback_all(); | 161 | _flush_cache_copyback_all(); |
@@ -263,8 +253,8 @@ void smp_flush_tlb_mm(struct mm_struct *mm) | |||
263 | preempt_disable(); | 253 | preempt_disable(); |
264 | cpu_id = smp_processor_id(); | 254 | cpu_id = smp_processor_id(); |
265 | mmc = &mm->context[cpu_id]; | 255 | mmc = &mm->context[cpu_id]; |
266 | cpu_mask = *mm_cpumask(mm); | 256 | cpumask_copy(&cpu_mask, mm_cpumask(mm)); |
267 | cpu_clear(cpu_id, cpu_mask); | 257 | cpumask_clear_cpu(cpu_id, &cpu_mask); |
268 | 258 | ||
269 | if (*mmc != NO_CONTEXT) { | 259 | if (*mmc != NO_CONTEXT) { |
270 | local_irq_save(flags); | 260 | local_irq_save(flags); |
@@ -275,7 +265,7 @@ void smp_flush_tlb_mm(struct mm_struct *mm) | |||
275 | cpumask_clear_cpu(cpu_id, mm_cpumask(mm)); | 265 | cpumask_clear_cpu(cpu_id, mm_cpumask(mm)); |
276 | local_irq_restore(flags); | 266 | local_irq_restore(flags); |
277 | } | 267 | } |
278 | if (!cpus_empty(cpu_mask)) | 268 | if (!cpumask_empty(&cpu_mask)) |
279 | flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL); | 269 | flush_tlb_others(cpu_mask, mm, NULL, FLUSH_ALL); |
280 | 270 | ||
281 | preempt_enable(); | 271 | preempt_enable(); |
@@ -333,8 +323,8 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va) | |||
333 | preempt_disable(); | 323 | preempt_disable(); |
334 | cpu_id = smp_processor_id(); | 324 | cpu_id = smp_processor_id(); |
335 | mmc = &mm->context[cpu_id]; | 325 | mmc = &mm->context[cpu_id]; |
336 | cpu_mask = *mm_cpumask(mm); | 326 | cpumask_copy(&cpu_mask, mm_cpumask(mm)); |
337 | cpu_clear(cpu_id, cpu_mask); | 327 | cpumask_clear_cpu(cpu_id, &cpu_mask); |
338 | 328 | ||
339 | #ifdef DEBUG_SMP | 329 | #ifdef DEBUG_SMP |
340 | if (!mm) | 330 | if (!mm) |
@@ -348,7 +338,7 @@ void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long va) | |||
348 | __flush_tlb_page(va); | 338 | __flush_tlb_page(va); |
349 | local_irq_restore(flags); | 339 | local_irq_restore(flags); |
350 | } | 340 | } |
351 | if (!cpus_empty(cpu_mask)) | 341 | if (!cpumask_empty(&cpu_mask)) |
352 | flush_tlb_others(cpu_mask, mm, vma, va); | 342 | flush_tlb_others(cpu_mask, mm, vma, va); |
353 | 343 | ||
354 | preempt_enable(); | 344 | preempt_enable(); |
@@ -395,14 +385,14 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, | |||
395 | * - current CPU must not be in mask | 385 | * - current CPU must not be in mask |
396 | * - mask must exist :) | 386 | * - mask must exist :) |
397 | */ | 387 | */ |
398 | BUG_ON(cpus_empty(cpumask)); | 388 | BUG_ON(cpumask_empty(&cpumask)); |
399 | 389 | ||
400 | BUG_ON(cpu_isset(smp_processor_id(), cpumask)); | 390 | BUG_ON(cpumask_test_cpu(smp_processor_id(), &cpumask)); |
401 | BUG_ON(!mm); | 391 | BUG_ON(!mm); |
402 | 392 | ||
403 | /* If a CPU which we ran on has gone down, OK. */ | 393 | /* If a CPU which we ran on has gone down, OK. */ |
404 | cpus_and(cpumask, cpumask, cpu_online_map); | 394 | cpumask_and(&cpumask, &cpumask, cpu_online_mask); |
405 | if (cpus_empty(cpumask)) | 395 | if (cpumask_empty(&cpumask)) |
406 | return; | 396 | return; |
407 | 397 | ||
408 | /* | 398 | /* |
@@ -416,7 +406,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, | |||
416 | flush_mm = mm; | 406 | flush_mm = mm; |
417 | flush_vma = vma; | 407 | flush_vma = vma; |
418 | flush_va = va; | 408 | flush_va = va; |
419 | mask=cpus_addr(cpumask); | 409 | mask=cpumask_bits(&cpumask); |
420 | atomic_set_mask(*mask, (atomic_t *)&flush_cpumask); | 410 | atomic_set_mask(*mask, (atomic_t *)&flush_cpumask); |
421 | 411 | ||
422 | /* | 412 | /* |
@@ -425,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm, | |||
425 | */ | 415 | */ |
426 | send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0); | 416 | send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0); |
427 | 417 | ||
428 | while (!cpus_empty(flush_cpumask)) { | 418 | while (!cpumask_empty((cpumask_t*)&flush_cpumask)) { |
429 | /* nothing. lockup detection does not belong here */ | 419 | /* nothing. lockup detection does not belong here */ |
430 | mb(); | 420 | mb(); |
431 | } | 421 | } |
@@ -460,7 +450,7 @@ void smp_invalidate_interrupt(void) | |||
460 | int cpu_id = smp_processor_id(); | 450 | int cpu_id = smp_processor_id(); |
461 | unsigned long *mmc = &flush_mm->context[cpu_id]; | 451 | unsigned long *mmc = &flush_mm->context[cpu_id]; |
462 | 452 | ||
463 | if (!cpu_isset(cpu_id, flush_cpumask)) | 453 | if (!cpumask_test_cpu(cpu_id, &flush_cpumask)) |
464 | return; | 454 | return; |
465 | 455 | ||
466 | if (flush_va == FLUSH_ALL) { | 456 | if (flush_va == FLUSH_ALL) { |
@@ -478,7 +468,7 @@ void smp_invalidate_interrupt(void) | |||
478 | __flush_tlb_page(va); | 468 | __flush_tlb_page(va); |
479 | } | 469 | } |
480 | } | 470 | } |
481 | cpu_clear(cpu_id, flush_cpumask); | 471 | cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask); |
482 | } | 472 | } |
483 | 473 | ||
484 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | 474 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ |
@@ -530,7 +520,7 @@ static void stop_this_cpu(void *dummy) | |||
530 | /* | 520 | /* |
531 | * Remove this CPU: | 521 | * Remove this CPU: |
532 | */ | 522 | */ |
533 | cpu_clear(cpu_id, cpu_online_map); | 523 | set_cpu_online(cpu_id, false); |
534 | 524 | ||
535 | /* | 525 | /* |
536 | * PSW IE = 1; | 526 | * PSW IE = 1; |
@@ -725,8 +715,8 @@ static void send_IPI_allbutself(int ipi_num, int try) | |||
725 | { | 715 | { |
726 | cpumask_t cpumask; | 716 | cpumask_t cpumask; |
727 | 717 | ||
728 | cpumask = cpu_online_map; | 718 | cpumask_copy(&cpumask, cpu_online_mask); |
729 | cpu_clear(smp_processor_id(), cpumask); | 719 | cpumask_clear_cpu(smp_processor_id(), &cpumask); |
730 | 720 | ||
731 | send_IPI_mask(&cpumask, ipi_num, try); | 721 | send_IPI_mask(&cpumask, ipi_num, try); |
732 | } | 722 | } |
@@ -763,13 +753,13 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try) | |||
763 | cpumask_and(&tmp, cpumask, cpu_online_mask); | 753 | cpumask_and(&tmp, cpumask, cpu_online_mask); |
764 | BUG_ON(!cpumask_equal(cpumask, &tmp)); | 754 | BUG_ON(!cpumask_equal(cpumask, &tmp)); |
765 | 755 | ||
766 | physid_mask = CPU_MASK_NONE; | 756 | cpumask_clear(&physid_mask); |
767 | for_each_cpu(cpu_id, cpumask) { | 757 | for_each_cpu(cpu_id, cpumask) { |
768 | if ((phys_id = cpu_to_physid(cpu_id)) != -1) | 758 | if ((phys_id = cpu_to_physid(cpu_id)) != -1) |
769 | cpu_set(phys_id, physid_mask); | 759 | cpumask_set_cpu(phys_id, &physid_mask); |
770 | } | 760 | } |
771 | 761 | ||
772 | send_IPI_mask_phys(physid_mask, ipi_num, try); | 762 | send_IPI_mask_phys(&physid_mask, ipi_num, try); |
773 | } | 763 | } |
774 | 764 | ||
775 | /*==========================================================================* | 765 | /*==========================================================================* |
@@ -792,14 +782,14 @@ static void send_IPI_mask(const struct cpumask *cpumask, int ipi_num, int try) | |||
792 | * ---------- --- -------------------------------------------------------- | 782 | * ---------- --- -------------------------------------------------------- |
793 | * | 783 | * |
794 | *==========================================================================*/ | 784 | *==========================================================================*/ |
795 | unsigned long send_IPI_mask_phys(cpumask_t physid_mask, int ipi_num, | 785 | unsigned long send_IPI_mask_phys(const cpumask_t *physid_mask, int ipi_num, |
796 | int try) | 786 | int try) |
797 | { | 787 | { |
798 | spinlock_t *ipilock; | 788 | spinlock_t *ipilock; |
799 | volatile unsigned long *ipicr_addr; | 789 | volatile unsigned long *ipicr_addr; |
800 | unsigned long ipicr_val; | 790 | unsigned long ipicr_val; |
801 | unsigned long my_physid_mask; | 791 | unsigned long my_physid_mask; |
802 | unsigned long mask = cpus_addr(physid_mask)[0]; | 792 | unsigned long mask = cpumask_bits(physid_mask)[0]; |
803 | 793 | ||
804 | 794 | ||
805 | if (mask & ~physids_coerce(phys_cpu_present_map)) | 795 | if (mask & ~physids_coerce(phys_cpu_present_map)) |
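
The hunks above convert the m32r IPI/TLB-flush path from the old value-based cpumask API (cpus_empty(), cpu_isset(), cpus_addr(), cpu_online_map) to the pointer-based cpumask_*() helpers. A minimal sketch of the new style, assuming <linux/cpumask.h> and <linux/smp.h>; the helper name is illustrative, not part of the patch:

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    /* Illustrative helper (not from the patch): collect all online CPUs
     * except the current one, using the pointer-based cpumask API. */
    static void build_other_cpus(struct cpumask *dst)
    {
            cpumask_copy(dst, cpu_online_mask);         /* was: dst = cpu_online_map */
            cpumask_clear_cpu(smp_processor_id(), dst); /* was: cpu_clear(cpu, dst)  */

            if (cpumask_empty(dst))                     /* was: cpus_empty(dst)      */
                    pr_debug("no other CPUs online\n");
    }
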
diff --git a/arch/m32r/kernel/smpboot.c b/arch/m32r/kernel/smpboot.c index e034844cfc0d..cfdbe5d15002 100644 --- a/arch/m32r/kernel/smpboot.c +++ b/arch/m32r/kernel/smpboot.c | |||
@@ -135,9 +135,9 @@ void __devinit smp_prepare_boot_cpu(void) | |||
135 | { | 135 | { |
136 | bsp_phys_id = hard_smp_processor_id(); | 136 | bsp_phys_id = hard_smp_processor_id(); |
137 | physid_set(bsp_phys_id, phys_cpu_present_map); | 137 | physid_set(bsp_phys_id, phys_cpu_present_map); |
138 | cpu_set(0, cpu_online_map); /* BSP's cpu_id == 0 */ | 138 | set_cpu_online(0, true); /* BSP's cpu_id == 0 */ |
139 | cpu_set(0, cpu_callout_map); | 139 | cpumask_set_cpu(0, &cpu_callout_map); |
140 | cpu_set(0, cpu_callin_map); | 140 | cpumask_set_cpu(0, &cpu_callin_map); |
141 | 141 | ||
142 | /* | 142 | /* |
143 | * Initialize the logical to physical CPU number mapping | 143 | * Initialize the logical to physical CPU number mapping |
@@ -178,7 +178,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus) | |||
178 | for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++) | 178 | for (phys_id = 0 ; phys_id < nr_cpu ; phys_id++) |
179 | physid_set(phys_id, phys_cpu_present_map); | 179 | physid_set(phys_id, phys_cpu_present_map); |
180 | #ifndef CONFIG_HOTPLUG_CPU | 180 | #ifndef CONFIG_HOTPLUG_CPU |
181 | init_cpu_present(&cpu_possible_map); | 181 | init_cpu_present(cpu_possible_mask); |
182 | #endif | 182 | #endif |
183 | 183 | ||
184 | show_mp_info(nr_cpu); | 184 | show_mp_info(nr_cpu); |
@@ -294,10 +294,10 @@ static void __init do_boot_cpu(int phys_id) | |||
294 | send_status = 0; | 294 | send_status = 0; |
295 | boot_status = 0; | 295 | boot_status = 0; |
296 | 296 | ||
297 | cpu_set(phys_id, cpu_bootout_map); | 297 | cpumask_set_cpu(phys_id, &cpu_bootout_map); |
298 | 298 | ||
299 | /* Send Startup IPI */ | 299 | /* Send Startup IPI */ |
300 | send_IPI_mask_phys(cpumask_of_cpu(phys_id), CPU_BOOT_IPI, 0); | 300 | send_IPI_mask_phys(cpumask_of(phys_id), CPU_BOOT_IPI, 0); |
301 | 301 | ||
302 | Dprintk("Waiting for send to finish...\n"); | 302 | Dprintk("Waiting for send to finish...\n"); |
303 | timeout = 0; | 303 | timeout = 0; |
@@ -306,7 +306,7 @@ static void __init do_boot_cpu(int phys_id) | |||
306 | do { | 306 | do { |
307 | Dprintk("+"); | 307 | Dprintk("+"); |
308 | udelay(1000); | 308 | udelay(1000); |
309 | send_status = !cpu_isset(phys_id, cpu_bootin_map); | 309 | send_status = !cpumask_test_cpu(phys_id, &cpu_bootin_map); |
310 | } while (send_status && (timeout++ < 100)); | 310 | } while (send_status && (timeout++ < 100)); |
311 | 311 | ||
312 | Dprintk("After Startup.\n"); | 312 | Dprintk("After Startup.\n"); |
@@ -316,19 +316,19 @@ static void __init do_boot_cpu(int phys_id) | |||
316 | * allow APs to start initializing. | 316 | * allow APs to start initializing. |
317 | */ | 317 | */ |
318 | Dprintk("Before Callout %d.\n", cpu_id); | 318 | Dprintk("Before Callout %d.\n", cpu_id); |
319 | cpu_set(cpu_id, cpu_callout_map); | 319 | cpumask_set_cpu(cpu_id, &cpu_callout_map); |
320 | Dprintk("After Callout %d.\n", cpu_id); | 320 | Dprintk("After Callout %d.\n", cpu_id); |
321 | 321 | ||
322 | /* | 322 | /* |
323 | * Wait 5s total for a response | 323 | * Wait 5s total for a response |
324 | */ | 324 | */ |
325 | for (timeout = 0; timeout < 5000; timeout++) { | 325 | for (timeout = 0; timeout < 5000; timeout++) { |
326 | if (cpu_isset(cpu_id, cpu_callin_map)) | 326 | if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) |
327 | break; /* It has booted */ | 327 | break; /* It has booted */ |
328 | udelay(1000); | 328 | udelay(1000); |
329 | } | 329 | } |
330 | 330 | ||
331 | if (cpu_isset(cpu_id, cpu_callin_map)) { | 331 | if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) { |
332 | /* number CPUs logically, starting from 1 (BSP is 0) */ | 332 | /* number CPUs logically, starting from 1 (BSP is 0) */ |
333 | Dprintk("OK.\n"); | 333 | Dprintk("OK.\n"); |
334 | } else { | 334 | } else { |
@@ -340,9 +340,9 @@ static void __init do_boot_cpu(int phys_id) | |||
340 | 340 | ||
341 | if (send_status || boot_status) { | 341 | if (send_status || boot_status) { |
342 | unmap_cpu_to_physid(cpu_id, phys_id); | 342 | unmap_cpu_to_physid(cpu_id, phys_id); |
343 | cpu_clear(cpu_id, cpu_callout_map); | 343 | cpumask_clear_cpu(cpu_id, &cpu_callout_map); |
344 | cpu_clear(cpu_id, cpu_callin_map); | 344 | cpumask_clear_cpu(cpu_id, &cpu_callin_map); |
345 | cpu_clear(cpu_id, cpu_initialized); | 345 | cpumask_clear_cpu(cpu_id, &cpu_initialized); |
346 | cpucount--; | 346 | cpucount--; |
347 | } | 347 | } |
348 | } | 348 | } |
@@ -351,17 +351,17 @@ int __cpuinit __cpu_up(unsigned int cpu_id) | |||
351 | { | 351 | { |
352 | int timeout; | 352 | int timeout; |
353 | 353 | ||
354 | cpu_set(cpu_id, smp_commenced_mask); | 354 | cpumask_set_cpu(cpu_id, &smp_commenced_mask); |
355 | 355 | ||
356 | /* | 356 | /* |
357 | * Wait 5s total for a response | 357 | * Wait 5s total for a response |
358 | */ | 358 | */ |
359 | for (timeout = 0; timeout < 5000; timeout++) { | 359 | for (timeout = 0; timeout < 5000; timeout++) { |
360 | if (cpu_isset(cpu_id, cpu_online_map)) | 360 | if (cpu_online(cpu_id)) |
361 | break; | 361 | break; |
362 | udelay(1000); | 362 | udelay(1000); |
363 | } | 363 | } |
364 | if (!cpu_isset(cpu_id, cpu_online_map)) | 364 | if (!cpu_online(cpu_id)) |
365 | BUG(); | 365 | BUG(); |
366 | 366 | ||
367 | return 0; | 367 | return 0; |
@@ -373,11 +373,11 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
373 | unsigned long bogosum = 0; | 373 | unsigned long bogosum = 0; |
374 | 374 | ||
375 | for (timeout = 0; timeout < 5000; timeout++) { | 375 | for (timeout = 0; timeout < 5000; timeout++) { |
376 | if (cpus_equal(cpu_callin_map, cpu_online_map)) | 376 | if (cpumask_equal(&cpu_callin_map, cpu_online_mask)) |
377 | break; | 377 | break; |
378 | udelay(1000); | 378 | udelay(1000); |
379 | } | 379 | } |
380 | if (!cpus_equal(cpu_callin_map, cpu_online_map)) | 380 | if (!cpumask_equal(&cpu_callin_map, cpu_online_mask)) |
381 | BUG(); | 381 | BUG(); |
382 | 382 | ||
383 | for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++) | 383 | for (cpu_id = 0 ; cpu_id < num_online_cpus() ; cpu_id++) |
@@ -388,7 +388,7 @@ void __init smp_cpus_done(unsigned int max_cpus) | |||
388 | */ | 388 | */ |
389 | Dprintk("Before bogomips.\n"); | 389 | Dprintk("Before bogomips.\n"); |
390 | if (cpucount) { | 390 | if (cpucount) { |
391 | for_each_cpu_mask(cpu_id, cpu_online_map) | 391 | for_each_cpu(cpu_id, cpu_online_mask) |
392 | bogosum += cpu_data[cpu_id].loops_per_jiffy; | 392 | bogosum += cpu_data[cpu_id].loops_per_jiffy; |
393 | 393 | ||
394 | printk(KERN_INFO "Total of %d processors activated " \ | 394 | printk(KERN_INFO "Total of %d processors activated " \ |
@@ -425,7 +425,7 @@ int __init start_secondary(void *unused) | |||
425 | cpu_init(); | 425 | cpu_init(); |
426 | preempt_disable(); | 426 | preempt_disable(); |
427 | smp_callin(); | 427 | smp_callin(); |
428 | while (!cpu_isset(smp_processor_id(), smp_commenced_mask)) | 428 | while (!cpumask_test_cpu(smp_processor_id(), &smp_commenced_mask)) |
429 | cpu_relax(); | 429 | cpu_relax(); |
430 | 430 | ||
431 | smp_online(); | 431 | smp_online(); |
@@ -463,7 +463,7 @@ static void __init smp_callin(void) | |||
463 | int cpu_id = smp_processor_id(); | 463 | int cpu_id = smp_processor_id(); |
464 | unsigned long timeout; | 464 | unsigned long timeout; |
465 | 465 | ||
466 | if (cpu_isset(cpu_id, cpu_callin_map)) { | 466 | if (cpumask_test_cpu(cpu_id, &cpu_callin_map)) { |
467 | printk("huh, phys CPU#%d, CPU#%d already present??\n", | 467 | printk("huh, phys CPU#%d, CPU#%d already present??\n", |
468 | phys_id, cpu_id); | 468 | phys_id, cpu_id); |
469 | BUG(); | 469 | BUG(); |
@@ -474,7 +474,7 @@ static void __init smp_callin(void) | |||
474 | timeout = jiffies + (2 * HZ); | 474 | timeout = jiffies + (2 * HZ); |
475 | while (time_before(jiffies, timeout)) { | 475 | while (time_before(jiffies, timeout)) { |
476 | /* Has the boot CPU finished its STARTUP sequence ? */ | 476 | /* Has the boot CPU finished its STARTUP sequence ? */ |
477 | if (cpu_isset(cpu_id, cpu_callout_map)) | 477 | if (cpumask_test_cpu(cpu_id, &cpu_callout_map)) |
478 | break; | 478 | break; |
479 | cpu_relax(); | 479 | cpu_relax(); |
480 | } | 480 | } |
@@ -486,7 +486,7 @@ static void __init smp_callin(void) | |||
486 | } | 486 | } |
487 | 487 | ||
488 | /* Allow the master to continue. */ | 488 | /* Allow the master to continue. */ |
489 | cpu_set(cpu_id, cpu_callin_map); | 489 | cpumask_set_cpu(cpu_id, &cpu_callin_map); |
490 | } | 490 | } |
491 | 491 | ||
492 | static void __init smp_online(void) | 492 | static void __init smp_online(void) |
@@ -503,7 +503,7 @@ static void __init smp_online(void) | |||
503 | /* Save our processor parameters */ | 503 | /* Save our processor parameters */ |
504 | smp_store_cpu_info(cpu_id); | 504 | smp_store_cpu_info(cpu_id); |
505 | 505 | ||
506 | cpu_set(cpu_id, cpu_online_map); | 506 | set_cpu_online(cpu_id, true); |
507 | } | 507 | } |
508 | 508 | ||
509 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ | 509 | /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/ |
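
smpboot.c likewise stops writing cpu_online_map directly and goes through the accessors. A hedged sketch of the same pattern (the loops_per_jiffy array parameter stands in for the arch-specific cpu_data):

    #include <linux/cpumask.h>
    #include <linux/smp.h>

    /* Illustrative only: mark the boot CPU online via the accessor and
     * walk the online mask with for_each_cpu(). */
    static unsigned long online_bogosum(const unsigned long *loops_per_jiffy)
    {
            unsigned long bogosum = 0;
            int cpu;

            set_cpu_online(smp_processor_id(), true);   /* was: cpu_set(id, cpu_online_map) */

            for_each_cpu(cpu, cpu_online_mask)          /* was: for_each_cpu_mask(...)      */
                    bogosum += loops_per_jiffy[cpu];

            return bogosum;
    }
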
diff --git a/arch/m68k/Kconfig.nommu b/arch/m68k/Kconfig.nommu index 273bccab9517..fc98f9b9d4d2 100644 --- a/arch/m68k/Kconfig.nommu +++ b/arch/m68k/Kconfig.nommu | |||
@@ -2,10 +2,6 @@ config FPU | |||
2 | bool | 2 | bool |
3 | default n | 3 | default n |
4 | 4 | ||
5 | config GENERIC_FIND_NEXT_BIT | ||
6 | bool | ||
7 | default y | ||
8 | |||
9 | config GENERIC_GPIO | 5 | config GENERIC_GPIO |
10 | bool | 6 | bool |
11 | default n | 7 | default n |
diff --git a/arch/m68k/include/asm/bitops_mm.h b/arch/m68k/include/asm/bitops_mm.h index e9020f88a748..89cf5b814a4d 100644 --- a/arch/m68k/include/asm/bitops_mm.h +++ b/arch/m68k/include/asm/bitops_mm.h | |||
@@ -200,6 +200,7 @@ out: | |||
200 | res += ((long)p - (long)vaddr - 4) * 8; | 200 | res += ((long)p - (long)vaddr - 4) * 8; |
201 | return res < size ? res : size; | 201 | return res < size ? res : size; |
202 | } | 202 | } |
203 | #define find_first_zero_bit find_first_zero_bit | ||
203 | 204 | ||
204 | static inline int find_next_zero_bit(const unsigned long *vaddr, int size, | 205 | static inline int find_next_zero_bit(const unsigned long *vaddr, int size, |
205 | int offset) | 206 | int offset) |
@@ -229,6 +230,7 @@ static inline int find_next_zero_bit(const unsigned long *vaddr, int size, | |||
229 | /* No zero yet, search remaining full bytes for a zero */ | 230 | /* No zero yet, search remaining full bytes for a zero */ |
230 | return offset + find_first_zero_bit(p, size - offset); | 231 | return offset + find_first_zero_bit(p, size - offset); |
231 | } | 232 | } |
233 | #define find_next_zero_bit find_next_zero_bit | ||
232 | 234 | ||
233 | static inline int find_first_bit(const unsigned long *vaddr, unsigned size) | 235 | static inline int find_first_bit(const unsigned long *vaddr, unsigned size) |
234 | { | 236 | { |
@@ -253,6 +255,7 @@ out: | |||
253 | res += ((long)p - (long)vaddr - 4) * 8; | 255 | res += ((long)p - (long)vaddr - 4) * 8; |
254 | return res < size ? res : size; | 256 | return res < size ? res : size; |
255 | } | 257 | } |
258 | #define find_first_bit find_first_bit | ||
256 | 259 | ||
257 | static inline int find_next_bit(const unsigned long *vaddr, int size, | 260 | static inline int find_next_bit(const unsigned long *vaddr, int size, |
258 | int offset) | 261 | int offset) |
@@ -282,6 +285,7 @@ static inline int find_next_bit(const unsigned long *vaddr, int size, | |||
282 | /* No one yet, search remaining full bytes for a one */ | 285 | /* No one yet, search remaining full bytes for a one */ |
283 | return offset + find_first_bit(p, size - offset); | 286 | return offset + find_first_bit(p, size - offset); |
284 | } | 287 | } |
288 | #define find_next_bit find_next_bit | ||
285 | 289 | ||
286 | /* | 290 | /* |
287 | * ffz = Find First Zero in word. Undefined if no zero exists, | 291 | * ffz = Find First Zero in word. Undefined if no zero exists, |
@@ -398,6 +402,7 @@ out: | |||
398 | res += (p - addr) * 32; | 402 | res += (p - addr) * 32; |
399 | return res < size ? res : size; | 403 | return res < size ? res : size; |
400 | } | 404 | } |
405 | #define find_first_zero_bit_le find_first_zero_bit_le | ||
401 | 406 | ||
402 | static inline unsigned long find_next_zero_bit_le(const void *addr, | 407 | static inline unsigned long find_next_zero_bit_le(const void *addr, |
403 | unsigned long size, unsigned long offset) | 408 | unsigned long size, unsigned long offset) |
@@ -427,6 +432,7 @@ static inline unsigned long find_next_zero_bit_le(const void *addr, | |||
427 | /* No zero yet, search remaining full bytes for a zero */ | 432 | /* No zero yet, search remaining full bytes for a zero */ |
428 | return offset + find_first_zero_bit_le(p, size - offset); | 433 | return offset + find_first_zero_bit_le(p, size - offset); |
429 | } | 434 | } |
435 | #define find_next_zero_bit_le find_next_zero_bit_le | ||
430 | 436 | ||
431 | static inline int find_first_bit_le(const void *vaddr, unsigned size) | 437 | static inline int find_first_bit_le(const void *vaddr, unsigned size) |
432 | { | 438 | { |
@@ -451,6 +457,7 @@ out: | |||
451 | res += (p - addr) * 32; | 457 | res += (p - addr) * 32; |
452 | return res < size ? res : size; | 458 | return res < size ? res : size; |
453 | } | 459 | } |
460 | #define find_first_bit_le find_first_bit_le | ||
454 | 461 | ||
455 | static inline unsigned long find_next_bit_le(const void *addr, | 462 | static inline unsigned long find_next_bit_le(const void *addr, |
456 | unsigned long size, unsigned long offset) | 463 | unsigned long size, unsigned long offset) |
@@ -480,6 +487,7 @@ static inline unsigned long find_next_bit_le(const void *addr, | |||
480 | /* No set bit yet, search remaining full bytes for a set bit */ | 487 | /* No set bit yet, search remaining full bytes for a set bit */ |
481 | return offset + find_first_bit_le(p, size - offset); | 488 | return offset + find_first_bit_le(p, size - offset); |
482 | } | 489 | } |
490 | #define find_next_bit_le find_next_bit_le | ||
483 | 491 | ||
484 | /* Bitmap functions for the ext2 filesystem. */ | 492 | /* Bitmap functions for the ext2 filesystem. */ |
485 | 493 | ||
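
The "#define find_first_zero_bit find_first_zero_bit" markers added above are how an architecture signals that it provides its own find-bit helpers: the generic declarations are wrapped in #ifndef guards keyed on those names. Roughly (paraphrased, not the exact asm-generic header):

    /* Sketch of the guard pattern used by the generic find-bit header. */
    #ifndef find_first_bit
    extern unsigned long find_first_bit(const unsigned long *addr,
                                        unsigned long size);
    #endif

    #ifndef find_next_bit
    extern unsigned long find_next_bit(const unsigned long *addr,
                                       unsigned long size, unsigned long offset);
    #endif
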
diff --git a/arch/m68k/include/asm/bitops_no.h b/arch/m68k/include/asm/bitops_no.h index 6b0e2d349f0e..72e85acdd7bd 100644 --- a/arch/m68k/include/asm/bitops_no.h +++ b/arch/m68k/include/asm/bitops_no.h | |||
@@ -319,6 +319,10 @@ found_first: | |||
319 | found_middle: | 319 | found_middle: |
320 | return result + ffz(__swab32(tmp)); | 320 | return result + ffz(__swab32(tmp)); |
321 | } | 321 | } |
322 | #define find_next_zero_bit_le find_next_zero_bit_le | ||
323 | |||
324 | extern unsigned long find_next_bit_le(const void *addr, | ||
325 | unsigned long size, unsigned long offset); | ||
322 | 326 | ||
323 | #endif /* __KERNEL__ */ | 327 | #endif /* __KERNEL__ */ |
324 | 328 | ||
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig index eccdefe70d4e..e446bab2427b 100644 --- a/arch/microblaze/Kconfig +++ b/arch/microblaze/Kconfig | |||
@@ -33,12 +33,6 @@ config ARCH_HAS_ILOG2_U32 | |||
33 | config ARCH_HAS_ILOG2_U64 | 33 | config ARCH_HAS_ILOG2_U64 |
34 | def_bool n | 34 | def_bool n |
35 | 35 | ||
36 | config GENERIC_FIND_NEXT_BIT | ||
37 | def_bool y | ||
38 | |||
39 | config GENERIC_FIND_BIT_LE | ||
40 | def_bool y | ||
41 | |||
42 | config GENERIC_HWEIGHT | 36 | config GENERIC_HWEIGHT |
43 | def_bool y | 37 | def_bool y |
44 | 38 | ||
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig index cef1a854487d..653da62d0682 100644 --- a/arch/mips/Kconfig +++ b/arch/mips/Kconfig | |||
@@ -821,14 +821,6 @@ config ARCH_SUPPORTS_OPROFILE | |||
821 | bool | 821 | bool |
822 | default y if !MIPS_MT_SMTC | 822 | default y if !MIPS_MT_SMTC |
823 | 823 | ||
824 | config GENERIC_FIND_NEXT_BIT | ||
825 | bool | ||
826 | default y | ||
827 | |||
828 | config GENERIC_FIND_BIT_LE | ||
829 | bool | ||
830 | default y | ||
831 | |||
832 | config GENERIC_HWEIGHT | 824 | config GENERIC_HWEIGHT |
833 | bool | 825 | bool |
834 | default y | 826 | default y |
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig index 22fdf2f0cc23..ad15fb10322b 100644 --- a/arch/mips/configs/bcm47xx_defconfig +++ b/arch/mips/configs/bcm47xx_defconfig | |||
@@ -16,7 +16,6 @@ CONFIG_TASK_IO_ACCOUNTING=y | |||
16 | CONFIG_AUDIT=y | 16 | CONFIG_AUDIT=y |
17 | CONFIG_TINY_RCU=y | 17 | CONFIG_TINY_RCU=y |
18 | CONFIG_CGROUPS=y | 18 | CONFIG_CGROUPS=y |
19 | CONFIG_CGROUP_NS=y | ||
20 | CONFIG_CGROUP_CPUACCT=y | 19 | CONFIG_CGROUP_CPUACCT=y |
21 | CONFIG_RELAY=y | 20 | CONFIG_RELAY=y |
22 | CONFIG_BLK_DEV_INITRD=y | 21 | CONFIG_BLK_DEV_INITRD=y |
diff --git a/arch/mips/include/asm/suspend.h b/arch/mips/include/asm/suspend.h index 294cdb66c5fc..3adac3b53d19 100644 --- a/arch/mips/include/asm/suspend.h +++ b/arch/mips/include/asm/suspend.h | |||
@@ -1,8 +1,6 @@ | |||
1 | #ifndef __ASM_SUSPEND_H | 1 | #ifndef __ASM_SUSPEND_H |
2 | #define __ASM_SUSPEND_H | 2 | #define __ASM_SUSPEND_H |
3 | 3 | ||
4 | static inline int arch_prepare_suspend(void) { return 0; } | ||
5 | |||
6 | /* References to section boundaries */ | 4 | /* References to section boundaries */ |
7 | extern const void __nosave_begin, __nosave_end; | 5 | extern const void __nosave_begin, __nosave_end; |
8 | 6 | ||
diff --git a/arch/mn10300/Kconfig b/arch/mn10300/Kconfig index feaf09cc8632..1f870340ebdd 100644 --- a/arch/mn10300/Kconfig +++ b/arch/mn10300/Kconfig | |||
@@ -44,9 +44,6 @@ config GENERIC_CALIBRATE_DELAY | |||
44 | config GENERIC_CMOS_UPDATE | 44 | config GENERIC_CMOS_UPDATE |
45 | def_bool n | 45 | def_bool n |
46 | 46 | ||
47 | config GENERIC_FIND_NEXT_BIT | ||
48 | def_bool y | ||
49 | |||
50 | config GENERIC_HWEIGHT | 47 | config GENERIC_HWEIGHT |
51 | def_bool y | 48 | def_bool y |
52 | 49 | ||
diff --git a/arch/mn10300/configs/asb2364_defconfig b/arch/mn10300/configs/asb2364_defconfig index 31d76261a3d5..fbb96ae3122a 100644 --- a/arch/mn10300/configs/asb2364_defconfig +++ b/arch/mn10300/configs/asb2364_defconfig | |||
@@ -8,7 +8,6 @@ CONFIG_TASK_XACCT=y | |||
8 | CONFIG_TASK_IO_ACCOUNTING=y | 8 | CONFIG_TASK_IO_ACCOUNTING=y |
9 | CONFIG_LOG_BUF_SHIFT=14 | 9 | CONFIG_LOG_BUF_SHIFT=14 |
10 | CONFIG_CGROUPS=y | 10 | CONFIG_CGROUPS=y |
11 | CONFIG_CGROUP_NS=y | ||
12 | CONFIG_CGROUP_FREEZER=y | 11 | CONFIG_CGROUP_FREEZER=y |
13 | CONFIG_CGROUP_DEVICE=y | 12 | CONFIG_CGROUP_DEVICE=y |
14 | CONFIG_CGROUP_CPUACCT=y | 13 | CONFIG_CGROUP_CPUACCT=y |
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig index 69ff049c8571..65adc86a230e 100644 --- a/arch/parisc/Kconfig +++ b/arch/parisc/Kconfig | |||
@@ -47,14 +47,6 @@ config ARCH_HAS_ILOG2_U64 | |||
47 | bool | 47 | bool |
48 | default n | 48 | default n |
49 | 49 | ||
50 | config GENERIC_FIND_NEXT_BIT | ||
51 | bool | ||
52 | default y | ||
53 | |||
54 | config GENERIC_FIND_BIT_LE | ||
55 | bool | ||
56 | default y | ||
57 | |||
58 | config GENERIC_BUG | 50 | config GENERIC_BUG |
59 | bool | 51 | bool |
60 | default y | 52 | default y |
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 423145a6f7ba..2729c6663d8a 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -91,14 +91,6 @@ config GENERIC_HWEIGHT | |||
91 | bool | 91 | bool |
92 | default y | 92 | default y |
93 | 93 | ||
94 | config GENERIC_FIND_NEXT_BIT | ||
95 | bool | ||
96 | default y | ||
97 | |||
98 | config GENERIC_FIND_BIT_LE | ||
99 | bool | ||
100 | default y | ||
101 | |||
102 | config GENERIC_GPIO | 94 | config GENERIC_GPIO |
103 | bool | 95 | bool |
104 | help | 96 | help |
@@ -141,6 +133,7 @@ config PPC | |||
141 | select GENERIC_IRQ_SHOW | 133 | select GENERIC_IRQ_SHOW |
142 | select GENERIC_IRQ_SHOW_LEVEL | 134 | select GENERIC_IRQ_SHOW_LEVEL |
143 | select HAVE_RCU_TABLE_FREE if SMP | 135 | select HAVE_RCU_TABLE_FREE if SMP |
136 | select HAVE_SYSCALL_TRACEPOINTS | ||
144 | 137 | ||
145 | config EARLY_PRINTK | 138 | config EARLY_PRINTK |
146 | bool | 139 | bool |
diff --git a/arch/powerpc/boot/dts/canyonlands.dts b/arch/powerpc/boot/dts/canyonlands.dts index 2779f08313a5..22dd6ae84da0 100644 --- a/arch/powerpc/boot/dts/canyonlands.dts +++ b/arch/powerpc/boot/dts/canyonlands.dts | |||
@@ -530,5 +530,23 @@ | |||
530 | 0x0 0x0 0x0 0x3 &UIC3 0x12 0x4 /* swizzled int C */ | 530 | 0x0 0x0 0x0 0x3 &UIC3 0x12 0x4 /* swizzled int C */ |
531 | 0x0 0x0 0x0 0x4 &UIC3 0x13 0x4 /* swizzled int D */>; | 531 | 0x0 0x0 0x0 0x4 &UIC3 0x13 0x4 /* swizzled int D */>; |
532 | }; | 532 | }; |
533 | |||
534 | MSI: ppc4xx-msi@C10000000 { | ||
535 | compatible = "amcc,ppc4xx-msi", "ppc4xx-msi"; | ||
536 | reg = < 0xC 0x10000000 0x100>; | ||
537 | sdr-base = <0x36C>; | ||
538 | msi-data = <0x00000000>; | ||
539 | msi-mask = <0x44440000>; | ||
540 | interrupt-count = <3>; | ||
541 | interrupts = <0 1 2 3>; | ||
542 | interrupt-parent = <&UIC3>; | ||
543 | #interrupt-cells = <1>; | ||
544 | #address-cells = <0>; | ||
545 | #size-cells = <0>; | ||
546 | interrupt-map = <0 &UIC3 0x18 1 | ||
547 | 1 &UIC3 0x19 1 | ||
548 | 2 &UIC3 0x1A 1 | ||
549 | 3 &UIC3 0x1B 1>; | ||
550 | }; | ||
533 | }; | 551 | }; |
534 | }; | 552 | }; |
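
The new MSI node gives the platform MSI driver everything it needs: the capture-register window in "reg", the SDR0 offset in "sdr-base", and the data/mask pair programmed into devices. A rough sketch of how such a node could be read with standard OF helpers; the function is illustrative, not the actual ppc4xx_msi.c probe:

    #include <linux/of.h>
    #include <linux/of_address.h>

    /* Illustrative probe fragment, not the real driver code. */
    static int msi_node_read(struct device_node *np)
    {
            struct resource res;
            const u32 *prop;

            if (of_address_to_resource(np, 0, &res))      /* the "reg" window   */
                    return -ENODEV;

            prop = of_get_property(np, "sdr-base", NULL); /* SDR0 offset        */
            if (!prop)
                    return -EINVAL;

            prop = of_get_property(np, "msi-data", NULL); /* value devices write */
            if (!prop)
                    return -EINVAL;

            return 0;
    }
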
diff --git a/arch/powerpc/boot/dts/katmai.dts b/arch/powerpc/boot/dts/katmai.dts index 7c3be5e45748..f913dbe25d35 100644 --- a/arch/powerpc/boot/dts/katmai.dts +++ b/arch/powerpc/boot/dts/katmai.dts | |||
@@ -442,6 +442,24 @@ | |||
442 | 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>; | 442 | 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>; |
443 | }; | 443 | }; |
444 | 444 | ||
445 | MSI: ppc4xx-msi@400300000 { | ||
446 | compatible = "amcc,ppc4xx-msi", "ppc4xx-msi"; | ||
447 | reg = < 0x4 0x00300000 0x100>; | ||
448 | sdr-base = <0x3B0>; | ||
449 | msi-data = <0x00000000>; | ||
450 | msi-mask = <0x44440000>; | ||
451 | interrupt-count = <3>; | ||
452 | interrupts = <0 1 2 3>; | ||
453 | interrupt-parent = <&UIC0>; | ||
454 | #interrupt-cells = <1>; | ||
455 | #address-cells = <0>; | ||
456 | #size-cells = <0>; | ||
457 | interrupt-map = <0 &UIC0 0xC 1 | ||
458 | 1 &UIC0 0x0D 1 | ||
459 | 2 &UIC0 0x0E 1 | ||
460 | 3 &UIC0 0x0F 1>; | ||
461 | }; | ||
462 | |||
445 | I2O: i2o@400100000 { | 463 | I2O: i2o@400100000 { |
446 | compatible = "ibm,i2o-440spe"; | 464 | compatible = "ibm,i2o-440spe"; |
447 | reg = <0x00000004 0x00100000 0x100>; | 465 | reg = <0x00000004 0x00100000 0x100>; |
diff --git a/arch/powerpc/boot/dts/kilauea.dts b/arch/powerpc/boot/dts/kilauea.dts index 89edb16649c3..1613d6e4049e 100644 --- a/arch/powerpc/boot/dts/kilauea.dts +++ b/arch/powerpc/boot/dts/kilauea.dts | |||
@@ -403,5 +403,33 @@ | |||
403 | 0x0 0x0 0x0 0x3 &UIC2 0xd 0x4 /* swizzled int C */ | 403 | 0x0 0x0 0x0 0x3 &UIC2 0xd 0x4 /* swizzled int C */ |
404 | 0x0 0x0 0x0 0x4 &UIC2 0xe 0x4 /* swizzled int D */>; | 404 | 0x0 0x0 0x0 0x4 &UIC2 0xe 0x4 /* swizzled int D */>; |
405 | }; | 405 | }; |
406 | |||
407 | MSI: ppc4xx-msi@C10000000 { | ||
408 | compatible = "amcc,ppc4xx-msi", "ppc4xx-msi"; | ||
409 | reg = < 0x0 0xEF620000 0x100>; | ||
410 | sdr-base = <0x4B0>; | ||
411 | msi-data = <0x00000000>; | ||
412 | msi-mask = <0x44440000>; | ||
413 | interrupt-count = <12>; | ||
414 | interrupts = <0 1 2 3 4 5 6 7 8 9 0xA 0xB 0xC 0xD>; | ||
415 | interrupt-parent = <&UIC2>; | ||
416 | #interrupt-cells = <1>; | ||
417 | #address-cells = <0>; | ||
418 | #size-cells = <0>; | ||
419 | interrupt-map = <0 &UIC2 0x10 1 | ||
420 | 1 &UIC2 0x11 1 | ||
421 | 2 &UIC2 0x12 1 | ||
422 | 2 &UIC2 0x13 1 | ||
423 | 2 &UIC2 0x14 1 | ||
424 | 2 &UIC2 0x15 1 | ||
425 | 2 &UIC2 0x16 1 | ||
426 | 2 &UIC2 0x17 1 | ||
427 | 2 &UIC2 0x18 1 | ||
428 | 2 &UIC2 0x19 1 | ||
429 | 2 &UIC2 0x1A 1 | ||
430 | 2 &UIC2 0x1B 1 | ||
431 | 2 &UIC2 0x1C 1 | ||
432 | 3 &UIC2 0x1D 1>; | ||
433 | }; | ||
406 | }; | 434 | }; |
407 | }; | 435 | }; |
diff --git a/arch/powerpc/boot/dts/redwood.dts b/arch/powerpc/boot/dts/redwood.dts index 81636c01d906..d86a3a498118 100644 --- a/arch/powerpc/boot/dts/redwood.dts +++ b/arch/powerpc/boot/dts/redwood.dts | |||
@@ -358,8 +358,28 @@ | |||
358 | 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>; | 358 | 0x0 0x0 0x0 0x4 &UIC3 0xb 0x4 /* swizzled int D */>; |
359 | }; | 359 | }; |
360 | 360 | ||
361 | MSI: ppc4xx-msi@400300000 { | ||
362 | compatible = "amcc,ppc4xx-msi", "ppc4xx-msi"; | ||
363 | reg = < 0x4 0x00300000 0x100 | ||
364 | 0x4 0x00300000 0x100>; | ||
365 | sdr-base = <0x3B0>; | ||
366 | msi-data = <0x00000000>; | ||
367 | msi-mask = <0x44440000>; | ||
368 | interrupt-count = <3>; | ||
369 | interrupts = <0 1 2 3>; | ||
370 | interrupt-parent = <&UIC0>; | ||
371 | #interrupt-cells = <1>; | ||
372 | #address-cells = <0>; | ||
373 | #size-cells = <0>; | ||
374 | interrupt-map = <0 &UIC0 0xC 1 | ||
375 | 1 &UIC0 0x0D 1 | ||
376 | 2 &UIC0 0x0E 1 | ||
377 | 3 &UIC0 0x0F 1>; | ||
378 | }; | ||
379 | |||
361 | }; | 380 | }; |
362 | 381 | ||
382 | |||
363 | chosen { | 383 | chosen { |
364 | linux,stdout-path = "/plb/opb/serial@ef600200"; | 384 | linux,stdout-path = "/plb/opb/serial@ef600200"; |
365 | }; | 385 | }; |
diff --git a/arch/powerpc/configs/ppc6xx_defconfig b/arch/powerpc/configs/ppc6xx_defconfig index 214208924a9c..04360f9b0109 100644 --- a/arch/powerpc/configs/ppc6xx_defconfig +++ b/arch/powerpc/configs/ppc6xx_defconfig | |||
@@ -10,7 +10,6 @@ CONFIG_TASK_XACCT=y | |||
10 | CONFIG_TASK_IO_ACCOUNTING=y | 10 | CONFIG_TASK_IO_ACCOUNTING=y |
11 | CONFIG_AUDIT=y | 11 | CONFIG_AUDIT=y |
12 | CONFIG_CGROUPS=y | 12 | CONFIG_CGROUPS=y |
13 | CONFIG_CGROUP_NS=y | ||
14 | CONFIG_CGROUP_DEVICE=y | 13 | CONFIG_CGROUP_DEVICE=y |
15 | CONFIG_CGROUP_CPUACCT=y | 14 | CONFIG_CGROUP_CPUACCT=y |
16 | CONFIG_RESOURCE_COUNTERS=y | 15 | CONFIG_RESOURCE_COUNTERS=y |
diff --git a/arch/powerpc/configs/pseries_defconfig b/arch/powerpc/configs/pseries_defconfig index 7de13865508c..c9f212b5f3de 100644 --- a/arch/powerpc/configs/pseries_defconfig +++ b/arch/powerpc/configs/pseries_defconfig | |||
@@ -15,7 +15,6 @@ CONFIG_AUDITSYSCALL=y | |||
15 | CONFIG_IKCONFIG=y | 15 | CONFIG_IKCONFIG=y |
16 | CONFIG_IKCONFIG_PROC=y | 16 | CONFIG_IKCONFIG_PROC=y |
17 | CONFIG_CGROUPS=y | 17 | CONFIG_CGROUPS=y |
18 | CONFIG_CGROUP_NS=y | ||
19 | CONFIG_CGROUP_FREEZER=y | 18 | CONFIG_CGROUP_FREEZER=y |
20 | CONFIG_CGROUP_DEVICE=y | 19 | CONFIG_CGROUP_DEVICE=y |
21 | CONFIG_CPUSETS=y | 20 | CONFIG_CPUSETS=y |
diff --git a/arch/powerpc/include/asm/fsl_lbc.h b/arch/powerpc/include/asm/fsl_lbc.h index 5c1bf3466749..8a0b5ece8f76 100644 --- a/arch/powerpc/include/asm/fsl_lbc.h +++ b/arch/powerpc/include/asm/fsl_lbc.h | |||
@@ -157,6 +157,8 @@ struct fsl_lbc_regs { | |||
157 | #define LBCR_EPAR_SHIFT 16 | 157 | #define LBCR_EPAR_SHIFT 16 |
158 | #define LBCR_BMT 0x0000FF00 | 158 | #define LBCR_BMT 0x0000FF00 |
159 | #define LBCR_BMT_SHIFT 8 | 159 | #define LBCR_BMT_SHIFT 8 |
160 | #define LBCR_BMTPS 0x0000000F | ||
161 | #define LBCR_BMTPS_SHIFT 0 | ||
160 | #define LBCR_INIT 0x00040000 | 162 | #define LBCR_INIT 0x00040000 |
161 | __be32 lcrr; /**< Clock Ratio Register */ | 163 | __be32 lcrr; /**< Clock Ratio Register */ |
162 | #define LCRR_DBYP 0x80000000 | 164 | #define LCRR_DBYP 0x80000000 |
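
LBCR_BMTPS adds the bus-monitor timing prescale field next to the existing LBCR_BMT count. A hedged sketch of how the two fields might be programmed together; the register pointer and values are placeholders, not taken from the patch:

    #include <asm/fsl_lbc.h>
    #include <asm/io.h>

    /* Illustrative only: update the bus monitor timing fields in LBCR. */
    static void lbc_set_bus_monitor(struct fsl_lbc_regs __iomem *regs,
                                    u32 count, u32 prescale)
    {
            u32 lbcr = in_be32(&regs->lbcr);

            lbcr &= ~(LBCR_BMT | LBCR_BMTPS);
            lbcr |= (count << LBCR_BMT_SHIFT) & LBCR_BMT;
            lbcr |= (prescale << LBCR_BMTPS_SHIFT) & LBCR_BMTPS;
            out_be32(&regs->lbcr, lbcr);
    }
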
diff --git a/arch/powerpc/include/asm/ftrace.h b/arch/powerpc/include/asm/ftrace.h index dde1296b8b41..169d039ed402 100644 --- a/arch/powerpc/include/asm/ftrace.h +++ b/arch/powerpc/include/asm/ftrace.h | |||
@@ -60,4 +60,18 @@ struct dyn_arch_ftrace { | |||
60 | 60 | ||
61 | #endif | 61 | #endif |
62 | 62 | ||
63 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) && !defined(__ASSEMBLY__) | ||
64 | #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME | ||
65 | static inline bool arch_syscall_match_sym_name(const char *sym, const char *name) | ||
66 | { | ||
67 | /* | ||
68 | * Compare the symbol name with the system call name. Skip the .sys or .SyS | ||
69 | * prefix from the symbol name and the sys prefix from the system call name and | ||
70 | * just match the rest. This is only needed on ppc64 since symbol names on | ||
71 | * 32bit do not start with a period so the generic function will work. | ||
72 | */ | ||
73 | return !strcmp(sym + 4, name + 3); | ||
74 | } | ||
75 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 && !__ASSEMBLY__ */ | ||
76 | |||
63 | #endif /* _ASM_POWERPC_FTRACE */ | 77 | #endif /* _ASM_POWERPC_FTRACE */ |
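
The offsets in arch_syscall_match_sym_name() line up so that only the bare syscall name is compared; a worked example with an illustrative name:

    /*
     * sym  = ".SyS_read"  ->  sym + 4  = "_read"
     * name = "sys_read"   ->  name + 3 = "_read"
     *
     * so strcmp(sym + 4, name + 3) == 0 and the symbol matches the syscall.
     */
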
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h index 852b8c1c09db..fd8201dddd4b 100644 --- a/arch/powerpc/include/asm/hvcall.h +++ b/arch/powerpc/include/asm/hvcall.h | |||
@@ -236,7 +236,7 @@ | |||
236 | #define H_HOME_NODE_ASSOCIATIVITY 0x2EC | 236 | #define H_HOME_NODE_ASSOCIATIVITY 0x2EC |
237 | #define H_BEST_ENERGY 0x2F4 | 237 | #define H_BEST_ENERGY 0x2F4 |
238 | #define H_GET_MPP_X 0x314 | 238 | #define H_GET_MPP_X 0x314 |
239 | #define MAX_HCALL_OPCODE H_BEST_ENERGY | 239 | #define MAX_HCALL_OPCODE H_GET_MPP_X |
240 | 240 | ||
241 | #ifndef __ASSEMBLY__ | 241 | #ifndef __ASSEMBLY__ |
242 | 242 | ||
diff --git a/arch/powerpc/include/asm/rio.h b/arch/powerpc/include/asm/rio.h index 0018bf80cb25..d902abd33995 100644 --- a/arch/powerpc/include/asm/rio.h +++ b/arch/powerpc/include/asm/rio.h | |||
@@ -14,5 +14,10 @@ | |||
14 | #define ASM_PPC_RIO_H | 14 | #define ASM_PPC_RIO_H |
15 | 15 | ||
16 | extern void platform_rio_init(void); | 16 | extern void platform_rio_init(void); |
17 | #ifdef CONFIG_RAPIDIO | ||
18 | extern int fsl_rio_mcheck_exception(struct pt_regs *); | ||
19 | #else | ||
20 | static inline int fsl_rio_mcheck_exception(struct pt_regs *regs) { return 0; } | ||
21 | #endif | ||
17 | 22 | ||
18 | #endif /* ASM_PPC_RIO_H */ | 23 | #endif /* ASM_PPC_RIO_H */ |
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h index 880b8c1e6e53..11eb404b5606 100644 --- a/arch/powerpc/include/asm/smp.h +++ b/arch/powerpc/include/asm/smp.h | |||
@@ -191,8 +191,6 @@ extern unsigned long __secondary_hold_spinloop; | |||
191 | extern unsigned long __secondary_hold_acknowledge; | 191 | extern unsigned long __secondary_hold_acknowledge; |
192 | extern char __secondary_hold; | 192 | extern char __secondary_hold; |
193 | 193 | ||
194 | extern irqreturn_t debug_ipi_action(int irq, void *data); | ||
195 | |||
196 | #endif /* __ASSEMBLY__ */ | 194 | #endif /* __ASSEMBLY__ */ |
197 | 195 | ||
198 | #endif /* __KERNEL__ */ | 196 | #endif /* __KERNEL__ */ |
diff --git a/arch/powerpc/include/asm/suspend.h b/arch/powerpc/include/asm/suspend.h deleted file mode 100644 index c6efc3466aa6..000000000000 --- a/arch/powerpc/include/asm/suspend.h +++ /dev/null | |||
@@ -1,6 +0,0 @@ | |||
1 | #ifndef __ASM_POWERPC_SUSPEND_H | ||
2 | #define __ASM_POWERPC_SUSPEND_H | ||
3 | |||
4 | static inline int arch_prepare_suspend(void) { return 0; } | ||
5 | |||
6 | #endif /* __ASM_POWERPC_SUSPEND_H */ | ||
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h index 23913e902fc3..b54b2add07be 100644 --- a/arch/powerpc/include/asm/syscall.h +++ b/arch/powerpc/include/asm/syscall.h | |||
@@ -15,6 +15,11 @@ | |||
15 | 15 | ||
16 | #include <linux/sched.h> | 16 | #include <linux/sched.h> |
17 | 17 | ||
18 | /* ftrace syscalls requires exporting the sys_call_table */ | ||
19 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
20 | extern const unsigned long *sys_call_table; | ||
21 | #endif /* CONFIG_FTRACE_SYSCALLS */ | ||
22 | |||
18 | static inline long syscall_get_nr(struct task_struct *task, | 23 | static inline long syscall_get_nr(struct task_struct *task, |
19 | struct pt_regs *regs) | 24 | struct pt_regs *regs) |
20 | { | 25 | { |
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h index 37c353e8af7c..836f231ec1f0 100644 --- a/arch/powerpc/include/asm/thread_info.h +++ b/arch/powerpc/include/asm/thread_info.h | |||
@@ -110,7 +110,8 @@ static inline struct thread_info *current_thread_info(void) | |||
110 | #define TIF_NOERROR 12 /* Force successful syscall return */ | 110 | #define TIF_NOERROR 12 /* Force successful syscall return */ |
111 | #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ | 111 | #define TIF_NOTIFY_RESUME 13 /* callback before returning to user */ |
112 | #define TIF_FREEZE 14 /* Freezing for suspend */ | 112 | #define TIF_FREEZE 14 /* Freezing for suspend */ |
113 | #define TIF_RUNLATCH 15 /* Is the runlatch enabled? */ | 113 | #define TIF_SYSCALL_TRACEPOINT 15 /* syscall tracepoint instrumentation */ |
114 | #define TIF_RUNLATCH 16 /* Is the runlatch enabled? */ | ||
114 | 115 | ||
115 | /* as above, but as bit values */ | 116 | /* as above, but as bit values */ |
116 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) | 117 | #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE) |
@@ -127,8 +128,10 @@ static inline struct thread_info *current_thread_info(void) | |||
127 | #define _TIF_NOERROR (1<<TIF_NOERROR) | 128 | #define _TIF_NOERROR (1<<TIF_NOERROR) |
128 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) | 129 | #define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) |
129 | #define _TIF_FREEZE (1<<TIF_FREEZE) | 130 | #define _TIF_FREEZE (1<<TIF_FREEZE) |
131 | #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT) | ||
130 | #define _TIF_RUNLATCH (1<<TIF_RUNLATCH) | 132 | #define _TIF_RUNLATCH (1<<TIF_RUNLATCH) |
131 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP) | 133 | #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \ |
134 | _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT) | ||
132 | 135 | ||
133 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ | 136 | #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \ |
134 | _TIF_NOTIFY_RESUME) | 137 | _TIF_NOTIFY_RESUME) |
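
With TIF_SYSCALL_TRACEPOINT at bit 15 (and TIF_RUNLATCH moved to bit 16), the combined entry-path mask now covers ptrace, audit, seccomp and tracepoints in one test. A minimal C sketch of that test; the real check is done in assembly on the syscall entry path:

    #include <linux/thread_info.h>

    /* Illustrative C equivalent of the entry-path test (not from the patch). */
    static inline bool syscall_needs_slow_path(struct thread_info *ti)
    {
            return (ti->flags & _TIF_SYSCALL_T_OR_A) != 0;
    }
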
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index 9aab36312572..e8b981897d44 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile | |||
@@ -109,6 +109,7 @@ obj-$(CONFIG_PPC_IO_WORKAROUNDS) += io-workarounds.o | |||
109 | 109 | ||
110 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o | 110 | obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o |
111 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o | 111 | obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o |
112 | obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o | ||
112 | obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o | 113 | obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o |
113 | 114 | ||
114 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o | 115 | obj-$(CONFIG_PPC_PERF_CTRS) += perf_event.o |
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c index ce1f3e44c24f..bf99cfa6bbfe 100644 --- a/arch/powerpc/kernel/ftrace.c +++ b/arch/powerpc/kernel/ftrace.c | |||
@@ -22,6 +22,7 @@ | |||
22 | #include <asm/cacheflush.h> | 22 | #include <asm/cacheflush.h> |
23 | #include <asm/code-patching.h> | 23 | #include <asm/code-patching.h> |
24 | #include <asm/ftrace.h> | 24 | #include <asm/ftrace.h> |
25 | #include <asm/syscall.h> | ||
25 | 26 | ||
26 | 27 | ||
27 | #ifdef CONFIG_DYNAMIC_FTRACE | 28 | #ifdef CONFIG_DYNAMIC_FTRACE |
@@ -600,3 +601,10 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr) | |||
600 | } | 601 | } |
601 | } | 602 | } |
602 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | 603 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ |
604 | |||
605 | #if defined(CONFIG_FTRACE_SYSCALLS) && defined(CONFIG_PPC64) | ||
606 | unsigned long __init arch_syscall_addr(int nr) | ||
607 | { | ||
608 | return sys_call_table[nr*2]; | ||
609 | } | ||
610 | #endif /* CONFIG_FTRACE_SYSCALLS && CONFIG_PPC64 */ | ||
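
arch_syscall_addr() multiplies by two because the ppc64 syscall table interleaves native and compat entries; schematically:

    /*
     * sys_call_table layout assumed above (ppc64, schematic):
     *
     *   sys_call_table[2 * nr]     -> 64-bit handler for syscall nr
     *   sys_call_table[2 * nr + 1] -> 32-bit (compat) handler for syscall nr
     *
     * hence the native entry point is sys_call_table[nr * 2].
     */
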
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c index a24d37d4cf51..5b428e308666 100644 --- a/arch/powerpc/kernel/irq.c +++ b/arch/powerpc/kernel/irq.c | |||
@@ -295,17 +295,20 @@ static inline void handle_one_irq(unsigned int irq) | |||
295 | unsigned long saved_sp_limit; | 295 | unsigned long saved_sp_limit; |
296 | struct irq_desc *desc; | 296 | struct irq_desc *desc; |
297 | 297 | ||
298 | desc = irq_to_desc(irq); | ||
299 | if (!desc) | ||
300 | return; | ||
301 | |||
298 | /* Switch to the irq stack to handle this */ | 302 | /* Switch to the irq stack to handle this */ |
299 | curtp = current_thread_info(); | 303 | curtp = current_thread_info(); |
300 | irqtp = hardirq_ctx[smp_processor_id()]; | 304 | irqtp = hardirq_ctx[smp_processor_id()]; |
301 | 305 | ||
302 | if (curtp == irqtp) { | 306 | if (curtp == irqtp) { |
303 | /* We're already on the irq stack, just handle it */ | 307 | /* We're already on the irq stack, just handle it */ |
304 | generic_handle_irq(irq); | 308 | desc->handle_irq(irq, desc); |
305 | return; | 309 | return; |
306 | } | 310 | } |
307 | 311 | ||
308 | desc = irq_to_desc(irq); | ||
309 | saved_sp_limit = current->thread.ksp_limit; | 312 | saved_sp_limit = current->thread.ksp_limit; |
310 | 313 | ||
311 | irqtp->task = curtp->task; | 314 | irqtp->task = curtp->task; |
@@ -557,15 +560,8 @@ struct irq_host *irq_alloc_host(struct device_node *of_node, | |||
557 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { | 560 | if (revmap_type == IRQ_HOST_MAP_LEGACY) { |
558 | if (irq_map[0].host != NULL) { | 561 | if (irq_map[0].host != NULL) { |
559 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); | 562 | raw_spin_unlock_irqrestore(&irq_big_lock, flags); |
560 | /* If we are early boot, we can't free the structure, | 563 | of_node_put(host->of_node); |
561 | * too bad... | 564 | kfree(host); |
562 | * this will be fixed once slab is made available early | ||
563 | * instead of the current cruft | ||
564 | */ | ||
565 | if (mem_init_done) { | ||
566 | of_node_put(host->of_node); | ||
567 | kfree(host); | ||
568 | } | ||
569 | return NULL; | 565 | return NULL; |
570 | } | 566 | } |
571 | irq_map[0].host = host; | 567 | irq_map[0].host = host; |
@@ -727,9 +723,7 @@ unsigned int irq_create_mapping(struct irq_host *host, | |||
727 | } | 723 | } |
728 | pr_debug("irq: -> using host @%p\n", host); | 724 | pr_debug("irq: -> using host @%p\n", host); |
729 | 725 | ||
730 | /* Check if mapping already exist, if it does, call | 726 | /* Check if mapping already exists */ |
731 | * host->ops->map() to update the flags | ||
732 | */ | ||
733 | virq = irq_find_mapping(host, hwirq); | 727 | virq = irq_find_mapping(host, hwirq); |
734 | if (virq != NO_IRQ) { | 728 | if (virq != NO_IRQ) { |
735 | pr_debug("irq: -> existing mapping on virq %d\n", virq); | 729 | pr_debug("irq: -> existing mapping on virq %d\n", virq); |
@@ -899,10 +893,13 @@ unsigned int irq_radix_revmap_lookup(struct irq_host *host, | |||
899 | return irq_find_mapping(host, hwirq); | 893 | return irq_find_mapping(host, hwirq); |
900 | 894 | ||
901 | /* | 895 | /* |
902 | * No rcu_read_lock(ing) needed, the ptr returned can't go under us | 896 | * The ptr returned references the static global irq_map, but |
903 | * as it's referencing an entry in the static irq_map table. | 897 | * freeing an irq can delete radix-tree nodes on the lookup path |
898 | * via call_rcu, so take rcu_read_lock() around the lookup. |
904 | */ | 899 | */ |
900 | rcu_read_lock(); | ||
905 | ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); | 901 | ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq); |
902 | rcu_read_unlock(); | ||
906 | 903 | ||
907 | /* | 904 | /* |
908 | * If found in radix tree, then fine. | 905 | * If found in radix tree, then fine. |
@@ -1010,14 +1007,23 @@ void irq_free_virt(unsigned int virq, unsigned int count) | |||
1010 | WARN_ON (virq < NUM_ISA_INTERRUPTS); | 1007 | WARN_ON (virq < NUM_ISA_INTERRUPTS); |
1011 | WARN_ON (count == 0 || (virq + count) > irq_virq_count); | 1008 | WARN_ON (count == 0 || (virq + count) > irq_virq_count); |
1012 | 1009 | ||
1010 | if (virq < NUM_ISA_INTERRUPTS) { | ||
1011 | if (virq + count < NUM_ISA_INTERRUPTS) | ||
1012 | return; | ||
1013 | count -= NUM_ISA_INTERRUPTS - virq; | ||
1014 | virq = NUM_ISA_INTERRUPTS; | ||
1015 | } | ||
1016 | |||
1017 | if (count > irq_virq_count || virq > irq_virq_count - count) { | ||
1018 | if (virq > irq_virq_count) | ||
1019 | return; | ||
1020 | count = irq_virq_count - virq; | ||
1021 | } | ||
1022 | |||
1013 | raw_spin_lock_irqsave(&irq_big_lock, flags); | 1023 | raw_spin_lock_irqsave(&irq_big_lock, flags); |
1014 | for (i = virq; i < (virq + count); i++) { | 1024 | for (i = virq; i < (virq + count); i++) { |
1015 | struct irq_host *host; | 1025 | struct irq_host *host; |
1016 | 1026 | ||
1017 | if (i < NUM_ISA_INTERRUPTS || | ||
1018 | (virq + count) > irq_virq_count) | ||
1019 | continue; | ||
1020 | |||
1021 | host = irq_map[i].host; | 1027 | host = irq_map[i].host; |
1022 | irq_map[i].hwirq = host->inval_irq; | 1028 | irq_map[i].hwirq = host->inval_irq; |
1023 | smp_wmb(); | 1029 | smp_wmb(); |
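
The new clamping in irq_free_virt() trims the ISA head and any tail beyond irq_virq_count before the loop runs. A worked example with illustrative values (NUM_ISA_INTERRUPTS = 16, irq_virq_count = 512):

    /*
     * irq_free_virt(10, 20):  [10, 30) overlaps the ISA block, so
     *     count -= 16 - 10   ->  virq = 16, count = 14, range [16, 30)
     *
     * irq_free_virt(500, 40): [500, 540) runs past irq_virq_count, so
     *     count = 512 - 500  ->  virq = 500, count = 12, range [500, 512)
     */
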
diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c index a6ae1cfad86c..cb22024f2b42 100644 --- a/arch/powerpc/kernel/ptrace.c +++ b/arch/powerpc/kernel/ptrace.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/signal.h> | 29 | #include <linux/signal.h> |
30 | #include <linux/seccomp.h> | 30 | #include <linux/seccomp.h> |
31 | #include <linux/audit.h> | 31 | #include <linux/audit.h> |
32 | #include <trace/syscall.h> | ||
32 | #ifdef CONFIG_PPC32 | 33 | #ifdef CONFIG_PPC32 |
33 | #include <linux/module.h> | 34 | #include <linux/module.h> |
34 | #endif | 35 | #endif |
@@ -40,6 +41,9 @@ | |||
40 | #include <asm/pgtable.h> | 41 | #include <asm/pgtable.h> |
41 | #include <asm/system.h> | 42 | #include <asm/system.h> |
42 | 43 | ||
44 | #define CREATE_TRACE_POINTS | ||
45 | #include <trace/events/syscalls.h> | ||
46 | |||
43 | /* | 47 | /* |
44 | * The parameter save area on the stack is used to store arguments being passed | 48 | * The parameter save area on the stack is used to store arguments being passed |
45 | * to callee function and is located at fixed offset from stack pointer. | 49 | * to callee function and is located at fixed offset from stack pointer. |
@@ -1710,6 +1714,9 @@ long do_syscall_trace_enter(struct pt_regs *regs) | |||
1710 | */ | 1714 | */ |
1711 | ret = -1L; | 1715 | ret = -1L; |
1712 | 1716 | ||
1717 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | ||
1718 | trace_sys_enter(regs, regs->gpr[0]); | ||
1719 | |||
1713 | if (unlikely(current->audit_context)) { | 1720 | if (unlikely(current->audit_context)) { |
1714 | #ifdef CONFIG_PPC64 | 1721 | #ifdef CONFIG_PPC64 |
1715 | if (!is_32bit_task()) | 1722 | if (!is_32bit_task()) |
@@ -1738,6 +1745,9 @@ void do_syscall_trace_leave(struct pt_regs *regs) | |||
1738 | audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, | 1745 | audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS, |
1739 | regs->result); | 1746 | regs->result); |
1740 | 1747 | ||
1748 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | ||
1749 | trace_sys_exit(regs, regs->result); | ||
1750 | |||
1741 | step = test_thread_flag(TIF_SINGLESTEP); | 1751 | step = test_thread_flag(TIF_SINGLESTEP); |
1742 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) | 1752 | if (step || test_thread_flag(TIF_SYSCALL_TRACE)) |
1743 | tracehook_report_syscall_exit(regs, step); | 1753 | tracehook_report_syscall_exit(regs, step); |
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c index 4a6f2ec7e761..8ebc6700b98d 100644 --- a/arch/powerpc/kernel/smp.c +++ b/arch/powerpc/kernel/smp.c | |||
@@ -129,7 +129,7 @@ static irqreturn_t call_function_single_action(int irq, void *data) | |||
129 | return IRQ_HANDLED; | 129 | return IRQ_HANDLED; |
130 | } | 130 | } |
131 | 131 | ||
132 | irqreturn_t debug_ipi_action(int irq, void *data) | 132 | static irqreturn_t debug_ipi_action(int irq, void *data) |
133 | { | 133 | { |
134 | if (crash_ipi_function_ptr) { | 134 | if (crash_ipi_function_ptr) { |
135 | crash_ipi_function_ptr(get_irq_regs()); | 135 | crash_ipi_function_ptr(get_irq_regs()); |
diff --git a/arch/powerpc/kernel/swsusp.c b/arch/powerpc/kernel/swsusp.c index 560c96119501..aa17b76dd427 100644 --- a/arch/powerpc/kernel/swsusp.c +++ b/arch/powerpc/kernel/swsusp.c | |||
@@ -10,7 +10,6 @@ | |||
10 | */ | 10 | */ |
11 | 11 | ||
12 | #include <linux/sched.h> | 12 | #include <linux/sched.h> |
13 | #include <asm/suspend.h> | ||
14 | #include <asm/system.h> | 13 | #include <asm/system.h> |
15 | #include <asm/current.h> | 14 | #include <asm/current.h> |
16 | #include <asm/mmu_context.h> | 15 | #include <asm/mmu_context.h> |
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c index b13306b0d925..0ff4ab98d50c 100644 --- a/arch/powerpc/kernel/traps.c +++ b/arch/powerpc/kernel/traps.c | |||
@@ -55,6 +55,7 @@ | |||
55 | #endif | 55 | #endif |
56 | #include <asm/kexec.h> | 56 | #include <asm/kexec.h> |
57 | #include <asm/ppc-opcode.h> | 57 | #include <asm/ppc-opcode.h> |
58 | #include <asm/rio.h> | ||
58 | 59 | ||
59 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) | 60 | #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC) |
60 | int (*__debugger)(struct pt_regs *regs) __read_mostly; | 61 | int (*__debugger)(struct pt_regs *regs) __read_mostly; |
@@ -424,6 +425,12 @@ int machine_check_e500mc(struct pt_regs *regs) | |||
424 | unsigned long reason = mcsr; | 425 | unsigned long reason = mcsr; |
425 | int recoverable = 1; | 426 | int recoverable = 1; |
426 | 427 | ||
428 | if (reason & MCSR_BUS_RBERR) { | ||
429 | recoverable = fsl_rio_mcheck_exception(regs); | ||
430 | if (recoverable == 1) | ||
431 | goto silent_out; | ||
432 | } | ||
433 | |||
427 | printk("Machine check in kernel mode.\n"); | 434 | printk("Machine check in kernel mode.\n"); |
428 | printk("Caused by (from MCSR=%lx): ", reason); | 435 | printk("Caused by (from MCSR=%lx): ", reason); |
429 | 436 | ||
@@ -499,6 +506,7 @@ int machine_check_e500mc(struct pt_regs *regs) | |||
499 | reason & MCSR_MEA ? "Effective" : "Physical", addr); | 506 | reason & MCSR_MEA ? "Effective" : "Physical", addr); |
500 | } | 507 | } |
501 | 508 | ||
509 | silent_out: | ||
502 | mtspr(SPRN_MCSR, mcsr); | 510 | mtspr(SPRN_MCSR, mcsr); |
503 | return mfspr(SPRN_MCSR) == 0 && recoverable; | 511 | return mfspr(SPRN_MCSR) == 0 && recoverable; |
504 | } | 512 | } |
@@ -507,6 +515,11 @@ int machine_check_e500(struct pt_regs *regs) | |||
507 | { | 515 | { |
508 | unsigned long reason = get_mc_reason(regs); | 516 | unsigned long reason = get_mc_reason(regs); |
509 | 517 | ||
518 | if (reason & MCSR_BUS_RBERR) { | ||
519 | if (fsl_rio_mcheck_exception(regs)) | ||
520 | return 1; | ||
521 | } | ||
522 | |||
510 | printk("Machine check in kernel mode.\n"); | 523 | printk("Machine check in kernel mode.\n"); |
511 | printk("Caused by (from MCSR=%lx): ", reason); | 524 | printk("Caused by (from MCSR=%lx): ", reason); |
512 | 525 | ||
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c index 8ee51a252cf1..e6bec74be131 100644 --- a/arch/powerpc/oprofile/op_model_power4.c +++ b/arch/powerpc/oprofile/op_model_power4.c | |||
@@ -261,6 +261,28 @@ static int get_kernel(unsigned long pc, unsigned long mmcra) | |||
261 | return is_kernel; | 261 | return is_kernel; |
262 | } | 262 | } |
263 | 263 | ||
264 | static bool pmc_overflow(unsigned long val) | ||
265 | { | ||
266 | if ((int)val < 0) | ||
267 | return true; | ||
268 | |||
269 | /* | ||
270 | * Events on POWER7 can roll back if a speculative event doesn't | ||
271 | * eventually complete. Unfortunately in some rare cases they will | ||
272 | * raise a performance monitor exception. We need to catch this to | ||
273 | * ensure we reset the PMC. In all cases the PMC will be 256 or less | ||
274 | * cycles from overflow. | ||
275 | * | ||
276 | * We only do this if the first pass fails to find any overflowing | ||
277 | * PMCs because a user might set a period of less than 256 and we | ||
278 | * don't want to mistakenly reset them. | ||
279 | */ | ||
280 | if (__is_processor(PV_POWER7) && ((0x80000000 - val) <= 256)) | ||
281 | return true; | ||
282 | |||
283 | return false; | ||
284 | } | ||
285 | |||
264 | static void power4_handle_interrupt(struct pt_regs *regs, | 286 | static void power4_handle_interrupt(struct pt_regs *regs, |
265 | struct op_counter_config *ctr) | 287 | struct op_counter_config *ctr) |
266 | { | 288 | { |
@@ -281,7 +303,7 @@ static void power4_handle_interrupt(struct pt_regs *regs, | |||
281 | 303 | ||
282 | for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) { | 304 | for (i = 0; i < cur_cpu_spec->num_pmcs; ++i) { |
283 | val = classic_ctr_read(i); | 305 | val = classic_ctr_read(i); |
284 | if (val < 0) { | 306 | if (pmc_overflow(val)) { |
285 | if (oprofile_running && ctr[i].enabled) { | 307 | if (oprofile_running && ctr[i].enabled) { |
286 | oprofile_add_ext_sample(pc, regs, i, is_kernel); | 308 | oprofile_add_ext_sample(pc, regs, i, is_kernel); |
287 | classic_ctr_write(i, reset_value[i]); | 309 | classic_ctr_write(i, reset_value[i]); |
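
pmc_overflow() therefore flags both a negative count and a POWER7 counter parked just short of the overflow point. Some illustrative values:

    /*
     * val = 0x80000001  ->  (int)val < 0                      -> overflow
     * val = 0x7fffff80  ->  0x80000000 - val = 128  (<= 256)  -> overflow on POWER7
     * val = 0x7ffff000  ->  0x80000000 - val = 4096 (>  256)  -> not an overflow
     */
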
diff --git a/arch/powerpc/platforms/40x/Kconfig b/arch/powerpc/platforms/40x/Kconfig index b72176434ebe..d733d7ca939c 100644 --- a/arch/powerpc/platforms/40x/Kconfig +++ b/arch/powerpc/platforms/40x/Kconfig | |||
@@ -57,6 +57,8 @@ config KILAUEA | |||
57 | select 405EX | 57 | select 405EX |
58 | select PPC40x_SIMPLE | 58 | select PPC40x_SIMPLE |
59 | select PPC4xx_PCI_EXPRESS | 59 | select PPC4xx_PCI_EXPRESS |
60 | select PCI_MSI | ||
61 | select PPC4xx_MSI | ||
60 | help | 62 | help |
61 | This option enables support for the AMCC PPC405EX evaluation board. | 63 | This option enables support for the AMCC PPC405EX evaluation board. |
62 | 64 | ||
diff --git a/arch/powerpc/platforms/44x/Kconfig b/arch/powerpc/platforms/44x/Kconfig index f485fc5f6d5e..e958b6f48ec2 100644 --- a/arch/powerpc/platforms/44x/Kconfig +++ b/arch/powerpc/platforms/44x/Kconfig | |||
@@ -74,6 +74,8 @@ config KATMAI | |||
74 | select 440SPe | 74 | select 440SPe |
75 | select PCI | 75 | select PCI |
76 | select PPC4xx_PCI_EXPRESS | 76 | select PPC4xx_PCI_EXPRESS |
77 | select PCI_MSI | ||
78 | select PPC4xx_MSI | ||
77 | help | 79 | help |
78 | This option enables support for the AMCC PPC440SPe evaluation board. | 80 | This option enables support for the AMCC PPC440SPe evaluation board. |
79 | 81 | ||
@@ -118,6 +120,8 @@ config CANYONLANDS | |||
118 | select 460EX | 120 | select 460EX |
119 | select PCI | 121 | select PCI |
120 | select PPC4xx_PCI_EXPRESS | 122 | select PPC4xx_PCI_EXPRESS |
123 | select PCI_MSI | ||
124 | select PPC4xx_MSI | ||
121 | select IBM_NEW_EMAC_RGMII | 125 | select IBM_NEW_EMAC_RGMII |
122 | select IBM_NEW_EMAC_ZMII | 126 | select IBM_NEW_EMAC_ZMII |
123 | help | 127 | help |
@@ -144,6 +148,8 @@ config REDWOOD | |||
144 | select 460SX | 148 | select 460SX |
145 | select PCI | 149 | select PCI |
146 | select PPC4xx_PCI_EXPRESS | 150 | select PPC4xx_PCI_EXPRESS |
151 | select PCI_MSI | ||
152 | select PPC4xx_MSI | ||
147 | help | 153 | help |
148 | This option enables support for the AMCC PPC460SX Redwood board. | 154 | This option enables support for the AMCC PPC460SX Redwood board. |
149 | 155 | ||
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c index 449c08c15862..3e4eba603e6b 100644 --- a/arch/powerpc/platforms/cell/interrupt.c +++ b/arch/powerpc/platforms/cell/interrupt.c | |||
@@ -176,14 +176,14 @@ EXPORT_SYMBOL_GPL(iic_get_target_id); | |||
176 | #ifdef CONFIG_SMP | 176 | #ifdef CONFIG_SMP |
177 | 177 | ||
178 | /* Use the highest interrupt priorities for IPI */ | 178 | /* Use the highest interrupt priorities for IPI */ |
179 | static inline int iic_ipi_to_irq(int ipi) | 179 | static inline int iic_msg_to_irq(int msg) |
180 | { | 180 | { |
181 | return IIC_IRQ_TYPE_IPI + 0xf - ipi; | 181 | return IIC_IRQ_TYPE_IPI + 0xf - msg; |
182 | } | 182 | } |
183 | 183 | ||
184 | void iic_cause_IPI(int cpu, int mesg) | 184 | void iic_message_pass(int cpu, int msg) |
185 | { | 185 | { |
186 | out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - mesg) << 4); | 186 | out_be64(&per_cpu(cpu_iic, cpu).regs->generate, (0xf - msg) << 4); |
187 | } | 187 | } |
188 | 188 | ||
189 | struct irq_host *iic_get_irq_host(int node) | 189 | struct irq_host *iic_get_irq_host(int node) |
@@ -192,50 +192,31 @@ struct irq_host *iic_get_irq_host(int node) | |||
192 | } | 192 | } |
193 | EXPORT_SYMBOL_GPL(iic_get_irq_host); | 193 | EXPORT_SYMBOL_GPL(iic_get_irq_host); |
194 | 194 | ||
195 | static irqreturn_t iic_ipi_action(int irq, void *dev_id) | 195 | static void iic_request_ipi(int msg) |
196 | { | ||
197 | int ipi = (int)(long)dev_id; | ||
198 | |||
199 | switch(ipi) { | ||
200 | case PPC_MSG_CALL_FUNCTION: | ||
201 | generic_smp_call_function_interrupt(); | ||
202 | break; | ||
203 | case PPC_MSG_RESCHEDULE: | ||
204 | scheduler_ipi(); | ||
205 | break; | ||
206 | case PPC_MSG_CALL_FUNC_SINGLE: | ||
207 | generic_smp_call_function_single_interrupt(); | ||
208 | break; | ||
209 | case PPC_MSG_DEBUGGER_BREAK: | ||
210 | debug_ipi_action(0, NULL); | ||
211 | break; | ||
212 | } | ||
213 | return IRQ_HANDLED; | ||
214 | } | ||
215 | static void iic_request_ipi(int ipi, const char *name) | ||
216 | { | 196 | { |
217 | int virq; | 197 | int virq; |
218 | 198 | ||
219 | virq = irq_create_mapping(iic_host, iic_ipi_to_irq(ipi)); | 199 | virq = irq_create_mapping(iic_host, iic_msg_to_irq(msg)); |
220 | if (virq == NO_IRQ) { | 200 | if (virq == NO_IRQ) { |
221 | printk(KERN_ERR | 201 | printk(KERN_ERR |
222 | "iic: failed to map IPI %s\n", name); | 202 | "iic: failed to map IPI %s\n", smp_ipi_name[msg]); |
223 | return; | 203 | return; |
224 | } | 204 | } |
225 | if (request_irq(virq, iic_ipi_action, IRQF_DISABLED, name, | 205 | |
226 | (void *)(long)ipi)) | 206 | /* |
227 | printk(KERN_ERR | 207 | * smp_request_message_ipi reports any error itself. If a message |
228 | "iic: failed to request IPI %s\n", name); | 208 | * is not needed it returns non-zero, so dispose of the mapping. |
209 | */ | ||
210 | if (smp_request_message_ipi(virq, msg)) | ||
211 | irq_dispose_mapping(virq); | ||
229 | } | 212 | } |
230 | 213 | ||
231 | void iic_request_IPIs(void) | 214 | void iic_request_IPIs(void) |
232 | { | 215 | { |
233 | iic_request_ipi(PPC_MSG_CALL_FUNCTION, "IPI-call"); | 216 | iic_request_ipi(PPC_MSG_CALL_FUNCTION); |
234 | iic_request_ipi(PPC_MSG_RESCHEDULE, "IPI-resched"); | 217 | iic_request_ipi(PPC_MSG_RESCHEDULE); |
235 | iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE, "IPI-call-single"); | 218 | iic_request_ipi(PPC_MSG_CALL_FUNC_SINGLE); |
236 | #ifdef CONFIG_DEBUGGER | 219 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK); |
237 | iic_request_ipi(PPC_MSG_DEBUGGER_BREAK, "IPI-debug"); | ||
238 | #endif /* CONFIG_DEBUGGER */ | ||
239 | } | 220 | } |
240 | 221 | ||
241 | #endif /* CONFIG_SMP */ | 222 | #endif /* CONFIG_SMP */ |
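The renamed iic_msg_to_irq()/iic_message_pass() pair above maps the four generic powerpc SMP messages onto the highest IIC IPI priorities. A standalone sketch of that arithmetic follows; the value used for IIC_IRQ_TYPE_IPI is an assumption for illustration only (the real constant lives in interrupt.h), and the PPC_MSG_* messages are taken as 0..3, their usual numbering in this kernel generation.

/* Standalone sketch of the message-to-priority mapping used above. */
#include <stdio.h>

#define IIC_IRQ_TYPE_IPI	0x80	/* assumed placeholder value */

static int iic_msg_to_irq(int msg)
{
	return IIC_IRQ_TYPE_IPI + 0xf - msg;	/* msg 0 gets priority 0xf */
}

int main(void)
{
	int msg;

	for (msg = 0; msg <= 3; msg++)
		printf("msg %d -> unit irq 0x%x, generate register value 0x%x\n",
		       msg, iic_msg_to_irq(msg), (0xf - msg) << 4);
	return 0;
}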
diff --git a/arch/powerpc/platforms/cell/interrupt.h b/arch/powerpc/platforms/cell/interrupt.h index 942dc39d6045..4f60ae6ca358 100644 --- a/arch/powerpc/platforms/cell/interrupt.h +++ b/arch/powerpc/platforms/cell/interrupt.h | |||
@@ -75,7 +75,7 @@ enum { | |||
75 | }; | 75 | }; |
76 | 76 | ||
77 | extern void iic_init_IRQ(void); | 77 | extern void iic_init_IRQ(void); |
78 | extern void iic_cause_IPI(int cpu, int mesg); | 78 | extern void iic_message_pass(int cpu, int msg); |
79 | extern void iic_request_IPIs(void); | 79 | extern void iic_request_IPIs(void); |
80 | extern void iic_setup_cpu(void); | 80 | extern void iic_setup_cpu(void); |
81 | 81 | ||
diff --git a/arch/powerpc/platforms/cell/smp.c b/arch/powerpc/platforms/cell/smp.c index d176e6148e3f..dbb641ea90dd 100644 --- a/arch/powerpc/platforms/cell/smp.c +++ b/arch/powerpc/platforms/cell/smp.c | |||
@@ -152,7 +152,7 @@ static int smp_cell_cpu_bootable(unsigned int nr) | |||
152 | return 1; | 152 | return 1; |
153 | } | 153 | } |
154 | static struct smp_ops_t bpa_iic_smp_ops = { | 154 | static struct smp_ops_t bpa_iic_smp_ops = { |
155 | .message_pass = iic_cause_IPI, | 155 | .message_pass = iic_message_pass, |
156 | .probe = smp_iic_probe, | 156 | .probe = smp_iic_probe, |
157 | .kick_cpu = smp_cell_kick_cpu, | 157 | .kick_cpu = smp_cell_kick_cpu, |
158 | .setup_cpu = smp_cell_setup_cpu, | 158 | .setup_cpu = smp_cell_setup_cpu, |
diff --git a/arch/powerpc/sysdev/Kconfig b/arch/powerpc/sysdev/Kconfig index d775fd148d13..7b4df37ac381 100644 --- a/arch/powerpc/sysdev/Kconfig +++ b/arch/powerpc/sysdev/Kconfig | |||
@@ -7,11 +7,18 @@ config PPC4xx_PCI_EXPRESS | |||
7 | depends on PCI && 4xx | 7 | depends on PCI && 4xx |
8 | default n | 8 | default n |
9 | 9 | ||
10 | config PPC4xx_MSI | ||
11 | bool | ||
12 | depends on PCI_MSI | ||
13 | depends on PCI && 4xx | ||
14 | default n | ||
15 | |||
10 | config PPC_MSI_BITMAP | 16 | config PPC_MSI_BITMAP |
11 | bool | 17 | bool |
12 | depends on PCI_MSI | 18 | depends on PCI_MSI |
13 | default y if MPIC | 19 | default y if MPIC |
14 | default y if FSL_PCI | 20 | default y if FSL_PCI |
21 | default y if PPC4xx_MSI | ||
15 | 22 | ||
16 | source "arch/powerpc/sysdev/xics/Kconfig" | 23 | source "arch/powerpc/sysdev/xics/Kconfig" |
17 | 24 | ||
diff --git a/arch/powerpc/sysdev/Makefile b/arch/powerpc/sysdev/Makefile index 6076e0074a87..0efa990e3344 100644 --- a/arch/powerpc/sysdev/Makefile +++ b/arch/powerpc/sysdev/Makefile | |||
@@ -41,6 +41,7 @@ obj-$(CONFIG_OF_RTC) += of_rtc.o | |||
41 | ifeq ($(CONFIG_PCI),y) | 41 | ifeq ($(CONFIG_PCI),y) |
42 | obj-$(CONFIG_4xx) += ppc4xx_pci.o | 42 | obj-$(CONFIG_4xx) += ppc4xx_pci.o |
43 | endif | 43 | endif |
44 | obj-$(CONFIG_PPC4xx_MSI) += ppc4xx_msi.o | ||
44 | obj-$(CONFIG_PPC4xx_CPM) += ppc4xx_cpm.o | 45 | obj-$(CONFIG_PPC4xx_CPM) += ppc4xx_cpm.o |
45 | obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o | 46 | obj-$(CONFIG_PPC4xx_GPIO) += ppc4xx_gpio.o |
46 | 47 | ||
diff --git a/arch/powerpc/sysdev/fsl_lbc.c b/arch/powerpc/sysdev/fsl_lbc.c index 4fcb5a4e60dd..0608b1657da4 100644 --- a/arch/powerpc/sysdev/fsl_lbc.c +++ b/arch/powerpc/sysdev/fsl_lbc.c | |||
@@ -184,7 +184,8 @@ int fsl_upm_run_pattern(struct fsl_upm *upm, void __iomem *io_base, u32 mar) | |||
184 | } | 184 | } |
185 | EXPORT_SYMBOL(fsl_upm_run_pattern); | 185 | EXPORT_SYMBOL(fsl_upm_run_pattern); |
186 | 186 | ||
187 | static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl) | 187 | static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl, |
188 | struct device_node *node) | ||
188 | { | 189 | { |
189 | struct fsl_lbc_regs __iomem *lbc = ctrl->regs; | 190 | struct fsl_lbc_regs __iomem *lbc = ctrl->regs; |
190 | 191 | ||
@@ -198,6 +199,10 @@ static int __devinit fsl_lbc_ctrl_init(struct fsl_lbc_ctrl *ctrl) | |||
198 | /* Enable interrupts for any detected events */ | 199 | /* Enable interrupts for any detected events */ |
199 | out_be32(&lbc->lteir, LTEIR_ENABLE); | 200 | out_be32(&lbc->lteir, LTEIR_ENABLE); |
200 | 201 | ||
202 | /* Set the monitor timeout value to the maximum for erratum A001 */ | ||
203 | if (of_device_is_compatible(node, "fsl,elbc")) | ||
204 | clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS); | ||
205 | |||
201 | return 0; | 206 | return 0; |
202 | } | 207 | } |
203 | 208 | ||
@@ -304,7 +309,7 @@ static int __devinit fsl_lbc_ctrl_probe(struct platform_device *dev) | |||
304 | 309 | ||
305 | fsl_lbc_ctrl_dev->dev = &dev->dev; | 310 | fsl_lbc_ctrl_dev->dev = &dev->dev; |
306 | 311 | ||
307 | ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev); | 312 | ret = fsl_lbc_ctrl_init(fsl_lbc_ctrl_dev, dev->dev.of_node); |
308 | if (ret < 0) | 313 | if (ret < 0) |
309 | goto err; | 314 | goto err; |
310 | 315 | ||
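The erratum A001 workaround above relies on clrsetbits_be32(), a powerpc read-modify-write helper; the call expands to roughly the sequence sketched below. The LBCR_BMT and LBCR_BMTPS mask values shown are placeholders for illustration, not the real register layout.

/* Rough expansion of clrsetbits_be32(&lbc->lbcr, LBCR_BMT, LBCR_BMTPS).
 * Mask values below are illustrative assumptions only. */
#include <linux/types.h>
#include <asm/io.h>

#define LBCR_BMT	0x0000ff00	/* assumed bus monitor timeout field */
#define LBCR_BMTPS	0x0000000f	/* assumed maximum timeout prescaler */

static void lbc_set_max_bus_monitor_timeout(u32 __iomem *lbcr)
{
	u32 val = in_be32(lbcr);	/* read the current LBCR contents */

	val &= ~LBCR_BMT;		/* clear the timeout field */
	val |= LBCR_BMTPS;		/* select the largest prescaler */
	out_be32(lbcr, val);		/* write the register back */
}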
diff --git a/arch/powerpc/sysdev/fsl_rio.c b/arch/powerpc/sysdev/fsl_rio.c index 49798532b477..5b206a2fe17c 100644 --- a/arch/powerpc/sysdev/fsl_rio.c +++ b/arch/powerpc/sysdev/fsl_rio.c | |||
@@ -10,7 +10,7 @@ | |||
10 | * - Added Port-Write message handling | 10 | * - Added Port-Write message handling |
11 | * - Added Machine Check exception handling | 11 | * - Added Machine Check exception handling |
12 | * | 12 | * |
13 | * Copyright (C) 2007, 2008 Freescale Semiconductor, Inc. | 13 | * Copyright (C) 2007, 2008, 2010 Freescale Semiconductor, Inc. |
14 | * Zhang Wei <wei.zhang@freescale.com> | 14 | * Zhang Wei <wei.zhang@freescale.com> |
15 | * | 15 | * |
16 | * Copyright 2005 MontaVista Software, Inc. | 16 | * Copyright 2005 MontaVista Software, Inc. |
@@ -47,15 +47,33 @@ | |||
47 | #define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) | 47 | #define IRQ_RIO_RX(m) (((struct rio_priv *)(m->priv))->rxirq) |
48 | #define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) | 48 | #define IRQ_RIO_PW(m) (((struct rio_priv *)(m->priv))->pwirq) |
49 | 49 | ||
50 | #define IPWSR_CLEAR 0x98 | ||
51 | #define OMSR_CLEAR 0x1cb3 | ||
52 | #define IMSR_CLEAR 0x491 | ||
53 | #define IDSR_CLEAR 0x91 | ||
54 | #define ODSR_CLEAR 0x1c00 | ||
55 | #define LTLEECSR_ENABLE_ALL 0xFFC000FC | ||
56 | #define ESCSR_CLEAR 0x07120204 | ||
57 | |||
58 | #define RIO_PORT1_EDCSR 0x0640 | ||
59 | #define RIO_PORT2_EDCSR 0x0680 | ||
60 | #define RIO_PORT1_IECSR 0x10130 | ||
61 | #define RIO_PORT2_IECSR 0x101B0 | ||
62 | #define RIO_IM0SR 0x13064 | ||
63 | #define RIO_IM1SR 0x13164 | ||
64 | #define RIO_OM0SR 0x13004 | ||
65 | #define RIO_OM1SR 0x13104 | ||
66 | |||
50 | #define RIO_ATMU_REGS_OFFSET 0x10c00 | 67 | #define RIO_ATMU_REGS_OFFSET 0x10c00 |
51 | #define RIO_P_MSG_REGS_OFFSET 0x11000 | 68 | #define RIO_P_MSG_REGS_OFFSET 0x11000 |
52 | #define RIO_S_MSG_REGS_OFFSET 0x13000 | 69 | #define RIO_S_MSG_REGS_OFFSET 0x13000 |
53 | #define RIO_GCCSR 0x13c | 70 | #define RIO_GCCSR 0x13c |
54 | #define RIO_ESCSR 0x158 | 71 | #define RIO_ESCSR 0x158 |
72 | #define RIO_PORT2_ESCSR 0x178 | ||
55 | #define RIO_CCSR 0x15c | 73 | #define RIO_CCSR 0x15c |
56 | #define RIO_LTLEDCSR 0x0608 | 74 | #define RIO_LTLEDCSR 0x0608 |
57 | #define RIO_LTLEDCSR_IER 0x80000000 | 75 | #define RIO_LTLEDCSR_IER 0x80000000 |
58 | #define RIO_LTLEDCSR_PRT 0x01000000 | 76 | #define RIO_LTLEDCSR_PRT 0x01000000 |
59 | #define RIO_LTLEECSR 0x060c | 77 | #define RIO_LTLEECSR 0x060c |
60 | #define RIO_EPWISR 0x10010 | 78 | #define RIO_EPWISR 0x10010 |
61 | #define RIO_ISR_AACR 0x10120 | 79 | #define RIO_ISR_AACR 0x10120 |
@@ -88,7 +106,10 @@ | |||
88 | #define RIO_IPWSR_PWD 0x00000008 | 106 | #define RIO_IPWSR_PWD 0x00000008 |
89 | #define RIO_IPWSR_PWB 0x00000004 | 107 | #define RIO_IPWSR_PWB 0x00000004 |
90 | 108 | ||
91 | #define RIO_EPWISR_PINT 0x80000000 | 109 | /* EPWISR Error match value */ |
110 | #define RIO_EPWISR_PINT1 0x80000000 | ||
111 | #define RIO_EPWISR_PINT2 0x40000000 | ||
112 | #define RIO_EPWISR_MU 0x00000002 | ||
92 | #define RIO_EPWISR_PW 0x00000001 | 113 | #define RIO_EPWISR_PW 0x00000001 |
93 | 114 | ||
94 | #define RIO_MSG_DESC_SIZE 32 | 115 | #define RIO_MSG_DESC_SIZE 32 |
@@ -260,9 +281,7 @@ struct rio_priv { | |||
260 | static void __iomem *rio_regs_win; | 281 | static void __iomem *rio_regs_win; |
261 | 282 | ||
262 | #ifdef CONFIG_E500 | 283 | #ifdef CONFIG_E500 |
263 | static int (*saved_mcheck_exception)(struct pt_regs *regs); | 284 | int fsl_rio_mcheck_exception(struct pt_regs *regs) |
264 | |||
265 | static int fsl_rio_mcheck_exception(struct pt_regs *regs) | ||
266 | { | 285 | { |
267 | const struct exception_table_entry *entry = NULL; | 286 | const struct exception_table_entry *entry = NULL; |
268 | unsigned long reason = mfspr(SPRN_MCSR); | 287 | unsigned long reason = mfspr(SPRN_MCSR); |
@@ -284,11 +303,9 @@ static int fsl_rio_mcheck_exception(struct pt_regs *regs) | |||
284 | } | 303 | } |
285 | } | 304 | } |
286 | 305 | ||
287 | if (saved_mcheck_exception) | 306 | return 0; |
288 | return saved_mcheck_exception(regs); | ||
289 | else | ||
290 | return cur_cpu_spec->machine_check(regs); | ||
291 | } | 307 | } |
308 | EXPORT_SYMBOL_GPL(fsl_rio_mcheck_exception); | ||
292 | #endif | 309 | #endif |
293 | 310 | ||
294 | /** | 311 | /** |
@@ -1064,6 +1081,40 @@ static int fsl_rio_doorbell_init(struct rio_mport *mport) | |||
1064 | return rc; | 1081 | return rc; |
1065 | } | 1082 | } |
1066 | 1083 | ||
1084 | static void port_error_handler(struct rio_mport *port, int offset) | ||
1085 | { | ||
1086 | /*XXX: Error recovery is not implemented, we just clear errors */ | ||
1087 | out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); | ||
1088 | |||
1089 | if (offset == 0) { | ||
1090 | out_be32((u32 *)(rio_regs_win + RIO_PORT1_EDCSR), 0); | ||
1091 | out_be32((u32 *)(rio_regs_win + RIO_PORT1_IECSR), 0); | ||
1092 | out_be32((u32 *)(rio_regs_win + RIO_ESCSR), ESCSR_CLEAR); | ||
1093 | } else { | ||
1094 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_EDCSR), 0); | ||
1095 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_IECSR), 0); | ||
1096 | out_be32((u32 *)(rio_regs_win + RIO_PORT2_ESCSR), ESCSR_CLEAR); | ||
1097 | } | ||
1098 | } | ||
1099 | |||
1100 | static void msg_unit_error_handler(struct rio_mport *port) | ||
1101 | { | ||
1102 | struct rio_priv *priv = port->priv; | ||
1103 | |||
1104 | /*XXX: Error recovery is not implemented, we just clear errors */ | ||
1105 | out_be32((u32 *)(rio_regs_win + RIO_LTLEDCSR), 0); | ||
1106 | |||
1107 | out_be32((u32 *)(rio_regs_win + RIO_IM0SR), IMSR_CLEAR); | ||
1108 | out_be32((u32 *)(rio_regs_win + RIO_IM1SR), IMSR_CLEAR); | ||
1109 | out_be32((u32 *)(rio_regs_win + RIO_OM0SR), OMSR_CLEAR); | ||
1110 | out_be32((u32 *)(rio_regs_win + RIO_OM1SR), OMSR_CLEAR); | ||
1111 | |||
1112 | out_be32(&priv->msg_regs->odsr, ODSR_CLEAR); | ||
1113 | out_be32(&priv->msg_regs->dsr, IDSR_CLEAR); | ||
1114 | |||
1115 | out_be32(&priv->msg_regs->pwsr, IPWSR_CLEAR); | ||
1116 | } | ||
1117 | |||
1067 | /** | 1118 | /** |
1068 | * fsl_rio_port_write_handler - MPC85xx port write interrupt handler | 1119 | * fsl_rio_port_write_handler - MPC85xx port write interrupt handler |
1069 | * @irq: Linux interrupt number | 1120 | * @irq: Linux interrupt number |
@@ -1144,10 +1195,22 @@ fsl_rio_port_write_handler(int irq, void *dev_instance) | |||
1144 | } | 1195 | } |
1145 | 1196 | ||
1146 | pw_done: | 1197 | pw_done: |
1147 | if (epwisr & RIO_EPWISR_PINT) { | 1198 | if (epwisr & RIO_EPWISR_PINT1) { |
1199 | tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); | ||
1200 | pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); | ||
1201 | port_error_handler(port, 0); | ||
1202 | } | ||
1203 | |||
1204 | if (epwisr & RIO_EPWISR_PINT2) { | ||
1148 | tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); | 1205 | tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); |
1149 | pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); | 1206 | pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); |
1150 | out_be32(priv->regs_win + RIO_LTLEDCSR, 0); | 1207 | port_error_handler(port, 1); |
1208 | } | ||
1209 | |||
1210 | if (epwisr & RIO_EPWISR_MU) { | ||
1211 | tmp = in_be32(priv->regs_win + RIO_LTLEDCSR); | ||
1212 | pr_debug("RIO_LTLEDCSR = 0x%x\n", tmp); | ||
1213 | msg_unit_error_handler(port); | ||
1151 | } | 1214 | } |
1152 | 1215 | ||
1153 | return IRQ_HANDLED; | 1216 | return IRQ_HANDLED; |
@@ -1258,12 +1321,14 @@ static int fsl_rio_port_write_init(struct rio_mport *mport) | |||
1258 | 1321 | ||
1259 | 1322 | ||
1260 | /* Hook up port-write handler */ | 1323 | /* Hook up port-write handler */ |
1261 | rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, 0, | 1324 | rc = request_irq(IRQ_RIO_PW(mport), fsl_rio_port_write_handler, |
1262 | "port-write", (void *)mport); | 1325 | IRQF_SHARED, "port-write", (void *)mport); |
1263 | if (rc < 0) { | 1326 | if (rc < 0) { |
1264 | pr_err("MPC85xx RIO: unable to request inbound doorbell irq"); | 1327 | pr_err("MPC85xx RIO: unable to request inbound doorbell irq"); |
1265 | goto err_out; | 1328 | goto err_out; |
1266 | } | 1329 | } |
1330 | /* Enable Error Interrupt */ | ||
1331 | out_be32((u32 *)(rio_regs_win + RIO_LTLEECSR), LTLEECSR_ENABLE_ALL); | ||
1267 | 1332 | ||
1268 | INIT_WORK(&priv->pw_work, fsl_pw_dpc); | 1333 | INIT_WORK(&priv->pw_work, fsl_pw_dpc); |
1269 | spin_lock_init(&priv->pw_fifo_lock); | 1334 | spin_lock_init(&priv->pw_fifo_lock); |
@@ -1538,11 +1603,6 @@ int fsl_rio_setup(struct platform_device *dev) | |||
1538 | fsl_rio_doorbell_init(port); | 1603 | fsl_rio_doorbell_init(port); |
1539 | fsl_rio_port_write_init(port); | 1604 | fsl_rio_port_write_init(port); |
1540 | 1605 | ||
1541 | #ifdef CONFIG_E500 | ||
1542 | saved_mcheck_exception = ppc_md.machine_check_exception; | ||
1543 | ppc_md.machine_check_exception = fsl_rio_mcheck_exception; | ||
1544 | #endif | ||
1545 | |||
1546 | return 0; | 1606 | return 0; |
1547 | err: | 1607 | err: |
1548 | iounmap(priv->regs_win); | 1608 | iounmap(priv->regs_win); |
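Dropping the saved_mcheck_exception indirection means fsl_rio_mcheck_exception() is now meant to be called directly from the e500 machine check path. The sketch below shows how such a caller could look; the surrounding handler body is an assumption for illustration, not code taken from this diff.

/* Sketch: an e500 machine check handler consulting the RapidIO fixup
 * before falling back to generic reporting. */
int machine_check_e500(struct pt_regs *regs)
{
	unsigned long reason = mfspr(SPRN_MCSR);

#ifdef CONFIG_FSL_RIO
	if (fsl_rio_mcheck_exception(regs))
		return 1;	/* fault fixed up, execution can continue */
#endif

	printk("Machine check in kernel mode.\n");
	printk("Caused by (from MCSR=%lx): ", reason);
	/* ... decode the individual MCSR bits here ... */
	return 0;
}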
diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c new file mode 100644 index 000000000000..367af0241851 --- /dev/null +++ b/arch/powerpc/sysdev/ppc4xx_msi.c | |||
@@ -0,0 +1,276 @@ | |||
1 | /* | ||
2 | * Adding PCI-E MSI support for PPC4XX SoCs. | ||
3 | * | ||
4 | * Copyright (c) 2010, Applied Micro Circuits Corporation | ||
5 | * Authors: Tirumala R Marri <tmarri@apm.com> | ||
6 | * Feng Kan <fkan@apm.com> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License as | ||
10 | * published by the Free Software Foundation; either version 2 of | ||
11 | * the License, or (at your option) any later version. | ||
12 | * | ||
13 | * This program is distributed in the hope that it will be useful, | ||
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | * GNU General Public License for more details. | ||
17 | * | ||
18 | * You should have received a copy of the GNU General Public License | ||
19 | * along with this program; if not, write to the Free Software | ||
20 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, | ||
21 | * MA 02111-1307 USA | ||
22 | */ | ||
23 | |||
24 | #include <linux/irq.h> | ||
25 | #include <linux/bootmem.h> | ||
26 | #include <linux/pci.h> | ||
27 | #include <linux/msi.h> | ||
28 | #include <linux/of_platform.h> | ||
29 | #include <linux/interrupt.h> | ||
30 | #include <asm/prom.h> | ||
31 | #include <asm/hw_irq.h> | ||
32 | #include <asm/ppc-pci.h> | ||
33 | #include <boot/dcr.h> | ||
34 | #include <asm/dcr-regs.h> | ||
35 | #include <asm/msi_bitmap.h> | ||
36 | |||
37 | #define PEIH_TERMADH 0x00 | ||
38 | #define PEIH_TERMADL 0x08 | ||
39 | #define PEIH_MSIED 0x10 | ||
40 | #define PEIH_MSIMK 0x18 | ||
41 | #define PEIH_MSIASS 0x20 | ||
42 | #define PEIH_FLUSH0 0x30 | ||
43 | #define PEIH_FLUSH1 0x38 | ||
44 | #define PEIH_CNTRST 0x48 | ||
45 | #define NR_MSI_IRQS 4 | ||
46 | |||
47 | struct ppc4xx_msi { | ||
48 | u32 msi_addr_lo; | ||
49 | u32 msi_addr_hi; | ||
50 | void __iomem *msi_regs; | ||
51 | int msi_virqs[NR_MSI_IRQS]; | ||
52 | struct msi_bitmap bitmap; | ||
53 | struct device_node *msi_dev; | ||
54 | }; | ||
55 | |||
56 | static struct ppc4xx_msi ppc4xx_msi; | ||
57 | |||
58 | static int ppc4xx_msi_init_allocator(struct platform_device *dev, | ||
59 | struct ppc4xx_msi *msi_data) | ||
60 | { | ||
61 | int err; | ||
62 | |||
63 | err = msi_bitmap_alloc(&msi_data->bitmap, NR_MSI_IRQS, | ||
64 | dev->dev.of_node); | ||
65 | if (err) | ||
66 | return err; | ||
67 | |||
68 | err = msi_bitmap_reserve_dt_hwirqs(&msi_data->bitmap); | ||
69 | if (err < 0) { | ||
70 | msi_bitmap_free(&msi_data->bitmap); | ||
71 | return err; | ||
72 | } | ||
73 | |||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) | ||
78 | { | ||
79 | int int_no = -ENOMEM; | ||
80 | unsigned int virq; | ||
81 | struct msi_msg msg; | ||
82 | struct msi_desc *entry; | ||
83 | struct ppc4xx_msi *msi_data = &ppc4xx_msi; | ||
84 | |||
85 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
86 | int_no = msi_bitmap_alloc_hwirqs(&msi_data->bitmap, 1); | ||
87 | if (int_no >= 0) | ||
88 | break; | ||
89 | if (int_no < 0) { | ||
90 | pr_debug("%s: failed to allocate an MSI interrupt\n", | ||
91 | __func__); | ||
92 | } | ||
93 | virq = irq_of_parse_and_map(msi_data->msi_dev, int_no); | ||
94 | if (virq == NO_IRQ) { | ||
95 | dev_err(&dev->dev, "%s: failed to map irq\n", __func__); | ||
96 | msi_bitmap_free_hwirqs(&msi_data->bitmap, int_no, 1); | ||
97 | return -ENOSPC; | ||
98 | } | ||
99 | dev_dbg(&dev->dev, "%s: virq = %d\n", __func__, virq); | ||
100 | |||
101 | /* Setup msi address space */ | ||
102 | msg.address_hi = msi_data->msi_addr_hi; | ||
103 | msg.address_lo = msi_data->msi_addr_lo; | ||
104 | |||
105 | irq_set_msi_desc(virq, entry); | ||
106 | msg.data = int_no; | ||
107 | write_msi_msg(virq, &msg); | ||
108 | } | ||
109 | return 0; | ||
110 | } | ||
111 | |||
112 | void ppc4xx_teardown_msi_irqs(struct pci_dev *dev) | ||
113 | { | ||
114 | struct msi_desc *entry; | ||
115 | struct ppc4xx_msi *msi_data = &ppc4xx_msi; | ||
116 | |||
117 | dev_dbg(&dev->dev, "PCIE-MSI: tearing down msi irqs\n"); | ||
118 | |||
119 | list_for_each_entry(entry, &dev->msi_list, list) { | ||
120 | if (entry->irq == NO_IRQ) | ||
121 | continue; | ||
122 | irq_set_msi_desc(entry->irq, NULL); | ||
123 | msi_bitmap_free_hwirqs(&msi_data->bitmap, | ||
124 | virq_to_hw(entry->irq), 1); | ||
125 | irq_dispose_mapping(entry->irq); | ||
126 | } | ||
127 | } | ||
128 | |||
129 | static int ppc4xx_msi_check_device(struct pci_dev *pdev, int nvec, int type) | ||
130 | { | ||
131 | dev_dbg(&pdev->dev, "PCIE-MSI:%s called. vec %x type %d\n", | ||
132 | __func__, nvec, type); | ||
133 | if (type == PCI_CAP_ID_MSIX) | ||
134 | pr_debug("ppc4xx msi: MSI-X untested, trying anyway.\n"); | ||
135 | |||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static int ppc4xx_setup_pcieh_hw(struct platform_device *dev, | ||
140 | struct resource res, struct ppc4xx_msi *msi) | ||
141 | { | ||
142 | const u32 *msi_data; | ||
143 | const u32 *msi_mask; | ||
144 | const u32 *sdr_addr; | ||
145 | dma_addr_t msi_phys; | ||
146 | void *msi_virt; | ||
147 | |||
148 | sdr_addr = of_get_property(dev->dev.of_node, "sdr-base", NULL); | ||
149 | if (!sdr_addr) | ||
150 | return -1; | ||
151 | |||
152 | SDR0_WRITE(sdr_addr, (u64)res.start >> 32); /*HIGH addr */ | ||
153 | SDR0_WRITE(sdr_addr + 1, res.start & 0xFFFFFFFF); /* Low addr */ | ||
154 | |||
155 | |||
156 | msi->msi_dev = of_find_node_by_name(NULL, "ppc4xx-msi"); | ||
157 | if (msi->msi_dev) | ||
158 | return -ENODEV; | ||
159 | |||
160 | msi->msi_regs = of_iomap(msi->msi_dev, 0); | ||
161 | if (!msi->msi_regs) { | ||
162 | dev_err(&dev->dev, "of_iomap failed\n"); | ||
163 | return -ENOMEM; | ||
164 | } | ||
165 | dev_dbg(&dev->dev, "PCIE-MSI: msi register mapped 0x%x 0x%x\n", | ||
166 | (u32) (msi->msi_regs + PEIH_TERMADH), (u32) (msi->msi_regs)); | ||
167 | |||
168 | msi_virt = dma_alloc_coherent(&dev->dev, 64, &msi_phys, GFP_KERNEL); | ||
169 | msi->msi_addr_hi = 0x0; | ||
170 | msi->msi_addr_lo = (u32) msi_phys; | ||
171 | dev_dbg(&dev->dev, "PCIE-MSI: msi address 0x%x\n", msi->msi_addr_lo); | ||
172 | |||
173 | /* Program the interrupt handler termination address registers */ | ||
174 | out_be32(msi->msi_regs + PEIH_TERMADH, msi->msi_addr_hi); | ||
175 | out_be32(msi->msi_regs + PEIH_TERMADL, msi->msi_addr_lo); | ||
176 | |||
177 | msi_data = of_get_property(dev->dev.of_node, "msi-data", NULL); | ||
178 | if (!msi_data) | ||
179 | return -1; | ||
180 | msi_mask = of_get_property(dev->dev.of_node, "msi-mask", NULL); | ||
181 | if (!msi_mask) | ||
182 | return -1; | ||
183 | /* Program MSI Expected data and Mask bits */ | ||
184 | out_be32(msi->msi_regs + PEIH_MSIED, *msi_data); | ||
185 | out_be32(msi->msi_regs + PEIH_MSIMK, *msi_mask); | ||
186 | |||
187 | return 0; | ||
188 | } | ||
189 | |||
190 | static int ppc4xx_of_msi_remove(struct platform_device *dev) | ||
191 | { | ||
192 | struct ppc4xx_msi *msi = dev->dev.platform_data; | ||
193 | int i; | ||
194 | int virq; | ||
195 | |||
196 | for (i = 0; i < NR_MSI_IRQS; i++) { | ||
197 | virq = msi->msi_virqs[i]; | ||
198 | if (virq != NO_IRQ) | ||
199 | irq_dispose_mapping(virq); | ||
200 | } | ||
201 | |||
202 | if (msi->bitmap.bitmap) | ||
203 | msi_bitmap_free(&msi->bitmap); | ||
204 | iounmap(msi->msi_regs); | ||
205 | of_node_put(msi->msi_dev); | ||
206 | kfree(msi); | ||
207 | |||
208 | return 0; | ||
209 | } | ||
210 | |||
211 | static int __devinit ppc4xx_msi_probe(struct platform_device *dev) | ||
212 | { | ||
213 | struct ppc4xx_msi *msi; | ||
214 | struct resource res; | ||
215 | int err = 0; | ||
216 | |||
217 | msi = &ppc4xx_msi; /* keep the msi data for further use */ | ||
218 | |||
219 | dev_dbg(&dev->dev, "PCIE-MSI: Setting up MSI support...\n"); | ||
220 | |||
221 | msi = kzalloc(sizeof(struct ppc4xx_msi), GFP_KERNEL); | ||
222 | if (!msi) { | ||
223 | dev_err(&dev->dev, "No memory for MSI structure\n"); | ||
224 | return -ENOMEM; | ||
225 | } | ||
226 | dev->dev.platform_data = msi; | ||
227 | |||
228 | /* Get MSI ranges */ | ||
229 | err = of_address_to_resource(dev->dev.of_node, 0, &res); | ||
230 | if (err) { | ||
231 | dev_err(&dev->dev, "%s resource error!\n", | ||
232 | dev->dev.of_node->full_name); | ||
233 | goto error_out; | ||
234 | } | ||
235 | |||
236 | if (ppc4xx_setup_pcieh_hw(dev, res, msi)) | ||
237 | goto error_out; | ||
238 | |||
239 | err = ppc4xx_msi_init_allocator(dev, msi); | ||
240 | if (err) { | ||
241 | dev_err(&dev->dev, "Error allocating MSI bitmap\n"); | ||
242 | goto error_out; | ||
243 | } | ||
244 | |||
245 | ppc_md.setup_msi_irqs = ppc4xx_setup_msi_irqs; | ||
246 | ppc_md.teardown_msi_irqs = ppc4xx_teardown_msi_irqs; | ||
247 | ppc_md.msi_check_device = ppc4xx_msi_check_device; | ||
248 | return err; | ||
249 | |||
250 | error_out: | ||
251 | ppc4xx_of_msi_remove(dev); | ||
252 | return err; | ||
253 | } | ||
254 | static const struct of_device_id ppc4xx_msi_ids[] = { | ||
255 | { | ||
256 | .compatible = "amcc,ppc4xx-msi", | ||
257 | }, | ||
258 | {} | ||
259 | }; | ||
260 | static struct platform_driver ppc4xx_msi_driver = { | ||
261 | .probe = ppc4xx_msi_probe, | ||
262 | .remove = ppc4xx_of_msi_remove, | ||
263 | .driver = { | ||
264 | .name = "ppc4xx-msi", | ||
265 | .owner = THIS_MODULE, | ||
266 | .of_match_table = ppc4xx_msi_ids, | ||
267 | }, | ||
268 | |||
269 | }; | ||
270 | |||
271 | static __init int ppc4xx_msi_init(void) | ||
272 | { | ||
273 | return platform_driver_register(&ppc4xx_msi_driver); | ||
274 | } | ||
275 | |||
276 | subsys_initcall(ppc4xx_msi_init); | ||
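Once the platform driver above has installed ppc_md.setup_msi_irqs, an ordinary PCI driver on a 4xx SoC reaches it through the generic pci_enable_msi() path. A minimal, hypothetical consumer is sketched below; the driver name and handler are made up for illustration.

/* Hypothetical PCI driver probe showing the MSI request path. */
static irqreturn_t demo_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int __devinit demo_probe(struct pci_dev *pdev,
				const struct pci_device_id *id)
{
	int err;

	err = pci_enable_device(pdev);
	if (err)
		return err;

	/* Routed to ppc4xx_setup_msi_irqs(); fall back to INTx on failure. */
	if (pci_enable_msi(pdev))
		dev_info(&pdev->dev, "MSI unavailable, using the line IRQ\n");

	/* pdev->irq is now either the MSI virq or the legacy interrupt. */
	return request_irq(pdev->irq, demo_handler, 0, "demo", pdev);
}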
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig index ff2d2371b2e9..9fab2aa9c2c8 100644 --- a/arch/s390/Kconfig +++ b/arch/s390/Kconfig | |||
@@ -2,7 +2,7 @@ config MMU | |||
2 | def_bool y | 2 | def_bool y |
3 | 3 | ||
4 | config ZONE_DMA | 4 | config ZONE_DMA |
5 | def_bool y if 64BIT | 5 | def_bool y |
6 | 6 | ||
7 | config LOCKDEP_SUPPORT | 7 | config LOCKDEP_SUPPORT |
8 | def_bool y | 8 | def_bool y |
diff --git a/arch/s390/appldata/appldata_mem.c b/arch/s390/appldata/appldata_mem.c index e43fe7537031..f7d3dc555bdb 100644 --- a/arch/s390/appldata/appldata_mem.c +++ b/arch/s390/appldata/appldata_mem.c | |||
@@ -92,9 +92,7 @@ static void appldata_get_mem_data(void *data) | |||
92 | mem_data->pswpin = ev[PSWPIN]; | 92 | mem_data->pswpin = ev[PSWPIN]; |
93 | mem_data->pswpout = ev[PSWPOUT]; | 93 | mem_data->pswpout = ev[PSWPOUT]; |
94 | mem_data->pgalloc = ev[PGALLOC_NORMAL]; | 94 | mem_data->pgalloc = ev[PGALLOC_NORMAL]; |
95 | #ifdef CONFIG_ZONE_DMA | ||
96 | mem_data->pgalloc += ev[PGALLOC_DMA]; | 95 | mem_data->pgalloc += ev[PGALLOC_DMA]; |
97 | #endif | ||
98 | mem_data->pgfault = ev[PGFAULT]; | 96 | mem_data->pgfault = ev[PGFAULT]; |
99 | mem_data->pgmajfault = ev[PGMAJFAULT]; | 97 | mem_data->pgmajfault = ev[PGMAJFAULT]; |
100 | 98 | ||
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h index e1c8f3a49884..667c6e9f6a34 100644 --- a/arch/s390/include/asm/bitops.h +++ b/arch/s390/include/asm/bitops.h | |||
@@ -621,6 +621,7 @@ static inline unsigned long find_first_zero_bit(const unsigned long *addr, | |||
621 | bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes)); | 621 | bits = __ffz_word(bytes*8, __load_ulong_be(addr, bytes)); |
622 | return (bits < size) ? bits : size; | 622 | return (bits < size) ? bits : size; |
623 | } | 623 | } |
624 | #define find_first_zero_bit find_first_zero_bit | ||
624 | 625 | ||
625 | /** | 626 | /** |
626 | * find_first_bit - find the first set bit in a memory region | 627 | * find_first_bit - find the first set bit in a memory region |
@@ -641,6 +642,7 @@ static inline unsigned long find_first_bit(const unsigned long * addr, | |||
641 | bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes)); | 642 | bits = __ffs_word(bytes*8, __load_ulong_be(addr, bytes)); |
642 | return (bits < size) ? bits : size; | 643 | return (bits < size) ? bits : size; |
643 | } | 644 | } |
645 | #define find_first_bit find_first_bit | ||
644 | 646 | ||
645 | /** | 647 | /** |
646 | * find_next_zero_bit - find the first zero bit in a memory region | 648 | * find_next_zero_bit - find the first zero bit in a memory region |
@@ -677,6 +679,7 @@ static inline int find_next_zero_bit (const unsigned long * addr, | |||
677 | } | 679 | } |
678 | return offset + find_first_zero_bit(p, size); | 680 | return offset + find_first_zero_bit(p, size); |
679 | } | 681 | } |
682 | #define find_next_zero_bit find_next_zero_bit | ||
680 | 683 | ||
681 | /** | 684 | /** |
682 | * find_next_bit - find the first set bit in a memory region | 685 | * find_next_bit - find the first set bit in a memory region |
@@ -713,6 +716,7 @@ static inline int find_next_bit (const unsigned long * addr, | |||
713 | } | 716 | } |
714 | return offset + find_first_bit(p, size); | 717 | return offset + find_first_bit(p, size); |
715 | } | 718 | } |
719 | #define find_next_bit find_next_bit | ||
716 | 720 | ||
717 | /* | 721 | /* |
718 | * Every architecture must define this function. It's the fastest | 722 | * Every architecture must define this function. It's the fastest |
@@ -742,41 +746,6 @@ static inline int sched_find_first_bit(unsigned long *b) | |||
742 | * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24 | 746 | * 23 22 21 20 19 18 17 16 31 30 29 28 27 26 25 24 |
743 | */ | 747 | */ |
744 | 748 | ||
745 | static inline void __set_bit_le(unsigned long nr, void *addr) | ||
746 | { | ||
747 | __set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | ||
748 | } | ||
749 | |||
750 | static inline void __clear_bit_le(unsigned long nr, void *addr) | ||
751 | { | ||
752 | __clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | ||
753 | } | ||
754 | |||
755 | static inline int __test_and_set_bit_le(unsigned long nr, void *addr) | ||
756 | { | ||
757 | return __test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | ||
758 | } | ||
759 | |||
760 | static inline int test_and_set_bit_le(unsigned long nr, void *addr) | ||
761 | { | ||
762 | return test_and_set_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | ||
763 | } | ||
764 | |||
765 | static inline int __test_and_clear_bit_le(unsigned long nr, void *addr) | ||
766 | { | ||
767 | return __test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | ||
768 | } | ||
769 | |||
770 | static inline int test_and_clear_bit_le(unsigned long nr, void *addr) | ||
771 | { | ||
772 | return test_and_clear_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | ||
773 | } | ||
774 | |||
775 | static inline int test_bit_le(unsigned long nr, const void *addr) | ||
776 | { | ||
777 | return test_bit(nr ^ (__BITOPS_WORDSIZE - 8), addr); | ||
778 | } | ||
779 | |||
780 | static inline int find_first_zero_bit_le(void *vaddr, unsigned int size) | 749 | static inline int find_first_zero_bit_le(void *vaddr, unsigned int size) |
781 | { | 750 | { |
782 | unsigned long bytes, bits; | 751 | unsigned long bytes, bits; |
@@ -787,6 +756,7 @@ static inline int find_first_zero_bit_le(void *vaddr, unsigned int size) | |||
787 | bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes)); | 756 | bits = __ffz_word(bytes*8, __load_ulong_le(vaddr, bytes)); |
788 | return (bits < size) ? bits : size; | 757 | return (bits < size) ? bits : size; |
789 | } | 758 | } |
759 | #define find_first_zero_bit_le find_first_zero_bit_le | ||
790 | 760 | ||
791 | static inline int find_next_zero_bit_le(void *vaddr, unsigned long size, | 761 | static inline int find_next_zero_bit_le(void *vaddr, unsigned long size, |
792 | unsigned long offset) | 762 | unsigned long offset) |
@@ -816,6 +786,7 @@ static inline int find_next_zero_bit_le(void *vaddr, unsigned long size, | |||
816 | } | 786 | } |
817 | return offset + find_first_zero_bit_le(p, size); | 787 | return offset + find_first_zero_bit_le(p, size); |
818 | } | 788 | } |
789 | #define find_next_zero_bit_le find_next_zero_bit_le | ||
819 | 790 | ||
820 | static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size) | 791 | static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size) |
821 | { | 792 | { |
@@ -827,6 +798,7 @@ static inline unsigned long find_first_bit_le(void *vaddr, unsigned long size) | |||
827 | bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes)); | 798 | bits = __ffs_word(bytes*8, __load_ulong_le(vaddr, bytes)); |
828 | return (bits < size) ? bits : size; | 799 | return (bits < size) ? bits : size; |
829 | } | 800 | } |
801 | #define find_first_bit_le find_first_bit_le | ||
830 | 802 | ||
831 | static inline int find_next_bit_le(void *vaddr, unsigned long size, | 803 | static inline int find_next_bit_le(void *vaddr, unsigned long size, |
832 | unsigned long offset) | 804 | unsigned long offset) |
@@ -856,6 +828,9 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size, | |||
856 | } | 828 | } |
857 | return offset + find_first_bit_le(p, size); | 829 | return offset + find_first_bit_le(p, size); |
858 | } | 830 | } |
831 | #define find_next_bit_le find_next_bit_le | ||
832 | |||
833 | #include <asm-generic/bitops/le.h> | ||
859 | 834 | ||
860 | #define ext2_set_bit_atomic(lock, nr, addr) \ | 835 | #define ext2_set_bit_atomic(lock, nr, addr) \ |
861 | test_and_set_bit_le(nr, addr) | 836 | test_and_set_bit_le(nr, addr) |
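The new "#define find_first_bit find_first_bit" style lines let the asm-generic bitops headers detect that s390 supplies its own implementations: each generic fallback is wrapped in an #ifndef on its own name, roughly as sketched below (a sketch of the convention, not a copy of the generic header).

/* Sketch of the guard convention the new defines hook into. */
#ifndef find_first_zero_bit
/* Generic fallback, compiled only when the architecture did not
 * provide (and #define) its own find_first_zero_bit. */
extern unsigned long find_first_zero_bit(const unsigned long *addr,
					 unsigned long size);
#endif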
diff --git a/arch/s390/include/asm/delay.h b/arch/s390/include/asm/delay.h index 8a096b83f51f..0e3b35f96be1 100644 --- a/arch/s390/include/asm/delay.h +++ b/arch/s390/include/asm/delay.h | |||
@@ -14,10 +14,12 @@ | |||
14 | #ifndef _S390_DELAY_H | 14 | #ifndef _S390_DELAY_H |
15 | #define _S390_DELAY_H | 15 | #define _S390_DELAY_H |
16 | 16 | ||
17 | extern void __udelay(unsigned long long usecs); | 17 | void __ndelay(unsigned long long nsecs); |
18 | extern void udelay_simple(unsigned long long usecs); | 18 | void __udelay(unsigned long long usecs); |
19 | extern void __delay(unsigned long loops); | 19 | void udelay_simple(unsigned long long usecs); |
20 | void __delay(unsigned long loops); | ||
20 | 21 | ||
22 | #define ndelay(n) __ndelay((unsigned long long) (n)) | ||
21 | #define udelay(n) __udelay((unsigned long long) (n)) | 23 | #define udelay(n) __udelay((unsigned long long) (n)) |
22 | #define mdelay(n) __udelay((unsigned long long) (n) * 1000) | 24 | #define mdelay(n) __udelay((unsigned long long) (n) * 1000) |
23 | 25 | ||
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h index 1544b90bd6d6..ba7b01c726a3 100644 --- a/arch/s390/include/asm/irq.h +++ b/arch/s390/include/asm/irq.h | |||
@@ -2,6 +2,7 @@ | |||
2 | #define _ASM_IRQ_H | 2 | #define _ASM_IRQ_H |
3 | 3 | ||
4 | #include <linux/hardirq.h> | 4 | #include <linux/hardirq.h> |
5 | #include <linux/types.h> | ||
5 | 6 | ||
6 | enum interruption_class { | 7 | enum interruption_class { |
7 | EXTERNAL_INTERRUPT, | 8 | EXTERNAL_INTERRUPT, |
@@ -31,4 +32,11 @@ enum interruption_class { | |||
31 | NR_IRQS, | 32 | NR_IRQS, |
32 | }; | 33 | }; |
33 | 34 | ||
35 | typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long); | ||
36 | |||
37 | int register_external_interrupt(u16 code, ext_int_handler_t handler); | ||
38 | int unregister_external_interrupt(u16 code, ext_int_handler_t handler); | ||
39 | void service_subclass_irq_register(void); | ||
40 | void service_subclass_irq_unregister(void); | ||
41 | |||
34 | #endif /* _ASM_IRQ_H */ | 42 | #endif /* _ASM_IRQ_H */ |
diff --git a/arch/s390/include/asm/s390_ext.h b/arch/s390/include/asm/s390_ext.h deleted file mode 100644 index 080876d5f196..000000000000 --- a/arch/s390/include/asm/s390_ext.h +++ /dev/null | |||
@@ -1,17 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 1999,2010 | ||
3 | * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, | ||
4 | * Martin Schwidefsky <schwidefsky@de.ibm.com>, | ||
5 | */ | ||
6 | |||
7 | #ifndef _S390_EXTINT_H | ||
8 | #define _S390_EXTINT_H | ||
9 | |||
10 | #include <linux/types.h> | ||
11 | |||
12 | typedef void (*ext_int_handler_t)(unsigned int, unsigned int, unsigned long); | ||
13 | |||
14 | int register_external_interrupt(__u16 code, ext_int_handler_t handler); | ||
15 | int unregister_external_interrupt(__u16 code, ext_int_handler_t handler); | ||
16 | |||
17 | #endif /* _S390_EXTINT_H */ | ||
diff --git a/arch/s390/include/asm/suspend.h b/arch/s390/include/asm/suspend.h deleted file mode 100644 index dc75c616eafe..000000000000 --- a/arch/s390/include/asm/suspend.h +++ /dev/null | |||
@@ -1,10 +0,0 @@ | |||
1 | #ifndef __ASM_S390_SUSPEND_H | ||
2 | #define __ASM_S390_SUSPEND_H | ||
3 | |||
4 | static inline int arch_prepare_suspend(void) | ||
5 | { | ||
6 | return 0; | ||
7 | } | ||
8 | |||
9 | #endif | ||
10 | |||
diff --git a/arch/s390/include/asm/topology.h b/arch/s390/include/asm/topology.h index c5338834ddbd..005d77d8ae2a 100644 --- a/arch/s390/include/asm/topology.h +++ b/arch/s390/include/asm/topology.h | |||
@@ -7,7 +7,7 @@ | |||
7 | extern unsigned char cpu_core_id[NR_CPUS]; | 7 | extern unsigned char cpu_core_id[NR_CPUS]; |
8 | extern cpumask_t cpu_core_map[NR_CPUS]; | 8 | extern cpumask_t cpu_core_map[NR_CPUS]; |
9 | 9 | ||
10 | static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu) | 10 | static inline const struct cpumask *cpu_coregroup_mask(int cpu) |
11 | { | 11 | { |
12 | return &cpu_core_map[cpu]; | 12 | return &cpu_core_map[cpu]; |
13 | } | 13 | } |
@@ -21,7 +21,7 @@ static inline const struct cpumask *cpu_coregroup_mask(unsigned int cpu) | |||
21 | extern unsigned char cpu_book_id[NR_CPUS]; | 21 | extern unsigned char cpu_book_id[NR_CPUS]; |
22 | extern cpumask_t cpu_book_map[NR_CPUS]; | 22 | extern cpumask_t cpu_book_map[NR_CPUS]; |
23 | 23 | ||
24 | static inline const struct cpumask *cpu_book_mask(unsigned int cpu) | 24 | static inline const struct cpumask *cpu_book_mask(int cpu) |
25 | { | 25 | { |
26 | return &cpu_book_map[cpu]; | 26 | return &cpu_book_map[cpu]; |
27 | } | 27 | } |
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h index 2d9ea11f919a..2b23885e81e9 100644 --- a/arch/s390/include/asm/uaccess.h +++ b/arch/s390/include/asm/uaccess.h | |||
@@ -49,12 +49,13 @@ | |||
49 | 49 | ||
50 | #define segment_eq(a,b) ((a).ar4 == (b).ar4) | 50 | #define segment_eq(a,b) ((a).ar4 == (b).ar4) |
51 | 51 | ||
52 | #define __access_ok(addr, size) \ | ||
53 | ({ \ | ||
54 | __chk_user_ptr(addr); \ | ||
55 | 1; \ | ||
56 | }) | ||
52 | 57 | ||
53 | static inline int __access_ok(const void __user *addr, unsigned long size) | 58 | #define access_ok(type, addr, size) __access_ok(addr, size) |
54 | { | ||
55 | return 1; | ||
56 | } | ||
57 | #define access_ok(type,addr,size) __access_ok(addr,size) | ||
58 | 59 | ||
59 | /* | 60 | /* |
60 | * The exception table consists of pairs of addresses: the first is the | 61 | * The exception table consists of pairs of addresses: the first is the |
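Because user space lives in a separate address space on s390, access_ok() can unconditionally succeed; the macro form keeps the __chk_user_ptr() annotation so sparse still checks the __user pointer. A typical call site looks roughly like the sketch below, where VERIFY_READ is the conventional 'type' argument of this kernel generation and is ignored by the s390 implementation.

/* Sketch of a caller; the access_ok() check always passes on s390. */
static long demo_read_from_user(const char __user *buf, size_t len)
{
	char tmp[16];

	if (!access_ok(VERIFY_READ, buf, len))
		return -EFAULT;
	if (len > sizeof(tmp))
		len = sizeof(tmp);
	return copy_from_user(tmp, buf, len) ? -EFAULT : 0;
}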
diff --git a/arch/s390/kernel/Makefile b/arch/s390/kernel/Makefile index 5ff15dacb571..df3732249baa 100644 --- a/arch/s390/kernel/Makefile +++ b/arch/s390/kernel/Makefile | |||
@@ -20,10 +20,10 @@ CFLAGS_ptrace.o += -DUTS_MACHINE='"$(UTS_MACHINE)"' | |||
20 | 20 | ||
21 | CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w | 21 | CFLAGS_sysinfo.o += -Iinclude/math-emu -Iarch/s390/math-emu -w |
22 | 22 | ||
23 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o \ | 23 | obj-y := bitmap.o traps.o time.o process.o base.o early.o setup.o vtime.o \ |
24 | processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o \ | 24 | processor.o sys_s390.o ptrace.o signal.o cpcmd.o ebcdic.o nmi.o \ |
25 | s390_ext.o debug.o irq.o ipl.o dis.o diag.o mem_detect.o \ | 25 | debug.o irq.o ipl.o dis.o diag.o mem_detect.o sclp.o vdso.o \ |
26 | vdso.o vtime.o sysinfo.o nmi.o sclp.o jump_label.o | 26 | sysinfo.o jump_label.o |
27 | 27 | ||
28 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) | 28 | obj-y += $(if $(CONFIG_64BIT),entry64.o,entry.o) |
29 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) | 29 | obj-y += $(if $(CONFIG_64BIT),reipl64.o,reipl.o) |
diff --git a/arch/s390/kernel/dis.c b/arch/s390/kernel/dis.c index 3d4a78fc1adc..1ca3d1d6a86c 100644 --- a/arch/s390/kernel/dis.c +++ b/arch/s390/kernel/dis.c | |||
@@ -30,9 +30,9 @@ | |||
30 | #include <asm/atomic.h> | 30 | #include <asm/atomic.h> |
31 | #include <asm/mathemu.h> | 31 | #include <asm/mathemu.h> |
32 | #include <asm/cpcmd.h> | 32 | #include <asm/cpcmd.h> |
33 | #include <asm/s390_ext.h> | ||
34 | #include <asm/lowcore.h> | 33 | #include <asm/lowcore.h> |
35 | #include <asm/debug.h> | 34 | #include <asm/debug.h> |
35 | #include <asm/irq.h> | ||
36 | 36 | ||
37 | #ifndef CONFIG_64BIT | 37 | #ifndef CONFIG_64BIT |
38 | #define ONELONG "%08lx: " | 38 | #define ONELONG "%08lx: " |
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c index e204f9597aaf..e3264f6a9720 100644 --- a/arch/s390/kernel/irq.c +++ b/arch/s390/kernel/irq.c | |||
@@ -1,19 +1,28 @@ | |||
1 | /* | 1 | /* |
2 | * Copyright IBM Corp. 2004,2010 | 2 | * Copyright IBM Corp. 2004,2011 |
3 | * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com), | 3 | * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>, |
4 | * Thomas Spatzier (tspat@de.ibm.com) | 4 | * Holger Smolinski <Holger.Smolinski@de.ibm.com>, |
5 | * Thomas Spatzier <tspat@de.ibm.com>, | ||
5 | * | 6 | * |
6 | * This file contains interrupt related functions. | 7 | * This file contains interrupt related functions. |
7 | */ | 8 | */ |
8 | 9 | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/kernel_stat.h> | 10 | #include <linux/kernel_stat.h> |
12 | #include <linux/interrupt.h> | 11 | #include <linux/interrupt.h> |
13 | #include <linux/seq_file.h> | 12 | #include <linux/seq_file.h> |
14 | #include <linux/cpu.h> | ||
15 | #include <linux/proc_fs.h> | 13 | #include <linux/proc_fs.h> |
16 | #include <linux/profile.h> | 14 | #include <linux/profile.h> |
15 | #include <linux/module.h> | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/ftrace.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/cpu.h> | ||
21 | #include <asm/irq_regs.h> | ||
22 | #include <asm/cputime.h> | ||
23 | #include <asm/lowcore.h> | ||
24 | #include <asm/irq.h> | ||
25 | #include "entry.h" | ||
17 | 26 | ||
18 | struct irq_class { | 27 | struct irq_class { |
19 | char *name; | 28 | char *name; |
@@ -82,8 +91,7 @@ int show_interrupts(struct seq_file *p, void *v) | |||
82 | * For compatibility only. S/390 specific setup of interrupts et al. is done | 91 | * For compatibility only. S/390 specific setup of interrupts et al. is done |
83 | * much later in init_channel_subsystem(). | 92 | * much later in init_channel_subsystem(). |
84 | */ | 93 | */ |
85 | void __init | 94 | void __init init_IRQ(void) |
86 | init_IRQ(void) | ||
87 | { | 95 | { |
88 | /* nothing... */ | 96 | /* nothing... */ |
89 | } | 97 | } |
@@ -134,3 +142,116 @@ void init_irq_proc(void) | |||
134 | create_prof_cpu_mask(root_irq_dir); | 142 | create_prof_cpu_mask(root_irq_dir); |
135 | } | 143 | } |
136 | #endif | 144 | #endif |
145 | |||
146 | /* | ||
147 | * ext_int_hash[index] is the start of the list for all external interrupts | ||
148 | * that hash to this index. With the current set of external interrupts | ||
149 | * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000 | ||
150 | * iucv and 0x2603 pfault) this is always the first element. | ||
151 | */ | ||
152 | |||
153 | struct ext_int_info { | ||
154 | struct ext_int_info *next; | ||
155 | ext_int_handler_t handler; | ||
156 | u16 code; | ||
157 | }; | ||
158 | |||
159 | static struct ext_int_info *ext_int_hash[256]; | ||
160 | |||
161 | static inline int ext_hash(u16 code) | ||
162 | { | ||
163 | return (code + (code >> 9)) & 0xff; | ||
164 | } | ||
165 | |||
166 | int register_external_interrupt(u16 code, ext_int_handler_t handler) | ||
167 | { | ||
168 | struct ext_int_info *p; | ||
169 | int index; | ||
170 | |||
171 | p = kmalloc(sizeof(*p), GFP_ATOMIC); | ||
172 | if (!p) | ||
173 | return -ENOMEM; | ||
174 | p->code = code; | ||
175 | p->handler = handler; | ||
176 | index = ext_hash(code); | ||
177 | p->next = ext_int_hash[index]; | ||
178 | ext_int_hash[index] = p; | ||
179 | return 0; | ||
180 | } | ||
181 | EXPORT_SYMBOL(register_external_interrupt); | ||
182 | |||
183 | int unregister_external_interrupt(u16 code, ext_int_handler_t handler) | ||
184 | { | ||
185 | struct ext_int_info *p, *q; | ||
186 | int index; | ||
187 | |||
188 | index = ext_hash(code); | ||
189 | q = NULL; | ||
190 | p = ext_int_hash[index]; | ||
191 | while (p) { | ||
192 | if (p->code == code && p->handler == handler) | ||
193 | break; | ||
194 | q = p; | ||
195 | p = p->next; | ||
196 | } | ||
197 | if (!p) | ||
198 | return -ENOENT; | ||
199 | if (q) | ||
200 | q->next = p->next; | ||
201 | else | ||
202 | ext_int_hash[index] = p->next; | ||
203 | kfree(p); | ||
204 | return 0; | ||
205 | } | ||
206 | EXPORT_SYMBOL(unregister_external_interrupt); | ||
207 | |||
208 | void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code, | ||
209 | unsigned int param32, unsigned long param64) | ||
210 | { | ||
211 | struct pt_regs *old_regs; | ||
212 | unsigned short code; | ||
213 | struct ext_int_info *p; | ||
214 | int index; | ||
215 | |||
216 | code = (unsigned short) ext_int_code; | ||
217 | old_regs = set_irq_regs(regs); | ||
218 | s390_idle_check(regs, S390_lowcore.int_clock, | ||
219 | S390_lowcore.async_enter_timer); | ||
220 | irq_enter(); | ||
221 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) | ||
222 | /* Serve timer interrupts first. */ | ||
223 | clock_comparator_work(); | ||
224 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; | ||
225 | if (code != 0x1004) | ||
226 | __get_cpu_var(s390_idle).nohz_delay = 1; | ||
227 | index = ext_hash(code); | ||
228 | for (p = ext_int_hash[index]; p; p = p->next) { | ||
229 | if (likely(p->code == code)) | ||
230 | p->handler(ext_int_code, param32, param64); | ||
231 | } | ||
232 | irq_exit(); | ||
233 | set_irq_regs(old_regs); | ||
234 | } | ||
235 | |||
236 | static DEFINE_SPINLOCK(sc_irq_lock); | ||
237 | static int sc_irq_refcount; | ||
238 | |||
239 | void service_subclass_irq_register(void) | ||
240 | { | ||
241 | spin_lock(&sc_irq_lock); | ||
242 | if (!sc_irq_refcount) | ||
243 | ctl_set_bit(0, 9); | ||
244 | sc_irq_refcount++; | ||
245 | spin_unlock(&sc_irq_lock); | ||
246 | } | ||
247 | EXPORT_SYMBOL(service_subclass_irq_register); | ||
248 | |||
249 | void service_subclass_irq_unregister(void) | ||
250 | { | ||
251 | spin_lock(&sc_irq_lock); | ||
252 | sc_irq_refcount--; | ||
253 | if (!sc_irq_refcount) | ||
254 | ctl_clear_bit(0, 9); | ||
255 | spin_unlock(&sc_irq_lock); | ||
256 | } | ||
257 | EXPORT_SYMBOL(service_subclass_irq_unregister); | ||
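With the registration API folded into irq.c, a consumer attaches a handler as sketched below. The 0x1234 code and the function names are made up for illustration, and the service_subclass_irq_register() call is only needed when the source is gated by the service-signal subclass mask (control register 0, bit 9).

/* Hypothetical consumer of the relocated external interrupt API. */
static void demo_ext_handler(unsigned int ext_int_code,
			     unsigned int param32, unsigned long param64)
{
	/* Runs from do_extint() in interrupt context. */
}

static int __init demo_ext_init(void)
{
	int rc;

	rc = register_external_interrupt(0x1234, demo_ext_handler);
	if (rc)
		return rc;
	/* Enable the service-signal subclass while a user is registered. */
	service_subclass_irq_register();
	return 0;
}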
diff --git a/arch/s390/kernel/s390_ext.c b/arch/s390/kernel/s390_ext.c deleted file mode 100644 index 185029919c4d..000000000000 --- a/arch/s390/kernel/s390_ext.c +++ /dev/null | |||
@@ -1,108 +0,0 @@ | |||
1 | /* | ||
2 | * Copyright IBM Corp. 1999,2010 | ||
3 | * Author(s): Holger Smolinski <Holger.Smolinski@de.ibm.com>, | ||
4 | * Martin Schwidefsky <schwidefsky@de.ibm.com>, | ||
5 | */ | ||
6 | |||
7 | #include <linux/kernel_stat.h> | ||
8 | #include <linux/interrupt.h> | ||
9 | #include <linux/module.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/ftrace.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/slab.h> | ||
14 | #include <asm/s390_ext.h> | ||
15 | #include <asm/irq_regs.h> | ||
16 | #include <asm/cputime.h> | ||
17 | #include <asm/lowcore.h> | ||
18 | #include <asm/irq.h> | ||
19 | #include "entry.h" | ||
20 | |||
21 | struct ext_int_info { | ||
22 | struct ext_int_info *next; | ||
23 | ext_int_handler_t handler; | ||
24 | __u16 code; | ||
25 | }; | ||
26 | |||
27 | /* | ||
28 | * ext_int_hash[index] is the start of the list for all external interrupts | ||
29 | * that hash to this index. With the current set of external interrupts | ||
30 | * (0x1202 external call, 0x1004 cpu timer, 0x2401 hwc console, 0x4000 | ||
31 | * iucv and 0x2603 pfault) this is always the first element. | ||
32 | */ | ||
33 | static struct ext_int_info *ext_int_hash[256]; | ||
34 | |||
35 | static inline int ext_hash(__u16 code) | ||
36 | { | ||
37 | return (code + (code >> 9)) & 0xff; | ||
38 | } | ||
39 | |||
40 | int register_external_interrupt(__u16 code, ext_int_handler_t handler) | ||
41 | { | ||
42 | struct ext_int_info *p; | ||
43 | int index; | ||
44 | |||
45 | p = kmalloc(sizeof(*p), GFP_ATOMIC); | ||
46 | if (!p) | ||
47 | return -ENOMEM; | ||
48 | p->code = code; | ||
49 | p->handler = handler; | ||
50 | index = ext_hash(code); | ||
51 | p->next = ext_int_hash[index]; | ||
52 | ext_int_hash[index] = p; | ||
53 | return 0; | ||
54 | } | ||
55 | EXPORT_SYMBOL(register_external_interrupt); | ||
56 | |||
57 | int unregister_external_interrupt(__u16 code, ext_int_handler_t handler) | ||
58 | { | ||
59 | struct ext_int_info *p, *q; | ||
60 | int index; | ||
61 | |||
62 | index = ext_hash(code); | ||
63 | q = NULL; | ||
64 | p = ext_int_hash[index]; | ||
65 | while (p) { | ||
66 | if (p->code == code && p->handler == handler) | ||
67 | break; | ||
68 | q = p; | ||
69 | p = p->next; | ||
70 | } | ||
71 | if (!p) | ||
72 | return -ENOENT; | ||
73 | if (q) | ||
74 | q->next = p->next; | ||
75 | else | ||
76 | ext_int_hash[index] = p->next; | ||
77 | kfree(p); | ||
78 | return 0; | ||
79 | } | ||
80 | EXPORT_SYMBOL(unregister_external_interrupt); | ||
81 | |||
82 | void __irq_entry do_extint(struct pt_regs *regs, unsigned int ext_int_code, | ||
83 | unsigned int param32, unsigned long param64) | ||
84 | { | ||
85 | struct pt_regs *old_regs; | ||
86 | unsigned short code; | ||
87 | struct ext_int_info *p; | ||
88 | int index; | ||
89 | |||
90 | code = (unsigned short) ext_int_code; | ||
91 | old_regs = set_irq_regs(regs); | ||
92 | s390_idle_check(regs, S390_lowcore.int_clock, | ||
93 | S390_lowcore.async_enter_timer); | ||
94 | irq_enter(); | ||
95 | if (S390_lowcore.int_clock >= S390_lowcore.clock_comparator) | ||
96 | /* Serve timer interrupts first. */ | ||
97 | clock_comparator_work(); | ||
98 | kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++; | ||
99 | if (code != 0x1004) | ||
100 | __get_cpu_var(s390_idle).nohz_delay = 1; | ||
101 | index = ext_hash(code); | ||
102 | for (p = ext_int_hash[index]; p; p = p->next) { | ||
103 | if (likely(p->code == code)) | ||
104 | p->handler(ext_int_code, param32, param64); | ||
105 | } | ||
106 | irq_exit(); | ||
107 | set_irq_regs(old_regs); | ||
108 | } | ||
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c index f8e85ecbc459..52420d2785b3 100644 --- a/arch/s390/kernel/smp.c +++ b/arch/s390/kernel/smp.c | |||
@@ -44,7 +44,6 @@ | |||
44 | #include <asm/sigp.h> | 44 | #include <asm/sigp.h> |
45 | #include <asm/pgalloc.h> | 45 | #include <asm/pgalloc.h> |
46 | #include <asm/irq.h> | 46 | #include <asm/irq.h> |
47 | #include <asm/s390_ext.h> | ||
48 | #include <asm/cpcmd.h> | 47 | #include <asm/cpcmd.h> |
49 | #include <asm/tlbflush.h> | 48 | #include <asm/tlbflush.h> |
50 | #include <asm/timer.h> | 49 | #include <asm/timer.h> |
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c index a59557f1fb5f..dff933065ab6 100644 --- a/arch/s390/kernel/time.c +++ b/arch/s390/kernel/time.c | |||
@@ -41,7 +41,6 @@ | |||
41 | #include <linux/kprobes.h> | 41 | #include <linux/kprobes.h> |
42 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
43 | #include <asm/delay.h> | 43 | #include <asm/delay.h> |
44 | #include <asm/s390_ext.h> | ||
45 | #include <asm/div64.h> | 44 | #include <asm/div64.h> |
46 | #include <asm/vdso.h> | 45 | #include <asm/vdso.h> |
47 | #include <asm/irq.h> | 46 | #include <asm/irq.h> |
diff --git a/arch/s390/kernel/topology.c b/arch/s390/kernel/topology.c index 2eafb8c7a746..0cd340b72632 100644 --- a/arch/s390/kernel/topology.c +++ b/arch/s390/kernel/topology.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/smp.h> | 17 | #include <linux/smp.h> |
18 | #include <linux/cpuset.h> | 18 | #include <linux/cpuset.h> |
19 | #include <asm/delay.h> | 19 | #include <asm/delay.h> |
20 | #include <asm/s390_ext.h> | ||
21 | 20 | ||
22 | #define PTF_HORIZONTAL (0UL) | 21 | #define PTF_HORIZONTAL (0UL) |
23 | #define PTF_VERTICAL (1UL) | 22 | #define PTF_VERTICAL (1UL) |
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c index b5a4a739b477..a65d2e82f61d 100644 --- a/arch/s390/kernel/traps.c +++ b/arch/s390/kernel/traps.c | |||
@@ -39,7 +39,6 @@ | |||
39 | #include <asm/atomic.h> | 39 | #include <asm/atomic.h> |
40 | #include <asm/mathemu.h> | 40 | #include <asm/mathemu.h> |
41 | #include <asm/cpcmd.h> | 41 | #include <asm/cpcmd.h> |
42 | #include <asm/s390_ext.h> | ||
43 | #include <asm/lowcore.h> | 42 | #include <asm/lowcore.h> |
44 | #include <asm/debug.h> | 43 | #include <asm/debug.h> |
45 | #include "entry.h" | 44 | #include "entry.h" |
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c index 5e8ead4b4aba..2d6228f60cd6 100644 --- a/arch/s390/kernel/vtime.c +++ b/arch/s390/kernel/vtime.c | |||
@@ -22,10 +22,10 @@ | |||
22 | #include <linux/cpu.h> | 22 | #include <linux/cpu.h> |
23 | #include <linux/kprobes.h> | 23 | #include <linux/kprobes.h> |
24 | 24 | ||
25 | #include <asm/s390_ext.h> | ||
26 | #include <asm/timer.h> | 25 | #include <asm/timer.h> |
27 | #include <asm/irq_regs.h> | 26 | #include <asm/irq_regs.h> |
28 | #include <asm/cputime.h> | 27 | #include <asm/cputime.h> |
28 | #include <asm/irq.h> | ||
29 | 29 | ||
30 | static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); | 30 | static DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer); |
31 | 31 | ||
diff --git a/arch/s390/lib/delay.c b/arch/s390/lib/delay.c index 0f53110e1d09..a65229d91c92 100644 --- a/arch/s390/lib/delay.c +++ b/arch/s390/lib/delay.c | |||
@@ -12,6 +12,7 @@ | |||
12 | #include <linux/module.h> | 12 | #include <linux/module.h> |
13 | #include <linux/irqflags.h> | 13 | #include <linux/irqflags.h> |
14 | #include <linux/interrupt.h> | 14 | #include <linux/interrupt.h> |
15 | #include <asm/div64.h> | ||
15 | 16 | ||
16 | void __delay(unsigned long loops) | 17 | void __delay(unsigned long loops) |
17 | { | 18 | { |
@@ -116,3 +117,17 @@ void udelay_simple(unsigned long long usecs) | |||
116 | while (get_clock() < end) | 117 | while (get_clock() < end) |
117 | cpu_relax(); | 118 | cpu_relax(); |
118 | } | 119 | } |
120 | |||
121 | void __ndelay(unsigned long long nsecs) | ||
122 | { | ||
123 | u64 end; | ||
124 | |||
125 | nsecs <<= 9; | ||
126 | do_div(nsecs, 125); | ||
127 | end = get_clock() + nsecs; | ||
128 | if (nsecs & ~0xfffUL) | ||
129 | __udelay(nsecs >> 12); | ||
130 | while (get_clock() < end) | ||
131 | barrier(); | ||
132 | } | ||
133 | EXPORT_SYMBOL(__ndelay); | ||
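The shift-and-divide in __ndelay() converts nanoseconds to TOD clock units: one TOD unit is 1/4096 of a microsecond, so ns * 4096 / 1000 = ns * 512 / 125, which is exactly (nsecs << 9) followed by do_div(nsecs, 125); the later nsecs >> 12 turns TOD units back into microseconds for __udelay(). A quick userspace check of the arithmetic:

#include <stdio.h>

int main(void)
{
	unsigned long long nsecs = 1000;		/* 1 microsecond */
	unsigned long long tod = (nsecs << 9) / 125;	/* same math as __ndelay() */

	/* Expect 4096 TOD clock units per microsecond. */
	printf("%llu ns -> %llu TOD clock units\n", nsecs, tod);
	return 0;
}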
diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index a0f9e730f26a..fe103e891e7a 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c | |||
@@ -34,7 +34,7 @@ | |||
34 | #include <asm/asm-offsets.h> | 34 | #include <asm/asm-offsets.h> |
35 | #include <asm/system.h> | 35 | #include <asm/system.h> |
36 | #include <asm/pgtable.h> | 36 | #include <asm/pgtable.h> |
37 | #include <asm/s390_ext.h> | 37 | #include <asm/irq.h> |
38 | #include <asm/mmu_context.h> | 38 | #include <asm/mmu_context.h> |
39 | #include <asm/compat.h> | 39 | #include <asm/compat.h> |
40 | #include "../kernel/entry.h" | 40 | #include "../kernel/entry.h" |
@@ -245,9 +245,12 @@ static noinline void do_fault_error(struct pt_regs *regs, long int_code, | |||
245 | do_no_context(regs, int_code, trans_exc_code); | 245 | do_no_context(regs, int_code, trans_exc_code); |
246 | break; | 246 | break; |
247 | default: /* fault & VM_FAULT_ERROR */ | 247 | default: /* fault & VM_FAULT_ERROR */ |
248 | if (fault & VM_FAULT_OOM) | 248 | if (fault & VM_FAULT_OOM) { |
249 | pagefault_out_of_memory(); | 249 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) |
250 | else if (fault & VM_FAULT_SIGBUS) { | 250 | do_no_context(regs, int_code, trans_exc_code); |
251 | else | ||
252 | pagefault_out_of_memory(); | ||
253 | } else if (fault & VM_FAULT_SIGBUS) { | ||
251 | /* Kernel mode? Handle exceptions or die */ | 254 | /* Kernel mode? Handle exceptions or die */ |
252 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) | 255 | if (!(regs->psw.mask & PSW_MASK_PSTATE)) |
253 | do_no_context(regs, int_code, trans_exc_code); | 256 | do_no_context(regs, int_code, trans_exc_code); |
@@ -277,7 +280,8 @@ static inline int do_exception(struct pt_regs *regs, int access, | |||
277 | struct mm_struct *mm; | 280 | struct mm_struct *mm; |
278 | struct vm_area_struct *vma; | 281 | struct vm_area_struct *vma; |
279 | unsigned long address; | 282 | unsigned long address; |
280 | int fault, write; | 283 | unsigned int flags; |
284 | int fault; | ||
281 | 285 | ||
282 | if (notify_page_fault(regs)) | 286 | if (notify_page_fault(regs)) |
283 | return 0; | 287 | return 0; |
@@ -296,6 +300,10 @@ static inline int do_exception(struct pt_regs *regs, int access, | |||
296 | 300 | ||
297 | address = trans_exc_code & __FAIL_ADDR_MASK; | 301 | address = trans_exc_code & __FAIL_ADDR_MASK; |
298 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); | 302 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, address); |
303 | flags = FAULT_FLAG_ALLOW_RETRY; | ||
304 | if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) | ||
305 | flags |= FAULT_FLAG_WRITE; | ||
306 | retry: | ||
299 | down_read(&mm->mmap_sem); | 307 | down_read(&mm->mmap_sem); |
300 | 308 | ||
301 | fault = VM_FAULT_BADMAP; | 309 | fault = VM_FAULT_BADMAP; |
@@ -325,21 +333,31 @@ static inline int do_exception(struct pt_regs *regs, int access, | |||
325 | * make sure we exit gracefully rather than endlessly redo | 333 | * make sure we exit gracefully rather than endlessly redo |
326 | * the fault. | 334 | * the fault. |
327 | */ | 335 | */ |
328 | write = (access == VM_WRITE || | 336 | fault = handle_mm_fault(mm, vma, address, flags); |
329 | (trans_exc_code & store_indication) == 0x400) ? | ||
330 | FAULT_FLAG_WRITE : 0; | ||
331 | fault = handle_mm_fault(mm, vma, address, write); | ||
332 | if (unlikely(fault & VM_FAULT_ERROR)) | 337 | if (unlikely(fault & VM_FAULT_ERROR)) |
333 | goto out_up; | 338 | goto out_up; |
334 | 339 | ||
335 | if (fault & VM_FAULT_MAJOR) { | 340 | /* |
336 | tsk->maj_flt++; | 341 | * Major/minor page fault accounting is only done on the |
337 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | 342 | * initial attempt. If we go through a retry, it is extremely |
338 | regs, address); | 343 | * likely that the page will be found in page cache at that point. |
339 | } else { | 344 | */ |
340 | tsk->min_flt++; | 345 | if (flags & FAULT_FLAG_ALLOW_RETRY) { |
341 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | 346 | if (fault & VM_FAULT_MAJOR) { |
342 | regs, address); | 347 | tsk->maj_flt++; |
348 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, | ||
349 | regs, address); | ||
350 | } else { | ||
351 | tsk->min_flt++; | ||
352 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, | ||
353 | regs, address); | ||
354 | } | ||
355 | if (fault & VM_FAULT_RETRY) { | ||
356 | /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk | ||
357 | * of starvation. */ | ||
358 | flags &= ~FAULT_FLAG_ALLOW_RETRY; | ||
359 | goto retry; | ||
360 | } | ||
343 | } | 361 | } |
344 | /* | 362 | /* |
345 | * The instruction that caused the program check will | 363 | * The instruction that caused the program check will |
@@ -429,10 +447,9 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) | |||
429 | access = write ? VM_WRITE : VM_READ; | 447 | access = write ? VM_WRITE : VM_READ; |
430 | fault = do_exception(®s, access, uaddr | 2); | 448 | fault = do_exception(®s, access, uaddr | 2); |
431 | if (unlikely(fault)) { | 449 | if (unlikely(fault)) { |
432 | if (fault & VM_FAULT_OOM) { | 450 | if (fault & VM_FAULT_OOM) |
433 | pagefault_out_of_memory(); | 451 | return -EFAULT; |
434 | fault = 0; | 452 | else if (fault & VM_FAULT_SIGBUS) |
435 | } else if (fault & VM_FAULT_SIGBUS) | ||
436 | do_sigbus(®s, pgm_int_code, uaddr); | 453 | do_sigbus(®s, pgm_int_code, uaddr); |
437 | } | 454 | } |
438 | return fault ? -EFAULT : 0; | 455 | return fault ? -EFAULT : 0; |
@@ -485,7 +502,6 @@ int pfault_init(void) | |||
485 | "2:\n" | 502 | "2:\n" |
486 | EX_TABLE(0b,1b) | 503 | EX_TABLE(0b,1b) |
487 | : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc"); | 504 | : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc"); |
488 | __ctl_set_bit(0, 9); | ||
489 | return rc; | 505 | return rc; |
490 | } | 506 | } |
491 | 507 | ||
@@ -500,7 +516,6 @@ void pfault_fini(void) | |||
500 | 516 | ||
501 | if (!MACHINE_IS_VM || pfault_disable) | 517 | if (!MACHINE_IS_VM || pfault_disable) |
502 | return; | 518 | return; |
503 | __ctl_clear_bit(0,9); | ||
504 | asm volatile( | 519 | asm volatile( |
505 | " diag %0,0,0x258\n" | 520 | " diag %0,0,0x258\n" |
506 | "0:\n" | 521 | "0:\n" |
@@ -615,6 +630,7 @@ static int __init pfault_irq_init(void) | |||
615 | rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; | 630 | rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; |
616 | if (rc) | 631 | if (rc) |
617 | goto out_pfault; | 632 | goto out_pfault; |
633 | service_subclass_irq_register(); | ||
618 | hotcpu_notifier(pfault_cpu_notify, 0); | 634 | hotcpu_notifier(pfault_cpu_notify, 0); |
619 | return 0; | 635 | return 0; |
620 | 636 | ||
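The fault.c hunks above convert s390 to the retryable page-fault protocol: the first call into handle_mm_fault() carries FAULT_FLAG_ALLOW_RETRY, major/minor fault accounting happens only on that first pass, and a VM_FAULT_RETRY result triggers exactly one retry with the flag cleared. Stripped of the s390-specific translation-exception handling, the pattern is roughly the sketch below; the wrapper name and the elided error paths are illustrative, not code from the patch.

/* Sketch of the retry protocol used above; not part of the patch. */
static int fault_with_retry(struct mm_struct *mm, struct vm_area_struct *vma,
			    unsigned long address, int is_write)
{
	unsigned int flags = FAULT_FLAG_ALLOW_RETRY;
	int fault;

	if (is_write)
		flags |= FAULT_FLAG_WRITE;
retry:
	down_read(&mm->mmap_sem);
	fault = handle_mm_fault(mm, vma, address, flags);
	if (unlikely(fault & VM_FAULT_ERROR)) {
		up_read(&mm->mmap_sem);
		return fault;		/* OOM/SIGBUS handled by the caller */
	}
	if (flags & FAULT_FLAG_ALLOW_RETRY) {
		/* First attempt only: maj_flt/min_flt + perf accounting. */
		if (fault & VM_FAULT_RETRY) {
			/* handle_mm_fault() dropped mmap_sem before returning. */
			flags &= ~FAULT_FLAG_ALLOW_RETRY;
			goto retry;
		}
	}
	up_read(&mm->mmap_sem);
	return 0;
}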
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index dfefc2171691..59b663109d90 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c | |||
@@ -119,9 +119,7 @@ void __init paging_init(void) | |||
119 | sparse_memory_present_with_active_regions(MAX_NUMNODES); | 119 | sparse_memory_present_with_active_regions(MAX_NUMNODES); |
120 | sparse_init(); | 120 | sparse_init(); |
121 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); | 121 | memset(max_zone_pfns, 0, sizeof(max_zone_pfns)); |
122 | #ifdef CONFIG_ZONE_DMA | ||
123 | max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); | 122 | max_zone_pfns[ZONE_DMA] = PFN_DOWN(MAX_DMA_ADDRESS); |
124 | #endif | ||
125 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; | 123 | max_zone_pfns[ZONE_NORMAL] = max_low_pfn; |
126 | free_area_init_nodes(max_zone_pfns); | 124 | free_area_init_nodes(max_zone_pfns); |
127 | fault_init(); | 125 | fault_init(); |
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c index 053caa0fd276..4552ce40c81a 100644 --- a/arch/s390/oprofile/hwsampler.c +++ b/arch/s390/oprofile/hwsampler.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/oprofile.h> | 19 | #include <linux/oprofile.h> |
20 | 20 | ||
21 | #include <asm/lowcore.h> | 21 | #include <asm/lowcore.h> |
22 | #include <asm/s390_ext.h> | 22 | #include <asm/irq.h> |
23 | 23 | ||
24 | #include "hwsampler.h" | 24 | #include "hwsampler.h" |
25 | 25 | ||
@@ -580,7 +580,7 @@ static int hws_cpu_callback(struct notifier_block *nfb, | |||
580 | { | 580 | { |
581 | /* We do not have sampler space available for all possible CPUs. | 581 | /* We do not have sampler space available for all possible CPUs. |
582 | All CPUs should be online when hw sampling is activated. */ | 582 | All CPUs should be online when hw sampling is activated. */ |
583 | return NOTIFY_BAD; | 583 | return (hws_state <= HWS_DEALLOCATED) ? NOTIFY_OK : NOTIFY_BAD; |
584 | } | 584 | } |
585 | 585 | ||
586 | static struct notifier_block hws_cpu_notifier = { | 586 | static struct notifier_block hws_cpu_notifier = { |
diff --git a/arch/score/Kconfig b/arch/score/Kconfig index e73bc781cc14..288add8d168f 100644 --- a/arch/score/Kconfig +++ b/arch/score/Kconfig | |||
@@ -43,9 +43,6 @@ config NO_DMA | |||
43 | config RWSEM_GENERIC_SPINLOCK | 43 | config RWSEM_GENERIC_SPINLOCK |
44 | def_bool y | 44 | def_bool y |
45 | 45 | ||
46 | config GENERIC_FIND_NEXT_BIT | ||
47 | def_bool y | ||
48 | |||
49 | config GENERIC_HWEIGHT | 46 | config GENERIC_HWEIGHT |
50 | def_bool y | 47 | def_bool y |
51 | 48 | ||
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig index b44e37753b9a..74495a5ea027 100644 --- a/arch/sh/Kconfig +++ b/arch/sh/Kconfig | |||
@@ -71,12 +71,6 @@ config GENERIC_CSUM | |||
71 | def_bool y | 71 | def_bool y |
72 | depends on SUPERH64 | 72 | depends on SUPERH64 |
73 | 73 | ||
74 | config GENERIC_FIND_NEXT_BIT | ||
75 | def_bool y | ||
76 | |||
77 | config GENERIC_FIND_BIT_LE | ||
78 | def_bool y | ||
79 | |||
80 | config GENERIC_HWEIGHT | 74 | config GENERIC_HWEIGHT |
81 | def_bool y | 75 | def_bool y |
82 | 76 | ||
diff --git a/arch/sh/configs/apsh4ad0a_defconfig b/arch/sh/configs/apsh4ad0a_defconfig index 77ec0e7b8ddf..e7583484cc07 100644 --- a/arch/sh/configs/apsh4ad0a_defconfig +++ b/arch/sh/configs/apsh4ad0a_defconfig | |||
@@ -7,7 +7,6 @@ CONFIG_IKCONFIG=y | |||
7 | CONFIG_IKCONFIG_PROC=y | 7 | CONFIG_IKCONFIG_PROC=y |
8 | CONFIG_LOG_BUF_SHIFT=14 | 8 | CONFIG_LOG_BUF_SHIFT=14 |
9 | CONFIG_CGROUPS=y | 9 | CONFIG_CGROUPS=y |
10 | CONFIG_CGROUP_NS=y | ||
11 | CONFIG_CGROUP_FREEZER=y | 10 | CONFIG_CGROUP_FREEZER=y |
12 | CONFIG_CGROUP_DEVICE=y | 11 | CONFIG_CGROUP_DEVICE=y |
13 | CONFIG_CGROUP_CPUACCT=y | 12 | CONFIG_CGROUP_CPUACCT=y |
diff --git a/arch/sh/configs/sdk7786_defconfig b/arch/sh/configs/sdk7786_defconfig index c41650572d79..8a7dd7b59c5c 100644 --- a/arch/sh/configs/sdk7786_defconfig +++ b/arch/sh/configs/sdk7786_defconfig | |||
@@ -12,7 +12,6 @@ CONFIG_IKCONFIG=y | |||
12 | CONFIG_IKCONFIG_PROC=y | 12 | CONFIG_IKCONFIG_PROC=y |
13 | CONFIG_CGROUPS=y | 13 | CONFIG_CGROUPS=y |
14 | CONFIG_CGROUP_DEBUG=y | 14 | CONFIG_CGROUP_DEBUG=y |
15 | CONFIG_CGROUP_NS=y | ||
16 | CONFIG_CGROUP_FREEZER=y | 15 | CONFIG_CGROUP_FREEZER=y |
17 | CONFIG_CGROUP_DEVICE=y | 16 | CONFIG_CGROUP_DEVICE=y |
18 | CONFIG_CPUSETS=y | 17 | CONFIG_CPUSETS=y |
diff --git a/arch/sh/configs/se7206_defconfig b/arch/sh/configs/se7206_defconfig index a468ff227fc6..72c3fad7383f 100644 --- a/arch/sh/configs/se7206_defconfig +++ b/arch/sh/configs/se7206_defconfig | |||
@@ -8,7 +8,6 @@ CONFIG_RCU_TRACE=y | |||
8 | CONFIG_LOG_BUF_SHIFT=14 | 8 | CONFIG_LOG_BUF_SHIFT=14 |
9 | CONFIG_CGROUPS=y | 9 | CONFIG_CGROUPS=y |
10 | CONFIG_CGROUP_DEBUG=y | 10 | CONFIG_CGROUP_DEBUG=y |
11 | CONFIG_CGROUP_NS=y | ||
12 | CONFIG_CGROUP_DEVICE=y | 11 | CONFIG_CGROUP_DEVICE=y |
13 | CONFIG_CGROUP_CPUACCT=y | 12 | CONFIG_CGROUP_CPUACCT=y |
14 | CONFIG_RESOURCE_COUNTERS=y | 13 | CONFIG_RESOURCE_COUNTERS=y |
diff --git a/arch/sh/configs/shx3_defconfig b/arch/sh/configs/shx3_defconfig index 3f92d37c6374..6bb413036892 100644 --- a/arch/sh/configs/shx3_defconfig +++ b/arch/sh/configs/shx3_defconfig | |||
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG=y | |||
9 | CONFIG_IKCONFIG_PROC=y | 9 | CONFIG_IKCONFIG_PROC=y |
10 | CONFIG_LOG_BUF_SHIFT=14 | 10 | CONFIG_LOG_BUF_SHIFT=14 |
11 | CONFIG_CGROUPS=y | 11 | CONFIG_CGROUPS=y |
12 | CONFIG_CGROUP_NS=y | ||
13 | CONFIG_CGROUP_FREEZER=y | 12 | CONFIG_CGROUP_FREEZER=y |
14 | CONFIG_CGROUP_DEVICE=y | 13 | CONFIG_CGROUP_DEVICE=y |
15 | CONFIG_CGROUP_CPUACCT=y | 14 | CONFIG_CGROUP_CPUACCT=y |
diff --git a/arch/sh/configs/urquell_defconfig b/arch/sh/configs/urquell_defconfig index 7b3daec6fefe..8bfa4d056d7a 100644 --- a/arch/sh/configs/urquell_defconfig +++ b/arch/sh/configs/urquell_defconfig | |||
@@ -9,7 +9,6 @@ CONFIG_IKCONFIG_PROC=y | |||
9 | CONFIG_LOG_BUF_SHIFT=14 | 9 | CONFIG_LOG_BUF_SHIFT=14 |
10 | CONFIG_CGROUPS=y | 10 | CONFIG_CGROUPS=y |
11 | CONFIG_CGROUP_DEBUG=y | 11 | CONFIG_CGROUP_DEBUG=y |
12 | CONFIG_CGROUP_NS=y | ||
13 | CONFIG_CGROUP_FREEZER=y | 12 | CONFIG_CGROUP_FREEZER=y |
14 | CONFIG_CGROUP_DEVICE=y | 13 | CONFIG_CGROUP_DEVICE=y |
15 | CONFIG_CPUSETS=y | 14 | CONFIG_CPUSETS=y |
diff --git a/arch/sh/include/asm/kgdb.h b/arch/sh/include/asm/kgdb.h index 4235e228d921..f3613952d1ae 100644 --- a/arch/sh/include/asm/kgdb.h +++ b/arch/sh/include/asm/kgdb.h | |||
@@ -34,5 +34,6 @@ static inline void arch_kgdb_breakpoint(void) | |||
34 | 34 | ||
35 | #define CACHE_FLUSH_IS_SAFE 1 | 35 | #define CACHE_FLUSH_IS_SAFE 1 |
36 | #define BREAK_INSTR_SIZE 2 | 36 | #define BREAK_INSTR_SIZE 2 |
37 | #define GDB_ADJUSTS_BREAK_OFFSET | ||
37 | 38 | ||
38 | #endif /* __ASM_SH_KGDB_H */ | 39 | #endif /* __ASM_SH_KGDB_H */ |
diff --git a/arch/sh/include/asm/ptrace.h b/arch/sh/include/asm/ptrace.h index de167d3a1a80..40725b4a8018 100644 --- a/arch/sh/include/asm/ptrace.h +++ b/arch/sh/include/asm/ptrace.h | |||
@@ -40,9 +40,8 @@ | |||
40 | #include <asm/system.h> | 40 | #include <asm/system.h> |
41 | 41 | ||
42 | #define user_mode(regs) (((regs)->sr & 0x40000000)==0) | 42 | #define user_mode(regs) (((regs)->sr & 0x40000000)==0) |
43 | #define user_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15]) | ||
44 | #define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15]) | 43 | #define kernel_stack_pointer(_regs) ((unsigned long)(_regs)->regs[15]) |
45 | #define instruction_pointer(regs) ((unsigned long)(regs)->pc) | 44 | #define GET_USP(regs) ((regs)->regs[15]) |
46 | 45 | ||
47 | extern void show_regs(struct pt_regs *); | 46 | extern void show_regs(struct pt_regs *); |
48 | 47 | ||
@@ -139,6 +138,9 @@ static inline unsigned long profile_pc(struct pt_regs *regs) | |||
139 | 138 | ||
140 | return pc; | 139 | return pc; |
141 | } | 140 | } |
141 | #define profile_pc profile_pc | ||
142 | |||
143 | #include <asm-generic/ptrace.h> | ||
142 | #endif /* __KERNEL__ */ | 144 | #endif /* __KERNEL__ */ |
143 | 145 | ||
144 | #endif /* __ASM_SH_PTRACE_H */ | 146 | #endif /* __ASM_SH_PTRACE_H */ |
diff --git a/arch/sh/include/asm/suspend.h b/arch/sh/include/asm/suspend.h index 64eb41a063e8..e14567a7e9a1 100644 --- a/arch/sh/include/asm/suspend.h +++ b/arch/sh/include/asm/suspend.h | |||
@@ -3,7 +3,6 @@ | |||
3 | 3 | ||
4 | #ifndef __ASSEMBLY__ | 4 | #ifndef __ASSEMBLY__ |
5 | #include <linux/notifier.h> | 5 | #include <linux/notifier.h> |
6 | static inline int arch_prepare_suspend(void) { return 0; } | ||
7 | 6 | ||
8 | #include <asm/ptrace.h> | 7 | #include <asm/ptrace.h> |
9 | 8 | ||
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 63a027c9ada5..af32e17fa170 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig | |||
@@ -190,14 +190,6 @@ config RWSEM_XCHGADD_ALGORITHM | |||
190 | bool | 190 | bool |
191 | default y if SPARC64 | 191 | default y if SPARC64 |
192 | 192 | ||
193 | config GENERIC_FIND_NEXT_BIT | ||
194 | bool | ||
195 | default y | ||
196 | |||
197 | config GENERIC_FIND_BIT_LE | ||
198 | bool | ||
199 | default y | ||
200 | |||
201 | config GENERIC_HWEIGHT | 193 | config GENERIC_HWEIGHT |
202 | bool | 194 | bool |
203 | default y if !ULTRA_HAS_POPULATION_COUNT | 195 | default y if !ULTRA_HAS_POPULATION_COUNT |
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig index 635e1bfb1c5d..e1e50101b3bb 100644 --- a/arch/tile/Kconfig +++ b/arch/tile/Kconfig | |||
@@ -5,7 +5,6 @@ config TILE | |||
5 | def_bool y | 5 | def_bool y |
6 | select HAVE_KVM if !TILEGX | 6 | select HAVE_KVM if !TILEGX |
7 | select GENERIC_FIND_FIRST_BIT | 7 | select GENERIC_FIND_FIRST_BIT |
8 | select GENERIC_FIND_NEXT_BIT | ||
9 | select USE_GENERIC_SMP_HELPERS | 8 | select USE_GENERIC_SMP_HELPERS |
10 | select CC_OPTIMIZE_FOR_SIZE | 9 | select CC_OPTIMIZE_FOR_SIZE |
11 | select HAVE_GENERIC_HARDIRQS | 10 | select HAVE_GENERIC_HARDIRQS |
diff --git a/arch/um/Kconfig.x86 b/arch/um/Kconfig.x86 index 795ea8e869f4..8aae429a56e2 100644 --- a/arch/um/Kconfig.x86 +++ b/arch/um/Kconfig.x86 | |||
@@ -15,7 +15,6 @@ endmenu | |||
15 | config UML_X86 | 15 | config UML_X86 |
16 | def_bool y | 16 | def_bool y |
17 | select GENERIC_FIND_FIRST_BIT | 17 | select GENERIC_FIND_FIRST_BIT |
18 | select GENERIC_FIND_NEXT_BIT | ||
19 | 18 | ||
20 | config 64BIT | 19 | config 64BIT |
21 | bool | 20 | bool |
diff --git a/arch/unicore32/include/asm/suspend.h b/arch/unicore32/include/asm/suspend.h index 88a9c0f32b21..65bad75c7e96 100644 --- a/arch/unicore32/include/asm/suspend.h +++ b/arch/unicore32/include/asm/suspend.h | |||
@@ -14,7 +14,6 @@ | |||
14 | #define __UNICORE_SUSPEND_H__ | 14 | #define __UNICORE_SUSPEND_H__ |
15 | 15 | ||
16 | #ifndef __ASSEMBLY__ | 16 | #ifndef __ASSEMBLY__ |
17 | static inline int arch_prepare_suspend(void) { return 0; } | ||
18 | 17 | ||
19 | #include <asm/ptrace.h> | 18 | #include <asm/ptrace.h> |
20 | 19 | ||
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 483775f42d2a..da349723d411 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig | |||
@@ -64,7 +64,6 @@ config X86 | |||
64 | select HAVE_GENERIC_HARDIRQS | 64 | select HAVE_GENERIC_HARDIRQS |
65 | select HAVE_SPARSE_IRQ | 65 | select HAVE_SPARSE_IRQ |
66 | select GENERIC_FIND_FIRST_BIT | 66 | select GENERIC_FIND_FIRST_BIT |
67 | select GENERIC_FIND_NEXT_BIT | ||
68 | select GENERIC_IRQ_PROBE | 67 | select GENERIC_IRQ_PROBE |
69 | select GENERIC_PENDING_IRQ if SMP | 68 | select GENERIC_PENDING_IRQ if SMP |
70 | select GENERIC_IRQ_SHOW | 69 | select GENERIC_IRQ_SHOW |
diff --git a/arch/x86/configs/i386_defconfig b/arch/x86/configs/i386_defconfig index 6f9872658dd2..2bf18059fbea 100644 --- a/arch/x86/configs/i386_defconfig +++ b/arch/x86/configs/i386_defconfig | |||
@@ -10,7 +10,6 @@ CONFIG_TASK_IO_ACCOUNTING=y | |||
10 | CONFIG_AUDIT=y | 10 | CONFIG_AUDIT=y |
11 | CONFIG_LOG_BUF_SHIFT=18 | 11 | CONFIG_LOG_BUF_SHIFT=18 |
12 | CONFIG_CGROUPS=y | 12 | CONFIG_CGROUPS=y |
13 | CONFIG_CGROUP_NS=y | ||
14 | CONFIG_CGROUP_FREEZER=y | 13 | CONFIG_CGROUP_FREEZER=y |
15 | CONFIG_CPUSETS=y | 14 | CONFIG_CPUSETS=y |
16 | CONFIG_CGROUP_CPUACCT=y | 15 | CONFIG_CGROUP_CPUACCT=y |
diff --git a/arch/x86/configs/x86_64_defconfig b/arch/x86/configs/x86_64_defconfig index ee01a9d5d4f0..22a0dc8e51dd 100644 --- a/arch/x86/configs/x86_64_defconfig +++ b/arch/x86/configs/x86_64_defconfig | |||
@@ -11,7 +11,6 @@ CONFIG_TASK_IO_ACCOUNTING=y | |||
11 | CONFIG_AUDIT=y | 11 | CONFIG_AUDIT=y |
12 | CONFIG_LOG_BUF_SHIFT=18 | 12 | CONFIG_LOG_BUF_SHIFT=18 |
13 | CONFIG_CGROUPS=y | 13 | CONFIG_CGROUPS=y |
14 | CONFIG_CGROUP_NS=y | ||
15 | CONFIG_CGROUP_FREEZER=y | 14 | CONFIG_CGROUP_FREEZER=y |
16 | CONFIG_CPUSETS=y | 15 | CONFIG_CPUSETS=y |
17 | CONFIG_CGROUP_CPUACCT=y | 16 | CONFIG_CGROUP_CPUACCT=y |
diff --git a/arch/x86/include/asm/kgdb.h b/arch/x86/include/asm/kgdb.h index 396f5b5fc4d7..77e95f54570a 100644 --- a/arch/x86/include/asm/kgdb.h +++ b/arch/x86/include/asm/kgdb.h | |||
@@ -77,6 +77,7 @@ static inline void arch_kgdb_breakpoint(void) | |||
77 | } | 77 | } |
78 | #define BREAK_INSTR_SIZE 1 | 78 | #define BREAK_INSTR_SIZE 1 |
79 | #define CACHE_FLUSH_IS_SAFE 1 | 79 | #define CACHE_FLUSH_IS_SAFE 1 |
80 | #define GDB_ADJUSTS_BREAK_OFFSET | ||
80 | 81 | ||
81 | extern int kgdb_ll_trap(int cmd, const char *str, | 82 | extern int kgdb_ll_trap(int cmd, const char *str, |
82 | struct pt_regs *regs, long err, int trap, int sig); | 83 | struct pt_regs *regs, long err, int trap, int sig); |
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h index 1babf8adecdf..94e7618fcac8 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h | |||
@@ -136,6 +136,7 @@ struct cpuinfo_x86; | |||
136 | struct task_struct; | 136 | struct task_struct; |
137 | 137 | ||
138 | extern unsigned long profile_pc(struct pt_regs *regs); | 138 | extern unsigned long profile_pc(struct pt_regs *regs); |
139 | #define profile_pc profile_pc | ||
139 | 140 | ||
140 | extern unsigned long | 141 | extern unsigned long |
141 | convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); | 142 | convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs); |
@@ -202,20 +203,11 @@ static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) | |||
202 | #endif | 203 | #endif |
203 | } | 204 | } |
204 | 205 | ||
205 | static inline unsigned long instruction_pointer(struct pt_regs *regs) | 206 | #define GET_IP(regs) ((regs)->ip) |
206 | { | 207 | #define GET_FP(regs) ((regs)->bp) |
207 | return regs->ip; | 208 | #define GET_USP(regs) ((regs)->sp) |
208 | } | ||
209 | |||
210 | static inline unsigned long frame_pointer(struct pt_regs *regs) | ||
211 | { | ||
212 | return regs->bp; | ||
213 | } | ||
214 | 209 | ||
215 | static inline unsigned long user_stack_pointer(struct pt_regs *regs) | 210 | #include <asm-generic/ptrace.h> |
216 | { | ||
217 | return regs->sp; | ||
218 | } | ||
219 | 211 | ||
220 | /* Query offset/name of register from its name/offset */ | 212 | /* Query offset/name of register from its name/offset */ |
221 | extern int regs_query_register_offset(const char *name); | 213 | extern int regs_query_register_offset(const char *name); |
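The sh and x86 ptrace.h hunks above replace per-architecture accessors with GET_IP/GET_FP/GET_USP macros plus an include of <asm-generic/ptrace.h>. The generic header itself is not part of this excerpt; the helpers it is expected to supply are roughly of the following shape (a sketch under that assumption, not the actual file):

/* Sketch of the generic accessors assumed by the GET_* conversion above. */
static inline unsigned long instruction_pointer(struct pt_regs *regs)
{
	return GET_IP(regs);	/* arch defines GET_IP before the include */
}

static inline unsigned long user_stack_pointer(struct pt_regs *regs)
{
	return GET_USP(regs);	/* e.g. (regs)->sp on x86, regs->regs[15] on sh */
}

static inline unsigned long frame_pointer(struct pt_regs *regs)
{
	return GET_FP(regs);	/* e.g. (regs)->bp on x86 */
}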
diff --git a/arch/x86/include/asm/suspend_32.h b/arch/x86/include/asm/suspend_32.h index fd921c3a6841..487055c8c1aa 100644 --- a/arch/x86/include/asm/suspend_32.h +++ b/arch/x86/include/asm/suspend_32.h | |||
@@ -9,8 +9,6 @@ | |||
9 | #include <asm/desc.h> | 9 | #include <asm/desc.h> |
10 | #include <asm/i387.h> | 10 | #include <asm/i387.h> |
11 | 11 | ||
12 | static inline int arch_prepare_suspend(void) { return 0; } | ||
13 | |||
14 | /* image of the saved processor state */ | 12 | /* image of the saved processor state */ |
15 | struct saved_context { | 13 | struct saved_context { |
16 | u16 es, fs, gs, ss; | 14 | u16 es, fs, gs, ss; |
diff --git a/arch/x86/include/asm/suspend_64.h b/arch/x86/include/asm/suspend_64.h index 8d942afae681..09b0bf104156 100644 --- a/arch/x86/include/asm/suspend_64.h +++ b/arch/x86/include/asm/suspend_64.h | |||
@@ -9,11 +9,6 @@ | |||
9 | #include <asm/desc.h> | 9 | #include <asm/desc.h> |
10 | #include <asm/i387.h> | 10 | #include <asm/i387.h> |
11 | 11 | ||
12 | static inline int arch_prepare_suspend(void) | ||
13 | { | ||
14 | return 0; | ||
15 | } | ||
16 | |||
17 | /* | 12 | /* |
18 | * Image of the saved processor state, used by the low level ACPI suspend to | 13 | * Image of the saved processor state, used by the low level ACPI suspend to |
19 | * RAM code and by the low level hibernation code. | 14 | * RAM code and by the low level hibernation code. |
diff --git a/arch/x86/include/asm/tsc.h b/arch/x86/include/asm/tsc.h index 83e2efd181e2..9db5583b6d38 100644 --- a/arch/x86/include/asm/tsc.h +++ b/arch/x86/include/asm/tsc.h | |||
@@ -51,6 +51,10 @@ extern int unsynchronized_tsc(void); | |||
51 | extern int check_tsc_unstable(void); | 51 | extern int check_tsc_unstable(void); |
52 | extern unsigned long native_calibrate_tsc(void); | 52 | extern unsigned long native_calibrate_tsc(void); |
53 | 53 | ||
54 | #ifdef CONFIG_X86_64 | ||
55 | extern cycles_t vread_tsc(void); | ||
56 | #endif | ||
57 | |||
54 | /* | 58 | /* |
55 | * Boot-time check whether the TSCs are synchronized across | 59 | * Boot-time check whether the TSCs are synchronized across |
56 | * all CPUs/cores: | 60 | * all CPUs/cores: |
diff --git a/arch/x86/include/asm/vdso.h b/arch/x86/include/asm/vdso.h index 9064052b73de..bb0522850b74 100644 --- a/arch/x86/include/asm/vdso.h +++ b/arch/x86/include/asm/vdso.h | |||
@@ -1,20 +1,6 @@ | |||
1 | #ifndef _ASM_X86_VDSO_H | 1 | #ifndef _ASM_X86_VDSO_H |
2 | #define _ASM_X86_VDSO_H | 2 | #define _ASM_X86_VDSO_H |
3 | 3 | ||
4 | #ifdef CONFIG_X86_64 | ||
5 | extern const char VDSO64_PRELINK[]; | ||
6 | |||
7 | /* | ||
8 | * Given a pointer to the vDSO image, find the pointer to VDSO64_name | ||
9 | * as that symbol is defined in the vDSO sources or linker script. | ||
10 | */ | ||
11 | #define VDSO64_SYMBOL(base, name) \ | ||
12 | ({ \ | ||
13 | extern const char VDSO64_##name[]; \ | ||
14 | (void *)(VDSO64_##name - VDSO64_PRELINK + (unsigned long)(base)); \ | ||
15 | }) | ||
16 | #endif | ||
17 | |||
18 | #if defined CONFIG_X86_32 || defined CONFIG_COMPAT | 4 | #if defined CONFIG_X86_32 || defined CONFIG_COMPAT |
19 | extern const char VDSO32_PRELINK[]; | 5 | extern const char VDSO32_PRELINK[]; |
20 | 6 | ||
diff --git a/arch/x86/include/asm/vgtod.h b/arch/x86/include/asm/vgtod.h index 3d61e204826f..646b4c1ca695 100644 --- a/arch/x86/include/asm/vgtod.h +++ b/arch/x86/include/asm/vgtod.h | |||
@@ -23,8 +23,6 @@ struct vsyscall_gtod_data { | |||
23 | struct timespec wall_to_monotonic; | 23 | struct timespec wall_to_monotonic; |
24 | struct timespec wall_time_coarse; | 24 | struct timespec wall_time_coarse; |
25 | }; | 25 | }; |
26 | extern struct vsyscall_gtod_data __vsyscall_gtod_data | ||
27 | __section_vsyscall_gtod_data; | ||
28 | extern struct vsyscall_gtod_data vsyscall_gtod_data; | 26 | extern struct vsyscall_gtod_data vsyscall_gtod_data; |
29 | 27 | ||
30 | #endif /* _ASM_X86_VGTOD_H */ | 28 | #endif /* _ASM_X86_VGTOD_H */ |
diff --git a/arch/x86/include/asm/vsyscall.h b/arch/x86/include/asm/vsyscall.h index d0983d255fbd..d55597351f6a 100644 --- a/arch/x86/include/asm/vsyscall.h +++ b/arch/x86/include/asm/vsyscall.h | |||
@@ -16,27 +16,19 @@ enum vsyscall_num { | |||
16 | #ifdef __KERNEL__ | 16 | #ifdef __KERNEL__ |
17 | #include <linux/seqlock.h> | 17 | #include <linux/seqlock.h> |
18 | 18 | ||
19 | #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16))) | ||
20 | #define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16))) | ||
21 | |||
22 | /* Definitions for CONFIG_GENERIC_TIME definitions */ | 19 | /* Definitions for CONFIG_GENERIC_TIME definitions */ |
23 | #define __section_vsyscall_gtod_data __attribute__ \ | ||
24 | ((unused, __section__ (".vsyscall_gtod_data"),aligned(16))) | ||
25 | #define __section_vsyscall_clock __attribute__ \ | ||
26 | ((unused, __section__ (".vsyscall_clock"),aligned(16))) | ||
27 | #define __vsyscall_fn \ | 20 | #define __vsyscall_fn \ |
28 | __attribute__ ((unused, __section__(".vsyscall_fn"))) notrace | 21 | __attribute__ ((unused, __section__(".vsyscall_fn"))) notrace |
29 | 22 | ||
30 | #define VGETCPU_RDTSCP 1 | 23 | #define VGETCPU_RDTSCP 1 |
31 | #define VGETCPU_LSL 2 | 24 | #define VGETCPU_LSL 2 |
32 | 25 | ||
33 | extern int __vgetcpu_mode; | ||
34 | extern volatile unsigned long __jiffies; | ||
35 | |||
36 | /* kernel space (writeable) */ | 26 | /* kernel space (writeable) */ |
37 | extern int vgetcpu_mode; | 27 | extern int vgetcpu_mode; |
38 | extern struct timezone sys_tz; | 28 | extern struct timezone sys_tz; |
39 | 29 | ||
30 | #include <asm/vvar.h> | ||
31 | |||
40 | extern void map_vsyscall(void); | 32 | extern void map_vsyscall(void); |
41 | 33 | ||
42 | #endif /* __KERNEL__ */ | 34 | #endif /* __KERNEL__ */ |
diff --git a/arch/x86/include/asm/vvar.h b/arch/x86/include/asm/vvar.h new file mode 100644 index 000000000000..341b3559452b --- /dev/null +++ b/arch/x86/include/asm/vvar.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* | ||
2 | * vvar.h: Shared vDSO/kernel variable declarations | ||
3 | * Copyright (c) 2011 Andy Lutomirski | ||
4 | * Subject to the GNU General Public License, version 2 | ||
5 | * | ||
6 | * A handful of variables are accessible (read-only) from userspace | ||
7 | * code in the vsyscall page and the vdso. They are declared here. | ||
8 | * Some other file must define them with DEFINE_VVAR. | ||
9 | * | ||
10 | * In normal kernel code, they are used like any other variable. | ||
11 | * In user code, they are accessed through the VVAR macro. | ||
12 | * | ||
13 | * Each of these variables lives in the vsyscall page, and each | ||
14 | * one needs a unique offset within the little piece of the page | ||
15 | * reserved for vvars. Specify that offset in DECLARE_VVAR. | ||
16 | * (There are 896 bytes available. If you mess up, the linker will | ||
17 | * catch it.) | ||
18 | */ | ||
19 | |||
20 | /* Offset of vars within vsyscall page */ | ||
21 | #define VSYSCALL_VARS_OFFSET (3072 + 128) | ||
22 | |||
23 | #if defined(__VVAR_KERNEL_LDS) | ||
24 | |||
25 | /* The kernel linker script defines its own magic to put vvars in the | ||
26 | * right place. | ||
27 | */ | ||
28 | #define DECLARE_VVAR(offset, type, name) \ | ||
29 | EMIT_VVAR(name, VSYSCALL_VARS_OFFSET + offset) | ||
30 | |||
31 | #else | ||
32 | |||
33 | #define DECLARE_VVAR(offset, type, name) \ | ||
34 | static type const * const vvaraddr_ ## name = \ | ||
35 | (void *)(VSYSCALL_START + VSYSCALL_VARS_OFFSET + (offset)); | ||
36 | |||
37 | #define DEFINE_VVAR(type, name) \ | ||
38 | type __vvar_ ## name \ | ||
39 | __attribute__((section(".vsyscall_var_" #name), aligned(16))) | ||
40 | |||
41 | #define VVAR(name) (*vvaraddr_ ## name) | ||
42 | |||
43 | #endif | ||
44 | |||
45 | /* DECLARE_VVAR(offset, type, name) */ | ||
46 | |||
47 | DECLARE_VVAR(0, volatile unsigned long, jiffies) | ||
48 | DECLARE_VVAR(8, int, vgetcpu_mode) | ||
49 | DECLARE_VVAR(128, struct vsyscall_gtod_data, vsyscall_gtod_data) | ||
50 | |||
51 | #undef DECLARE_VVAR | ||
52 | #undef VSYSCALL_VARS_OFFSET | ||
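To make the header comment concrete: a variable gets a unique offset via DECLARE_VVAR() in vvar.h, backing storage via DEFINE_VVAR() in some kernel .c file, and user-mode readers go through VVAR(); the plain symbol name that ordinary kernel code links against comes from the EMIT_VVAR() alias added to vmlinux.lds.S further down in this diff. The snippet below is an illustrative sketch only; the variable foo, its offset, and vread_foo() are made up.

/* asm/vvar.h: reserve an unused offset inside the 896-byte vvar area. */
DECLARE_VVAR(16, int, foo)

/* Some kernel .c file: storage lands in section .vsyscall_var_foo. */
DEFINE_VVAR(int, foo) = 0;

/* vsyscall/vDSO code: read-only access through the fixed user address. */
static notrace int vread_foo(void)
{
	return VVAR(foo);
}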
diff --git a/arch/x86/include/asm/xen/hypercall.h b/arch/x86/include/asm/xen/hypercall.h index 8508bfe52296..d240ea950519 100644 --- a/arch/x86/include/asm/xen/hypercall.h +++ b/arch/x86/include/asm/xen/hypercall.h | |||
@@ -447,6 +447,13 @@ HYPERVISOR_hvm_op(int op, void *arg) | |||
447 | return _hypercall2(unsigned long, hvm_op, op, arg); | 447 | return _hypercall2(unsigned long, hvm_op, op, arg); |
448 | } | 448 | } |
449 | 449 | ||
450 | static inline int | ||
451 | HYPERVISOR_tmem_op( | ||
452 | struct tmem_op *op) | ||
453 | { | ||
454 | return _hypercall1(int, tmem_op, op); | ||
455 | } | ||
456 | |||
450 | static inline void | 457 | static inline void |
451 | MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set) | 458 | MULTI_fpu_taskswitch(struct multicall_entry *mcl, int set) |
452 | { | 459 | { |
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile index 250806472a7e..f5abe3a245b8 100644 --- a/arch/x86/kernel/Makefile +++ b/arch/x86/kernel/Makefile | |||
@@ -8,7 +8,6 @@ CPPFLAGS_vmlinux.lds += -U$(UTS_MACHINE) | |||
8 | 8 | ||
9 | ifdef CONFIG_FUNCTION_TRACER | 9 | ifdef CONFIG_FUNCTION_TRACER |
10 | # Do not profile debug and lowlevel utilities | 10 | # Do not profile debug and lowlevel utilities |
11 | CFLAGS_REMOVE_tsc.o = -pg | ||
12 | CFLAGS_REMOVE_rtc.o = -pg | 11 | CFLAGS_REMOVE_rtc.o = -pg |
13 | CFLAGS_REMOVE_paravirt-spinlocks.o = -pg | 12 | CFLAGS_REMOVE_paravirt-spinlocks.o = -pg |
14 | CFLAGS_REMOVE_pvclock.o = -pg | 13 | CFLAGS_REMOVE_pvclock.o = -pg |
@@ -24,13 +23,16 @@ endif | |||
24 | nostackp := $(call cc-option, -fno-stack-protector) | 23 | nostackp := $(call cc-option, -fno-stack-protector) |
25 | CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp) | 24 | CFLAGS_vsyscall_64.o := $(PROFILING) -g0 $(nostackp) |
26 | CFLAGS_hpet.o := $(nostackp) | 25 | CFLAGS_hpet.o := $(nostackp) |
27 | CFLAGS_tsc.o := $(nostackp) | 26 | CFLAGS_vread_tsc_64.o := $(nostackp) |
28 | CFLAGS_paravirt.o := $(nostackp) | 27 | CFLAGS_paravirt.o := $(nostackp) |
29 | GCOV_PROFILE_vsyscall_64.o := n | 28 | GCOV_PROFILE_vsyscall_64.o := n |
30 | GCOV_PROFILE_hpet.o := n | 29 | GCOV_PROFILE_hpet.o := n |
31 | GCOV_PROFILE_tsc.o := n | 30 | GCOV_PROFILE_tsc.o := n |
32 | GCOV_PROFILE_paravirt.o := n | 31 | GCOV_PROFILE_paravirt.o := n |
33 | 32 | ||
33 | # vread_tsc_64 is hot and should be fully optimized: | ||
34 | CFLAGS_REMOVE_vread_tsc_64.o = -pg -fno-optimize-sibling-calls | ||
35 | |||
34 | obj-y := process_$(BITS).o signal.o entry_$(BITS).o | 36 | obj-y := process_$(BITS).o signal.o entry_$(BITS).o |
35 | obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o | 37 | obj-y += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o |
36 | obj-y += time.o ioport.o ldt.o dumpstack.o | 38 | obj-y += time.o ioport.o ldt.o dumpstack.o |
@@ -39,7 +41,7 @@ obj-$(CONFIG_IRQ_WORK) += irq_work.o | |||
39 | obj-y += probe_roms.o | 41 | obj-y += probe_roms.o |
40 | obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o | 42 | obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o |
41 | obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o | 43 | obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o |
42 | obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o | 44 | obj-$(CONFIG_X86_64) += syscall_64.o vsyscall_64.o vread_tsc_64.o |
43 | obj-y += bootflag.o e820.o | 45 | obj-y += bootflag.o e820.o |
44 | obj-y += pci-dma.o quirks.o topology.o kdebugfs.o | 46 | obj-y += pci-dma.o quirks.o topology.o kdebugfs.o |
45 | obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o | 47 | obj-y += alternative.o i8253.o pci-nommu.o hw_breakpoint.o |
diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c index 25a28a245937..00cbb272627f 100644 --- a/arch/x86/kernel/time.c +++ b/arch/x86/kernel/time.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include <asm/time.h> | 23 | #include <asm/time.h> |
24 | 24 | ||
25 | #ifdef CONFIG_X86_64 | 25 | #ifdef CONFIG_X86_64 |
26 | volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; | 26 | DEFINE_VVAR(volatile unsigned long, jiffies) = INITIAL_JIFFIES; |
27 | #endif | 27 | #endif |
28 | 28 | ||
29 | unsigned long profile_pc(struct pt_regs *regs) | 29 | unsigned long profile_pc(struct pt_regs *regs) |
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c index 9335bf7dd2e7..6cc6922262af 100644 --- a/arch/x86/kernel/tsc.c +++ b/arch/x86/kernel/tsc.c | |||
@@ -763,25 +763,6 @@ static cycle_t read_tsc(struct clocksource *cs) | |||
763 | ret : clocksource_tsc.cycle_last; | 763 | ret : clocksource_tsc.cycle_last; |
764 | } | 764 | } |
765 | 765 | ||
766 | #ifdef CONFIG_X86_64 | ||
767 | static cycle_t __vsyscall_fn vread_tsc(void) | ||
768 | { | ||
769 | cycle_t ret; | ||
770 | |||
771 | /* | ||
772 | * Surround the RDTSC by barriers, to make sure it's not | ||
773 | * speculated to outside the seqlock critical section and | ||
774 | * does not cause time warps: | ||
775 | */ | ||
776 | rdtsc_barrier(); | ||
777 | ret = (cycle_t)vget_cycles(); | ||
778 | rdtsc_barrier(); | ||
779 | |||
780 | return ret >= __vsyscall_gtod_data.clock.cycle_last ? | ||
781 | ret : __vsyscall_gtod_data.clock.cycle_last; | ||
782 | } | ||
783 | #endif | ||
784 | |||
785 | static void resume_tsc(struct clocksource *cs) | 766 | static void resume_tsc(struct clocksource *cs) |
786 | { | 767 | { |
787 | clocksource_tsc.cycle_last = 0; | 768 | clocksource_tsc.cycle_last = 0; |
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S index 61682f0ac264..89aed99aafce 100644 --- a/arch/x86/kernel/vmlinux.lds.S +++ b/arch/x86/kernel/vmlinux.lds.S | |||
@@ -161,6 +161,12 @@ SECTIONS | |||
161 | 161 | ||
162 | #define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0) | 162 | #define VVIRT_OFFSET (VSYSCALL_ADDR - __vsyscall_0) |
163 | #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) | 163 | #define VVIRT(x) (ADDR(x) - VVIRT_OFFSET) |
164 | #define EMIT_VVAR(x, offset) .vsyscall_var_ ## x \ | ||
165 | ADDR(.vsyscall_0) + offset \ | ||
166 | : AT(VLOAD(.vsyscall_var_ ## x)) { \ | ||
167 | *(.vsyscall_var_ ## x) \ | ||
168 | } \ | ||
169 | x = VVIRT(.vsyscall_var_ ## x); | ||
164 | 170 | ||
165 | . = ALIGN(4096); | 171 | . = ALIGN(4096); |
166 | __vsyscall_0 = .; | 172 | __vsyscall_0 = .; |
@@ -175,18 +181,6 @@ SECTIONS | |||
175 | *(.vsyscall_fn) | 181 | *(.vsyscall_fn) |
176 | } | 182 | } |
177 | 183 | ||
178 | . = ALIGN(L1_CACHE_BYTES); | ||
179 | .vsyscall_gtod_data : AT(VLOAD(.vsyscall_gtod_data)) { | ||
180 | *(.vsyscall_gtod_data) | ||
181 | } | ||
182 | |||
183 | vsyscall_gtod_data = VVIRT(.vsyscall_gtod_data); | ||
184 | .vsyscall_clock : AT(VLOAD(.vsyscall_clock)) { | ||
185 | *(.vsyscall_clock) | ||
186 | } | ||
187 | vsyscall_clock = VVIRT(.vsyscall_clock); | ||
188 | |||
189 | |||
190 | .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) { | 184 | .vsyscall_1 ADDR(.vsyscall_0) + 1024: AT(VLOAD(.vsyscall_1)) { |
191 | *(.vsyscall_1) | 185 | *(.vsyscall_1) |
192 | } | 186 | } |
@@ -194,21 +188,14 @@ SECTIONS | |||
194 | *(.vsyscall_2) | 188 | *(.vsyscall_2) |
195 | } | 189 | } |
196 | 190 | ||
197 | .vgetcpu_mode : AT(VLOAD(.vgetcpu_mode)) { | ||
198 | *(.vgetcpu_mode) | ||
199 | } | ||
200 | vgetcpu_mode = VVIRT(.vgetcpu_mode); | ||
201 | |||
202 | . = ALIGN(L1_CACHE_BYTES); | ||
203 | .jiffies : AT(VLOAD(.jiffies)) { | ||
204 | *(.jiffies) | ||
205 | } | ||
206 | jiffies = VVIRT(.jiffies); | ||
207 | |||
208 | .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { | 191 | .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { |
209 | *(.vsyscall_3) | 192 | *(.vsyscall_3) |
210 | } | 193 | } |
211 | 194 | ||
195 | #define __VVAR_KERNEL_LDS | ||
196 | #include <asm/vvar.h> | ||
197 | #undef __VVAR_KERNEL_LDS | ||
198 | |||
212 | . = __vsyscall_0 + PAGE_SIZE; | 199 | . = __vsyscall_0 + PAGE_SIZE; |
213 | 200 | ||
214 | #undef VSYSCALL_ADDR | 201 | #undef VSYSCALL_ADDR |
@@ -216,6 +203,7 @@ SECTIONS | |||
216 | #undef VLOAD | 203 | #undef VLOAD |
217 | #undef VVIRT_OFFSET | 204 | #undef VVIRT_OFFSET |
218 | #undef VVIRT | 205 | #undef VVIRT |
206 | #undef EMIT_VVAR | ||
219 | 207 | ||
220 | #endif /* CONFIG_X86_64 */ | 208 | #endif /* CONFIG_X86_64 */ |
221 | 209 | ||
diff --git a/arch/x86/kernel/vread_tsc_64.c b/arch/x86/kernel/vread_tsc_64.c new file mode 100644 index 000000000000..a81aa9e9894c --- /dev/null +++ b/arch/x86/kernel/vread_tsc_64.c | |||
@@ -0,0 +1,36 @@ | |||
1 | /* This code runs in userspace. */ | ||
2 | |||
3 | #define DISABLE_BRANCH_PROFILING | ||
4 | #include <asm/vgtod.h> | ||
5 | |||
6 | notrace cycle_t __vsyscall_fn vread_tsc(void) | ||
7 | { | ||
8 | cycle_t ret; | ||
9 | u64 last; | ||
10 | |||
11 | /* | ||
12 | * Empirically, a fence (of type that depends on the CPU) | ||
13 | * before rdtsc is enough to ensure that rdtsc is ordered | ||
14 | * with respect to loads. The various CPU manuals are unclear | ||
15 | * as to whether rdtsc can be reordered with later loads, | ||
16 | * but no one has ever seen it happen. | ||
17 | */ | ||
18 | rdtsc_barrier(); | ||
19 | ret = (cycle_t)vget_cycles(); | ||
20 | |||
21 | last = VVAR(vsyscall_gtod_data).clock.cycle_last; | ||
22 | |||
23 | if (likely(ret >= last)) | ||
24 | return ret; | ||
25 | |||
26 | /* | ||
27 | * GCC likes to generate cmov here, but this branch is extremely | ||
28 | * predictable (it's just a function of time and the likely is | ||
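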
29 | * very likely) and there's a data dependence, so force GCC | ||
30 | * to generate a branch instead. I don't barrier() because | ||
31 | * we don't actually need a barrier, and if this function | ||
32 | * ever gets inlined it will generate worse code. | ||
33 | */ | ||
34 | asm volatile (""); | ||
35 | return last; | ||
36 | } | ||
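Moving the function into its own file only works because something still points at it: the extern declaration added to <asm/tsc.h> earlier in this diff lets the TSC clocksource keep its user-mode read hook. The registration presumably remains of this shape (a sketch of the pre-existing clocksource definition, not code introduced by this patch):

static struct clocksource clocksource_tsc = {
	.name	= "tsc",
	.rating	= 300,
	.read	= read_tsc,
	.mask	= CLOCKSOURCE_MASK(64),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS | CLOCK_SOURCE_MUST_VERIFY,
#ifdef CONFIG_X86_64
	.vread	= vread_tsc,	/* now the out-of-line function above */
#endif
};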
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c index dcbb28c4b694..3e682184d76c 100644 --- a/arch/x86/kernel/vsyscall_64.c +++ b/arch/x86/kernel/vsyscall_64.c | |||
@@ -49,17 +49,10 @@ | |||
49 | __attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace | 49 | __attribute__ ((unused, __section__(".vsyscall_" #nr))) notrace |
50 | #define __syscall_clobber "r11","cx","memory" | 50 | #define __syscall_clobber "r11","cx","memory" |
51 | 51 | ||
52 | /* | 52 | DEFINE_VVAR(int, vgetcpu_mode); |
53 | * vsyscall_gtod_data contains data that is : | 53 | DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data) = |
54 | * - readonly from vsyscalls | ||
55 | * - written by timer interrupt or sysctl (/proc/sys/kernel/vsyscall64) | ||
56 | * Try to keep this structure as small as possible to avoid cache line ping pongs | ||
57 | */ | ||
58 | int __vgetcpu_mode __section_vgetcpu_mode; | ||
59 | |||
60 | struct vsyscall_gtod_data __vsyscall_gtod_data __section_vsyscall_gtod_data = | ||
61 | { | 54 | { |
62 | .lock = SEQLOCK_UNLOCKED, | 55 | .lock = __SEQLOCK_UNLOCKED(__vsyscall_gtod_data.lock), |
63 | .sysctl_enabled = 1, | 56 | .sysctl_enabled = 1, |
64 | }; | 57 | }; |
65 | 58 | ||
@@ -97,7 +90,7 @@ void update_vsyscall(struct timespec *wall_time, struct timespec *wtm, | |||
97 | */ | 90 | */ |
98 | static __always_inline void do_get_tz(struct timezone * tz) | 91 | static __always_inline void do_get_tz(struct timezone * tz) |
99 | { | 92 | { |
100 | *tz = __vsyscall_gtod_data.sys_tz; | 93 | *tz = VVAR(vsyscall_gtod_data).sys_tz; |
101 | } | 94 | } |
102 | 95 | ||
103 | static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz) | 96 | static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz) |
@@ -126,23 +119,24 @@ static __always_inline void do_vgettimeofday(struct timeval * tv) | |||
126 | unsigned long mult, shift, nsec; | 119 | unsigned long mult, shift, nsec; |
127 | cycle_t (*vread)(void); | 120 | cycle_t (*vread)(void); |
128 | do { | 121 | do { |
129 | seq = read_seqbegin(&__vsyscall_gtod_data.lock); | 122 | seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock); |
130 | 123 | ||
131 | vread = __vsyscall_gtod_data.clock.vread; | 124 | vread = VVAR(vsyscall_gtod_data).clock.vread; |
132 | if (unlikely(!__vsyscall_gtod_data.sysctl_enabled || !vread)) { | 125 | if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled || |
126 | !vread)) { | ||
133 | gettimeofday(tv,NULL); | 127 | gettimeofday(tv,NULL); |
134 | return; | 128 | return; |
135 | } | 129 | } |
136 | 130 | ||
137 | now = vread(); | 131 | now = vread(); |
138 | base = __vsyscall_gtod_data.clock.cycle_last; | 132 | base = VVAR(vsyscall_gtod_data).clock.cycle_last; |
139 | mask = __vsyscall_gtod_data.clock.mask; | 133 | mask = VVAR(vsyscall_gtod_data).clock.mask; |
140 | mult = __vsyscall_gtod_data.clock.mult; | 134 | mult = VVAR(vsyscall_gtod_data).clock.mult; |
141 | shift = __vsyscall_gtod_data.clock.shift; | 135 | shift = VVAR(vsyscall_gtod_data).clock.shift; |
142 | 136 | ||
143 | tv->tv_sec = __vsyscall_gtod_data.wall_time_sec; | 137 | tv->tv_sec = VVAR(vsyscall_gtod_data).wall_time_sec; |
144 | nsec = __vsyscall_gtod_data.wall_time_nsec; | 138 | nsec = VVAR(vsyscall_gtod_data).wall_time_nsec; |
145 | } while (read_seqretry(&__vsyscall_gtod_data.lock, seq)); | 139 | } while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq)); |
146 | 140 | ||
147 | /* calculate interval: */ | 141 | /* calculate interval: */ |
148 | cycle_delta = (now - base) & mask; | 142 | cycle_delta = (now - base) & mask; |
@@ -171,15 +165,15 @@ time_t __vsyscall(1) vtime(time_t *t) | |||
171 | { | 165 | { |
172 | unsigned seq; | 166 | unsigned seq; |
173 | time_t result; | 167 | time_t result; |
174 | if (unlikely(!__vsyscall_gtod_data.sysctl_enabled)) | 168 | if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled)) |
175 | return time_syscall(t); | 169 | return time_syscall(t); |
176 | 170 | ||
177 | do { | 171 | do { |
178 | seq = read_seqbegin(&__vsyscall_gtod_data.lock); | 172 | seq = read_seqbegin(&VVAR(vsyscall_gtod_data).lock); |
179 | 173 | ||
180 | result = __vsyscall_gtod_data.wall_time_sec; | 174 | result = VVAR(vsyscall_gtod_data).wall_time_sec; |
181 | 175 | ||
182 | } while (read_seqretry(&__vsyscall_gtod_data.lock, seq)); | 176 | } while (read_seqretry(&VVAR(vsyscall_gtod_data).lock, seq)); |
183 | 177 | ||
184 | if (t) | 178 | if (t) |
185 | *t = result; | 179 | *t = result; |
@@ -208,9 +202,9 @@ vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache) | |||
208 | We do this here because otherwise user space would do it on | 202 | We do this here because otherwise user space would do it on |
209 | its own in a likely inferior way (no access to jiffies). | 203 | its own in a likely inferior way (no access to jiffies). |
210 | If you don't like it pass NULL. */ | 204 | If you don't like it pass NULL. */ |
211 | if (tcache && tcache->blob[0] == (j = __jiffies)) { | 205 | if (tcache && tcache->blob[0] == (j = VVAR(jiffies))) { |
212 | p = tcache->blob[1]; | 206 | p = tcache->blob[1]; |
213 | } else if (__vgetcpu_mode == VGETCPU_RDTSCP) { | 207 | } else if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) { |
214 | /* Load per CPU data from RDTSCP */ | 208 | /* Load per CPU data from RDTSCP */ |
215 | native_read_tscp(&p); | 209 | native_read_tscp(&p); |
216 | } else { | 210 | } else { |
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile index b6552b189bcd..bef0bc962400 100644 --- a/arch/x86/vdso/Makefile +++ b/arch/x86/vdso/Makefile | |||
@@ -11,7 +11,7 @@ vdso-install-$(VDSO32-y) += $(vdso32-images) | |||
11 | 11 | ||
12 | 12 | ||
13 | # files to link into the vdso | 13 | # files to link into the vdso |
14 | vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vvar.o | 14 | vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o |
15 | 15 | ||
16 | # files to link into kernel | 16 | # files to link into kernel |
17 | obj-$(VDSO64-y) += vma.o vdso.o | 17 | obj-$(VDSO64-y) += vma.o vdso.o |
@@ -37,11 +37,24 @@ $(obj)/%.so: OBJCOPYFLAGS := -S | |||
37 | $(obj)/%.so: $(obj)/%.so.dbg FORCE | 37 | $(obj)/%.so: $(obj)/%.so.dbg FORCE |
38 | $(call if_changed,objcopy) | 38 | $(call if_changed,objcopy) |
39 | 39 | ||
40 | # | ||
41 | # Don't omit frame pointers for ease of userspace debugging, but do | ||
42 | # optimize sibling calls. | ||
43 | # | ||
40 | CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ | 44 | CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ |
41 | $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) | 45 | $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \ |
46 | -fno-omit-frame-pointer -foptimize-sibling-calls | ||
42 | 47 | ||
43 | $(vobjs): KBUILD_CFLAGS += $(CFL) | 48 | $(vobjs): KBUILD_CFLAGS += $(CFL) |
44 | 49 | ||
50 | # | ||
51 | # vDSO code runs in userspace and -pg doesn't help with profiling anyway. | ||
52 | # | ||
53 | CFLAGS_REMOVE_vdso-note.o = -pg | ||
54 | CFLAGS_REMOVE_vclock_gettime.o = -pg | ||
55 | CFLAGS_REMOVE_vgetcpu.o = -pg | ||
56 | CFLAGS_REMOVE_vvar.o = -pg | ||
57 | |||
45 | targets += vdso-syms.lds | 58 | targets += vdso-syms.lds |
46 | obj-$(VDSO64-y) += vdso-syms.lds | 59 | obj-$(VDSO64-y) += vdso-syms.lds |
47 | 60 | ||
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c index ee55754cc3c5..a724905fdae7 100644 --- a/arch/x86/vdso/vclock_gettime.c +++ b/arch/x86/vdso/vclock_gettime.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Copyright 2006 Andi Kleen, SUSE Labs. | 2 | * Copyright 2006 Andi Kleen, SUSE Labs. |
3 | * Subject to the GNU Public License, v.2 | 3 | * Subject to the GNU Public License, v.2 |
4 | * | 4 | * |
5 | * Fast user context implementation of clock_gettime and gettimeofday. | 5 | * Fast user context implementation of clock_gettime, gettimeofday, and time. |
6 | * | 6 | * |
7 | * The code should have no internal unresolved relocations. | 7 | * The code should have no internal unresolved relocations. |
8 | * Check with readelf after changing. | 8 | * Check with readelf after changing. |
@@ -22,9 +22,8 @@ | |||
22 | #include <asm/hpet.h> | 22 | #include <asm/hpet.h> |
23 | #include <asm/unistd.h> | 23 | #include <asm/unistd.h> |
24 | #include <asm/io.h> | 24 | #include <asm/io.h> |
25 | #include "vextern.h" | ||
26 | 25 | ||
27 | #define gtod vdso_vsyscall_gtod_data | 26 | #define gtod (&VVAR(vsyscall_gtod_data)) |
28 | 27 | ||
29 | notrace static long vdso_fallback_gettime(long clock, struct timespec *ts) | 28 | notrace static long vdso_fallback_gettime(long clock, struct timespec *ts) |
30 | { | 29 | { |
@@ -56,22 +55,6 @@ notrace static noinline int do_realtime(struct timespec *ts) | |||
56 | return 0; | 55 | return 0; |
57 | } | 56 | } |
58 | 57 | ||
59 | /* Copy of the version in kernel/time.c which we cannot directly access */ | ||
60 | notrace static void | ||
61 | vset_normalized_timespec(struct timespec *ts, long sec, long nsec) | ||
62 | { | ||
63 | while (nsec >= NSEC_PER_SEC) { | ||
64 | nsec -= NSEC_PER_SEC; | ||
65 | ++sec; | ||
66 | } | ||
67 | while (nsec < 0) { | ||
68 | nsec += NSEC_PER_SEC; | ||
69 | --sec; | ||
70 | } | ||
71 | ts->tv_sec = sec; | ||
72 | ts->tv_nsec = nsec; | ||
73 | } | ||
74 | |||
75 | notrace static noinline int do_monotonic(struct timespec *ts) | 58 | notrace static noinline int do_monotonic(struct timespec *ts) |
76 | { | 59 | { |
77 | unsigned long seq, ns, secs; | 60 | unsigned long seq, ns, secs; |
@@ -82,7 +65,17 @@ notrace static noinline int do_monotonic(struct timespec *ts) | |||
82 | secs += gtod->wall_to_monotonic.tv_sec; | 65 | secs += gtod->wall_to_monotonic.tv_sec; |
83 | ns += gtod->wall_to_monotonic.tv_nsec; | 66 | ns += gtod->wall_to_monotonic.tv_nsec; |
84 | } while (unlikely(read_seqretry(>od->lock, seq))); | 67 | } while (unlikely(read_seqretry(>od->lock, seq))); |
85 | vset_normalized_timespec(ts, secs, ns); | 68 | |
69 | /* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec | ||
70 | * are all guaranteed to be nonnegative. | ||
71 | */ | ||
72 | while (ns >= NSEC_PER_SEC) { | ||
73 | ns -= NSEC_PER_SEC; | ||
74 | ++secs; | ||
75 | } | ||
76 | ts->tv_sec = secs; | ||
77 | ts->tv_nsec = ns; | ||
78 | |||
86 | return 0; | 79 | return 0; |
87 | } | 80 | } |
88 | 81 | ||
@@ -107,7 +100,17 @@ notrace static noinline int do_monotonic_coarse(struct timespec *ts) | |||
107 | secs += gtod->wall_to_monotonic.tv_sec; | 100 | secs += gtod->wall_to_monotonic.tv_sec; |
108 | ns += gtod->wall_to_monotonic.tv_nsec; | 101 | ns += gtod->wall_to_monotonic.tv_nsec; |
109 | } while (unlikely(read_seqretry(>od->lock, seq))); | 102 | } while (unlikely(read_seqretry(>od->lock, seq))); |
110 | vset_normalized_timespec(ts, secs, ns); | 103 | |
104 | /* wall_time_nsec and wall_to_monotonic.tv_nsec are | ||
105 | * guaranteed to be between 0 and NSEC_PER_SEC. | ||
106 | */ | ||
107 | if (ns >= NSEC_PER_SEC) { | ||
108 | ns -= NSEC_PER_SEC; | ||
109 | ++secs; | ||
110 | } | ||
111 | ts->tv_sec = secs; | ||
112 | ts->tv_nsec = ns; | ||
113 | |||
111 | return 0; | 114 | return 0; |
112 | } | 115 | } |
113 | 116 | ||
@@ -157,3 +160,32 @@ notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) | |||
157 | } | 160 | } |
158 | int gettimeofday(struct timeval *, struct timezone *) | 161 | int gettimeofday(struct timeval *, struct timezone *) |
159 | __attribute__((weak, alias("__vdso_gettimeofday"))); | 162 | __attribute__((weak, alias("__vdso_gettimeofday"))); |
163 | |||
164 | /* This will break when the xtime seconds get inaccurate, but that is | ||
165 | * unlikely */ | ||
166 | |||
167 | static __always_inline long time_syscall(long *t) | ||
168 | { | ||
169 | long secs; | ||
170 | asm volatile("syscall" | ||
171 | : "=a" (secs) | ||
172 | : "0" (__NR_time), "D" (t) : "cc", "r11", "cx", "memory"); | ||
173 | return secs; | ||
174 | } | ||
175 | |||
176 | notrace time_t __vdso_time(time_t *t) | ||
177 | { | ||
178 | time_t result; | ||
179 | |||
180 | if (unlikely(!VVAR(vsyscall_gtod_data).sysctl_enabled)) | ||
181 | return time_syscall(t); | ||
182 | |||
183 | /* This is atomic on x86_64 so we don't need any locks. */ | ||
184 | result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec); | ||
185 | |||
186 | if (t) | ||
187 | *t = result; | ||
188 | return result; | ||
189 | } | ||
190 | int time(time_t *t) | ||
191 | __attribute__((weak, alias("__vdso_time"))); | ||
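With time exported from the vDSO (the version-script hunk follows next), a C library that binds it there can satisfy time() and clock_gettime() without a mode switch. A rough userspace check, assuming a libc that actually routes these calls through the vDSO (older glibc needs -lrt for clock_gettime), could be:

/* Userspace sketch, not part of the patch. */
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec ts;
	time_t now = time(NULL);		/* ideally __vdso_time, no syscall */

	clock_gettime(CLOCK_MONOTONIC, &ts);	/* ideally __vdso_clock_gettime */
	printf("time()=%ld monotonic=%ld.%09ld\n",
	       (long)now, (long)ts.tv_sec, (long)ts.tv_nsec);
	return 0;
}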
diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S index 4e5dd3b4de7f..b96b2677cad8 100644 --- a/arch/x86/vdso/vdso.lds.S +++ b/arch/x86/vdso/vdso.lds.S | |||
@@ -23,15 +23,10 @@ VERSION { | |||
23 | __vdso_gettimeofday; | 23 | __vdso_gettimeofday; |
24 | getcpu; | 24 | getcpu; |
25 | __vdso_getcpu; | 25 | __vdso_getcpu; |
26 | time; | ||
27 | __vdso_time; | ||
26 | local: *; | 28 | local: *; |
27 | }; | 29 | }; |
28 | } | 30 | } |
29 | 31 | ||
30 | VDSO64_PRELINK = VDSO_PRELINK; | 32 | VDSO64_PRELINK = VDSO_PRELINK; |
31 | |||
32 | /* | ||
33 | * Define VDSO64_x for each VEXTERN(x), for use via VDSO64_SYMBOL. | ||
34 | */ | ||
35 | #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x; | ||
36 | #include "vextern.h" | ||
37 | #undef VEXTERN | ||
diff --git a/arch/x86/vdso/vextern.h b/arch/x86/vdso/vextern.h deleted file mode 100644 index 1683ba2ae3e8..000000000000 --- a/arch/x86/vdso/vextern.h +++ /dev/null | |||
@@ -1,16 +0,0 @@ | |||
1 | #ifndef VEXTERN | ||
2 | #include <asm/vsyscall.h> | ||
3 | #define VEXTERN(x) \ | ||
4 | extern typeof(x) *vdso_ ## x __attribute__((visibility("hidden"))); | ||
5 | #endif | ||
6 | |||
7 | #define VMAGIC 0xfeedbabeabcdefabUL | ||
8 | |||
9 | /* Any kernel variables used in the vDSO must be exported in the main | ||
10 | kernel's vmlinux.lds.S/vsyscall.h/proper __section and | ||
11 | put into vextern.h and be referenced as a pointer with vdso prefix. | ||
12 | The main kernel later fills in the values. */ | ||
13 | |||
14 | VEXTERN(jiffies) | ||
15 | VEXTERN(vgetcpu_mode) | ||
16 | VEXTERN(vsyscall_gtod_data) | ||
diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c index 9fbc6b20026b..5463ad558573 100644 --- a/arch/x86/vdso/vgetcpu.c +++ b/arch/x86/vdso/vgetcpu.c | |||
@@ -11,14 +11,13 @@ | |||
11 | #include <linux/time.h> | 11 | #include <linux/time.h> |
12 | #include <asm/vsyscall.h> | 12 | #include <asm/vsyscall.h> |
13 | #include <asm/vgtod.h> | 13 | #include <asm/vgtod.h> |
14 | #include "vextern.h" | ||
15 | 14 | ||
16 | notrace long | 15 | notrace long |
17 | __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) | 16 | __vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused) |
18 | { | 17 | { |
19 | unsigned int p; | 18 | unsigned int p; |
20 | 19 | ||
21 | if (*vdso_vgetcpu_mode == VGETCPU_RDTSCP) { | 20 | if (VVAR(vgetcpu_mode) == VGETCPU_RDTSCP) { |
22 | /* Load per CPU data from RDTSCP */ | 21 | /* Load per CPU data from RDTSCP */ |
23 | native_read_tscp(&p); | 22 | native_read_tscp(&p); |
24 | } else { | 23 | } else { |
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c index 4b5d26f108bb..7abd2be0f9b9 100644 --- a/arch/x86/vdso/vma.c +++ b/arch/x86/vdso/vma.c | |||
@@ -15,9 +15,6 @@ | |||
15 | #include <asm/proto.h> | 15 | #include <asm/proto.h> |
16 | #include <asm/vdso.h> | 16 | #include <asm/vdso.h> |
17 | 17 | ||
18 | #include "vextern.h" /* Just for VMAGIC. */ | ||
19 | #undef VEXTERN | ||
20 | |||
21 | unsigned int __read_mostly vdso_enabled = 1; | 18 | unsigned int __read_mostly vdso_enabled = 1; |
22 | 19 | ||
23 | extern char vdso_start[], vdso_end[]; | 20 | extern char vdso_start[], vdso_end[]; |
@@ -26,20 +23,10 @@ extern unsigned short vdso_sync_cpuid; | |||
26 | static struct page **vdso_pages; | 23 | static struct page **vdso_pages; |
27 | static unsigned vdso_size; | 24 | static unsigned vdso_size; |
28 | 25 | ||
29 | static inline void *var_ref(void *p, char *name) | ||
30 | { | ||
31 | if (*(void **)p != (void *)VMAGIC) { | ||
32 | printk("VDSO: variable %s broken\n", name); | ||
33 | vdso_enabled = 0; | ||
34 | } | ||
35 | return p; | ||
36 | } | ||
37 | |||
38 | static int __init init_vdso_vars(void) | 26 | static int __init init_vdso_vars(void) |
39 | { | 27 | { |
40 | int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE; | 28 | int npages = (vdso_end - vdso_start + PAGE_SIZE - 1) / PAGE_SIZE; |
41 | int i; | 29 | int i; |
42 | char *vbase; | ||
43 | 30 | ||
44 | vdso_size = npages << PAGE_SHIFT; | 31 | vdso_size = npages << PAGE_SHIFT; |
45 | vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL); | 32 | vdso_pages = kmalloc(sizeof(struct page *) * npages, GFP_KERNEL); |
@@ -54,20 +41,6 @@ static int __init init_vdso_vars(void) | |||
54 | copy_page(page_address(p), vdso_start + i*PAGE_SIZE); | 41 | copy_page(page_address(p), vdso_start + i*PAGE_SIZE); |
55 | } | 42 | } |
56 | 43 | ||
57 | vbase = vmap(vdso_pages, npages, 0, PAGE_KERNEL); | ||
58 | if (!vbase) | ||
59 | goto oom; | ||
60 | |||
61 | if (memcmp(vbase, "\177ELF", 4)) { | ||
62 | printk("VDSO: I'm broken; not ELF\n"); | ||
63 | vdso_enabled = 0; | ||
64 | } | ||
65 | |||
66 | #define VEXTERN(x) \ | ||
67 | *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x; | ||
68 | #include "vextern.h" | ||
69 | #undef VEXTERN | ||
70 | vunmap(vbase); | ||
71 | return 0; | 44 | return 0; |
72 | 45 | ||
73 | oom: | 46 | oom: |
diff --git a/arch/x86/vdso/vvar.c b/arch/x86/vdso/vvar.c deleted file mode 100644 index 1b7e703684f9..000000000000 --- a/arch/x86/vdso/vvar.c +++ /dev/null | |||
@@ -1,12 +0,0 @@ | |||
1 | /* Define pointer to external vDSO variables. | ||
2 | These are part of the vDSO. The kernel fills in the real addresses | ||
3 | at boot time. This is done because when the vdso is linked the | ||
4 | kernel isn't yet and we don't know the final addresses. */ | ||
5 | #include <linux/kernel.h> | ||
6 | #include <linux/time.h> | ||
7 | #include <asm/vsyscall.h> | ||
8 | #include <asm/timex.h> | ||
9 | #include <asm/vgtod.h> | ||
10 | |||
11 | #define VEXTERN(x) typeof (__ ## x) *const vdso_ ## x = (void *)VMAGIC; | ||
12 | #include "vextern.h" | ||
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index 02d752460371..dc708dcc62f1 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c | |||
@@ -75,67 +75,12 @@ | |||
75 | #include "mmu.h" | 75 | #include "mmu.h" |
76 | #include "debugfs.h" | 76 | #include "debugfs.h" |
77 | 77 | ||
78 | #define MMU_UPDATE_HISTO 30 | ||
79 | |||
80 | /* | 78 | /* |
81 | * Protects atomic reservation decrease/increase against concurrent increases. | 79 | * Protects atomic reservation decrease/increase against concurrent increases. |
82 | * Also protects non-atomic updates of current_pages and balloon lists. | 80 | * Also protects non-atomic updates of current_pages and balloon lists. |
83 | */ | 81 | */ |
84 | DEFINE_SPINLOCK(xen_reservation_lock); | 82 | DEFINE_SPINLOCK(xen_reservation_lock); |
85 | 83 | ||
86 | #ifdef CONFIG_XEN_DEBUG_FS | ||
87 | |||
88 | static struct { | ||
89 | u32 pgd_update; | ||
90 | u32 pgd_update_pinned; | ||
91 | u32 pgd_update_batched; | ||
92 | |||
93 | u32 pud_update; | ||
94 | u32 pud_update_pinned; | ||
95 | u32 pud_update_batched; | ||
96 | |||
97 | u32 pmd_update; | ||
98 | u32 pmd_update_pinned; | ||
99 | u32 pmd_update_batched; | ||
100 | |||
101 | u32 pte_update; | ||
102 | u32 pte_update_pinned; | ||
103 | u32 pte_update_batched; | ||
104 | |||
105 | u32 mmu_update; | ||
106 | u32 mmu_update_extended; | ||
107 | u32 mmu_update_histo[MMU_UPDATE_HISTO]; | ||
108 | |||
109 | u32 prot_commit; | ||
110 | u32 prot_commit_batched; | ||
111 | |||
112 | u32 set_pte_at; | ||
113 | u32 set_pte_at_batched; | ||
114 | u32 set_pte_at_pinned; | ||
115 | u32 set_pte_at_current; | ||
116 | u32 set_pte_at_kernel; | ||
117 | } mmu_stats; | ||
118 | |||
119 | static u8 zero_stats; | ||
120 | |||
121 | static inline void check_zero(void) | ||
122 | { | ||
123 | if (unlikely(zero_stats)) { | ||
124 | memset(&mmu_stats, 0, sizeof(mmu_stats)); | ||
125 | zero_stats = 0; | ||
126 | } | ||
127 | } | ||
128 | |||
129 | #define ADD_STATS(elem, val) \ | ||
130 | do { check_zero(); mmu_stats.elem += (val); } while(0) | ||
131 | |||
132 | #else /* !CONFIG_XEN_DEBUG_FS */ | ||
133 | |||
134 | #define ADD_STATS(elem, val) do { (void)(val); } while(0) | ||
135 | |||
136 | #endif /* CONFIG_XEN_DEBUG_FS */ | ||
137 | |||
138 | |||
139 | /* | 84 | /* |
140 | * Identity map, in addition to plain kernel map. This needs to be | 85 | * Identity map, in addition to plain kernel map. This needs to be |
141 | * large enough to allocate page table pages to allocate the rest. | 86 | * large enough to allocate page table pages to allocate the rest. |
@@ -243,11 +188,6 @@ static bool xen_page_pinned(void *ptr) | |||
243 | return PagePinned(page); | 188 | return PagePinned(page); |
244 | } | 189 | } |
245 | 190 | ||
246 | static bool xen_iomap_pte(pte_t pte) | ||
247 | { | ||
248 | return pte_flags(pte) & _PAGE_IOMAP; | ||
249 | } | ||
250 | |||
251 | void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) | 191 | void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) |
252 | { | 192 | { |
253 | struct multicall_space mcs; | 193 | struct multicall_space mcs; |
@@ -257,7 +197,7 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) | |||
257 | u = mcs.args; | 197 | u = mcs.args; |
258 | 198 | ||
259 | /* ptep might be kmapped when using 32-bit HIGHPTE */ | 199 | /* ptep might be kmapped when using 32-bit HIGHPTE */ |
260 | u->ptr = arbitrary_virt_to_machine(ptep).maddr; | 200 | u->ptr = virt_to_machine(ptep).maddr; |
261 | u->val = pte_val_ma(pteval); | 201 | u->val = pte_val_ma(pteval); |
262 | 202 | ||
263 | MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid); | 203 | MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, domid); |
@@ -266,11 +206,6 @@ void xen_set_domain_pte(pte_t *ptep, pte_t pteval, unsigned domid) | |||
266 | } | 206 | } |
267 | EXPORT_SYMBOL_GPL(xen_set_domain_pte); | 207 | EXPORT_SYMBOL_GPL(xen_set_domain_pte); |
268 | 208 | ||
269 | static void xen_set_iomap_pte(pte_t *ptep, pte_t pteval) | ||
270 | { | ||
271 | xen_set_domain_pte(ptep, pteval, DOMID_IO); | ||
272 | } | ||
273 | |||
274 | static void xen_extend_mmu_update(const struct mmu_update *update) | 209 | static void xen_extend_mmu_update(const struct mmu_update *update) |
275 | { | 210 | { |
276 | struct multicall_space mcs; | 211 | struct multicall_space mcs; |
@@ -279,27 +214,17 @@ static void xen_extend_mmu_update(const struct mmu_update *update) | |||
279 | mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u)); | 214 | mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u)); |
280 | 215 | ||
281 | if (mcs.mc != NULL) { | 216 | if (mcs.mc != NULL) { |
282 | ADD_STATS(mmu_update_extended, 1); | ||
283 | ADD_STATS(mmu_update_histo[mcs.mc->args[1]], -1); | ||
284 | |||
285 | mcs.mc->args[1]++; | 217 | mcs.mc->args[1]++; |
286 | |||
287 | if (mcs.mc->args[1] < MMU_UPDATE_HISTO) | ||
288 | ADD_STATS(mmu_update_histo[mcs.mc->args[1]], 1); | ||
289 | else | ||
290 | ADD_STATS(mmu_update_histo[0], 1); | ||
291 | } else { | 218 | } else { |
292 | ADD_STATS(mmu_update, 1); | ||
293 | mcs = __xen_mc_entry(sizeof(*u)); | 219 | mcs = __xen_mc_entry(sizeof(*u)); |
294 | MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); | 220 | MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); |
295 | ADD_STATS(mmu_update_histo[1], 1); | ||
296 | } | 221 | } |
297 | 222 | ||
298 | u = mcs.args; | 223 | u = mcs.args; |
299 | *u = *update; | 224 | *u = *update; |
300 | } | 225 | } |
301 | 226 | ||
302 | void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) | 227 | static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) |
303 | { | 228 | { |
304 | struct mmu_update u; | 229 | struct mmu_update u; |
305 | 230 | ||
@@ -312,17 +237,13 @@ void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) | |||
312 | u.val = pmd_val_ma(val); | 237 | u.val = pmd_val_ma(val); |
313 | xen_extend_mmu_update(&u); | 238 | xen_extend_mmu_update(&u); |
314 | 239 | ||
315 | ADD_STATS(pmd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); | ||
316 | |||
317 | xen_mc_issue(PARAVIRT_LAZY_MMU); | 240 | xen_mc_issue(PARAVIRT_LAZY_MMU); |
318 | 241 | ||
319 | preempt_enable(); | 242 | preempt_enable(); |
320 | } | 243 | } |
321 | 244 | ||
322 | void xen_set_pmd(pmd_t *ptr, pmd_t val) | 245 | static void xen_set_pmd(pmd_t *ptr, pmd_t val) |
323 | { | 246 | { |
324 | ADD_STATS(pmd_update, 1); | ||
325 | |||
326 | /* If page is not pinned, we can just update the entry | 247 | /* If page is not pinned, we can just update the entry |
327 | directly */ | 248 | directly */ |
328 | if (!xen_page_pinned(ptr)) { | 249 | if (!xen_page_pinned(ptr)) { |
@@ -330,8 +251,6 @@ void xen_set_pmd(pmd_t *ptr, pmd_t val) | |||
330 | return; | 251 | return; |
331 | } | 252 | } |
332 | 253 | ||
333 | ADD_STATS(pmd_update_pinned, 1); | ||
334 | |||
335 | xen_set_pmd_hyper(ptr, val); | 254 | xen_set_pmd_hyper(ptr, val); |
336 | } | 255 | } |
337 | 256 | ||
@@ -344,35 +263,34 @@ void set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags) | |||
344 | set_pte_vaddr(vaddr, mfn_pte(mfn, flags)); | 263 | set_pte_vaddr(vaddr, mfn_pte(mfn, flags)); |
345 | } | 264 | } |
346 | 265 | ||
347 | void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, | 266 | static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval) |
348 | pte_t *ptep, pte_t pteval) | ||
349 | { | 267 | { |
350 | if (xen_iomap_pte(pteval)) { | 268 | struct mmu_update u; |
351 | xen_set_iomap_pte(ptep, pteval); | ||
352 | goto out; | ||
353 | } | ||
354 | 269 | ||
355 | ADD_STATS(set_pte_at, 1); | 270 | if (paravirt_get_lazy_mode() != PARAVIRT_LAZY_MMU) |
356 | // ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep)); | 271 | return false; |
357 | ADD_STATS(set_pte_at_current, mm == current->mm); | ||
358 | ADD_STATS(set_pte_at_kernel, mm == &init_mm); | ||
359 | 272 | ||
360 | if (mm == current->mm || mm == &init_mm) { | 273 | xen_mc_batch(); |
361 | if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { | ||
362 | struct multicall_space mcs; | ||
363 | mcs = xen_mc_entry(0); | ||
364 | 274 | ||
365 | MULTI_update_va_mapping(mcs.mc, addr, pteval, 0); | 275 | u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; |
366 | ADD_STATS(set_pte_at_batched, 1); | 276 | u.val = pte_val_ma(pteval); |
367 | xen_mc_issue(PARAVIRT_LAZY_MMU); | 277 | xen_extend_mmu_update(&u); |
368 | goto out; | 278 | |
369 | } else | 279 | xen_mc_issue(PARAVIRT_LAZY_MMU); |
370 | if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0) | ||
371 | goto out; | ||
372 | } | ||
373 | xen_set_pte(ptep, pteval); | ||
374 | 280 | ||
375 | out: return; | 281 | return true; |
282 | } | ||
283 | |||
284 | static void xen_set_pte(pte_t *ptep, pte_t pteval) | ||
285 | { | ||
286 | if (!xen_batched_set_pte(ptep, pteval)) | ||
287 | native_set_pte(ptep, pteval); | ||
288 | } | ||
289 | |||
290 | static void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
291 | pte_t *ptep, pte_t pteval) | ||
292 | { | ||
293 | xen_set_pte(ptep, pteval); | ||
376 | } | 294 | } |
377 | 295 | ||
378 | pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, | 296 | pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, |
@@ -389,13 +307,10 @@ void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, | |||
389 | 307 | ||
390 | xen_mc_batch(); | 308 | xen_mc_batch(); |
391 | 309 | ||
392 | u.ptr = arbitrary_virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; | 310 | u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; |
393 | u.val = pte_val_ma(pte); | 311 | u.val = pte_val_ma(pte); |
394 | xen_extend_mmu_update(&u); | 312 | xen_extend_mmu_update(&u); |
395 | 313 | ||
396 | ADD_STATS(prot_commit, 1); | ||
397 | ADD_STATS(prot_commit_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); | ||
398 | |||
399 | xen_mc_issue(PARAVIRT_LAZY_MMU); | 314 | xen_mc_issue(PARAVIRT_LAZY_MMU); |
400 | } | 315 | } |
401 | 316 | ||
@@ -463,7 +378,7 @@ static pteval_t iomap_pte(pteval_t val) | |||
463 | return val; | 378 | return val; |
464 | } | 379 | } |
465 | 380 | ||
466 | pteval_t xen_pte_val(pte_t pte) | 381 | static pteval_t xen_pte_val(pte_t pte) |
467 | { | 382 | { |
468 | pteval_t pteval = pte.pte; | 383 | pteval_t pteval = pte.pte; |
469 | 384 | ||
@@ -480,7 +395,7 @@ pteval_t xen_pte_val(pte_t pte) | |||
480 | } | 395 | } |
481 | PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); | 396 | PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); |
482 | 397 | ||
483 | pgdval_t xen_pgd_val(pgd_t pgd) | 398 | static pgdval_t xen_pgd_val(pgd_t pgd) |
484 | { | 399 | { |
485 | return pte_mfn_to_pfn(pgd.pgd); | 400 | return pte_mfn_to_pfn(pgd.pgd); |
486 | } | 401 | } |
@@ -511,7 +426,7 @@ void xen_set_pat(u64 pat) | |||
511 | WARN_ON(pat != 0x0007010600070106ull); | 426 | WARN_ON(pat != 0x0007010600070106ull); |
512 | } | 427 | } |
513 | 428 | ||
514 | pte_t xen_make_pte(pteval_t pte) | 429 | static pte_t xen_make_pte(pteval_t pte) |
515 | { | 430 | { |
516 | phys_addr_t addr = (pte & PTE_PFN_MASK); | 431 | phys_addr_t addr = (pte & PTE_PFN_MASK); |
517 | 432 | ||
@@ -581,20 +496,20 @@ pte_t xen_make_pte_debug(pteval_t pte) | |||
581 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug); | 496 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_debug); |
582 | #endif | 497 | #endif |
583 | 498 | ||
584 | pgd_t xen_make_pgd(pgdval_t pgd) | 499 | static pgd_t xen_make_pgd(pgdval_t pgd) |
585 | { | 500 | { |
586 | pgd = pte_pfn_to_mfn(pgd); | 501 | pgd = pte_pfn_to_mfn(pgd); |
587 | return native_make_pgd(pgd); | 502 | return native_make_pgd(pgd); |
588 | } | 503 | } |
589 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); | 504 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); |
590 | 505 | ||
591 | pmdval_t xen_pmd_val(pmd_t pmd) | 506 | static pmdval_t xen_pmd_val(pmd_t pmd) |
592 | { | 507 | { |
593 | return pte_mfn_to_pfn(pmd.pmd); | 508 | return pte_mfn_to_pfn(pmd.pmd); |
594 | } | 509 | } |
595 | PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val); | 510 | PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val); |
596 | 511 | ||
597 | void xen_set_pud_hyper(pud_t *ptr, pud_t val) | 512 | static void xen_set_pud_hyper(pud_t *ptr, pud_t val) |
598 | { | 513 | { |
599 | struct mmu_update u; | 514 | struct mmu_update u; |
600 | 515 | ||
@@ -607,17 +522,13 @@ void xen_set_pud_hyper(pud_t *ptr, pud_t val) | |||
607 | u.val = pud_val_ma(val); | 522 | u.val = pud_val_ma(val); |
608 | xen_extend_mmu_update(&u); | 523 | xen_extend_mmu_update(&u); |
609 | 524 | ||
610 | ADD_STATS(pud_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); | ||
611 | |||
612 | xen_mc_issue(PARAVIRT_LAZY_MMU); | 525 | xen_mc_issue(PARAVIRT_LAZY_MMU); |
613 | 526 | ||
614 | preempt_enable(); | 527 | preempt_enable(); |
615 | } | 528 | } |
616 | 529 | ||
617 | void xen_set_pud(pud_t *ptr, pud_t val) | 530 | static void xen_set_pud(pud_t *ptr, pud_t val) |
618 | { | 531 | { |
619 | ADD_STATS(pud_update, 1); | ||
620 | |||
621 | /* If page is not pinned, we can just update the entry | 532 | /* If page is not pinned, we can just update the entry |
622 | directly */ | 533 | directly */ |
623 | if (!xen_page_pinned(ptr)) { | 534 | if (!xen_page_pinned(ptr)) { |
@@ -625,56 +536,28 @@ void xen_set_pud(pud_t *ptr, pud_t val) | |||
625 | return; | 536 | return; |
626 | } | 537 | } |
627 | 538 | ||
628 | ADD_STATS(pud_update_pinned, 1); | ||
629 | |||
630 | xen_set_pud_hyper(ptr, val); | 539 | xen_set_pud_hyper(ptr, val); |
631 | } | 540 | } |
632 | 541 | ||
633 | void xen_set_pte(pte_t *ptep, pte_t pte) | ||
634 | { | ||
635 | if (xen_iomap_pte(pte)) { | ||
636 | xen_set_iomap_pte(ptep, pte); | ||
637 | return; | ||
638 | } | ||
639 | |||
640 | ADD_STATS(pte_update, 1); | ||
641 | // ADD_STATS(pte_update_pinned, xen_page_pinned(ptep)); | ||
642 | ADD_STATS(pte_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); | ||
643 | |||
644 | #ifdef CONFIG_X86_PAE | 542 | #ifdef CONFIG_X86_PAE |
645 | ptep->pte_high = pte.pte_high; | 543 | static void xen_set_pte_atomic(pte_t *ptep, pte_t pte) |
646 | smp_wmb(); | ||
647 | ptep->pte_low = pte.pte_low; | ||
648 | #else | ||
649 | *ptep = pte; | ||
650 | #endif | ||
651 | } | ||
652 | |||
653 | #ifdef CONFIG_X86_PAE | ||
654 | void xen_set_pte_atomic(pte_t *ptep, pte_t pte) | ||
655 | { | 544 | { |
656 | if (xen_iomap_pte(pte)) { | ||
657 | xen_set_iomap_pte(ptep, pte); | ||
658 | return; | ||
659 | } | ||
660 | |||
661 | set_64bit((u64 *)ptep, native_pte_val(pte)); | 545 | set_64bit((u64 *)ptep, native_pte_val(pte)); |
662 | } | 546 | } |
663 | 547 | ||
664 | void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) | 548 | static void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) |
665 | { | 549 | { |
666 | ptep->pte_low = 0; | 550 | if (!xen_batched_set_pte(ptep, native_make_pte(0))) |
667 | smp_wmb(); /* make sure low gets written first */ | 551 | native_pte_clear(mm, addr, ptep); |
668 | ptep->pte_high = 0; | ||
669 | } | 552 | } |
670 | 553 | ||
671 | void xen_pmd_clear(pmd_t *pmdp) | 554 | static void xen_pmd_clear(pmd_t *pmdp) |
672 | { | 555 | { |
673 | set_pmd(pmdp, __pmd(0)); | 556 | set_pmd(pmdp, __pmd(0)); |
674 | } | 557 | } |
675 | #endif /* CONFIG_X86_PAE */ | 558 | #endif /* CONFIG_X86_PAE */ |
676 | 559 | ||
677 | pmd_t xen_make_pmd(pmdval_t pmd) | 560 | static pmd_t xen_make_pmd(pmdval_t pmd) |
678 | { | 561 | { |
679 | pmd = pte_pfn_to_mfn(pmd); | 562 | pmd = pte_pfn_to_mfn(pmd); |
680 | return native_make_pmd(pmd); | 563 | return native_make_pmd(pmd); |
@@ -682,13 +565,13 @@ pmd_t xen_make_pmd(pmdval_t pmd) | |||
682 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); | 565 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); |
683 | 566 | ||
684 | #if PAGETABLE_LEVELS == 4 | 567 | #if PAGETABLE_LEVELS == 4 |
685 | pudval_t xen_pud_val(pud_t pud) | 568 | static pudval_t xen_pud_val(pud_t pud) |
686 | { | 569 | { |
687 | return pte_mfn_to_pfn(pud.pud); | 570 | return pte_mfn_to_pfn(pud.pud); |
688 | } | 571 | } |
689 | PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); | 572 | PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); |
690 | 573 | ||
691 | pud_t xen_make_pud(pudval_t pud) | 574 | static pud_t xen_make_pud(pudval_t pud) |
692 | { | 575 | { |
693 | pud = pte_pfn_to_mfn(pud); | 576 | pud = pte_pfn_to_mfn(pud); |
694 | 577 | ||
@@ -696,7 +579,7 @@ pud_t xen_make_pud(pudval_t pud) | |||
696 | } | 579 | } |
697 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud); | 580 | PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud); |
698 | 581 | ||
699 | pgd_t *xen_get_user_pgd(pgd_t *pgd) | 582 | static pgd_t *xen_get_user_pgd(pgd_t *pgd) |
700 | { | 583 | { |
701 | pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); | 584 | pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); |
702 | unsigned offset = pgd - pgd_page; | 585 | unsigned offset = pgd - pgd_page; |
@@ -728,7 +611,7 @@ static void __xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) | |||
728 | * 2. It is always pinned | 611 | * 2. It is always pinned |
729 | * 3. It has no user pagetable attached to it | 612 | * 3. It has no user pagetable attached to it |
730 | */ | 613 | */ |
731 | void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) | 614 | static void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) |
732 | { | 615 | { |
733 | preempt_disable(); | 616 | preempt_disable(); |
734 | 617 | ||
@@ -741,12 +624,10 @@ void __init xen_set_pgd_hyper(pgd_t *ptr, pgd_t val) | |||
741 | preempt_enable(); | 624 | preempt_enable(); |
742 | } | 625 | } |
743 | 626 | ||
744 | void xen_set_pgd(pgd_t *ptr, pgd_t val) | 627 | static void xen_set_pgd(pgd_t *ptr, pgd_t val) |
745 | { | 628 | { |
746 | pgd_t *user_ptr = xen_get_user_pgd(ptr); | 629 | pgd_t *user_ptr = xen_get_user_pgd(ptr); |
747 | 630 | ||
748 | ADD_STATS(pgd_update, 1); | ||
749 | |||
750 | /* If page is not pinned, we can just update the entry | 631 | /* If page is not pinned, we can just update the entry |
751 | directly */ | 632 | directly */ |
752 | if (!xen_page_pinned(ptr)) { | 633 | if (!xen_page_pinned(ptr)) { |
@@ -758,9 +639,6 @@ void xen_set_pgd(pgd_t *ptr, pgd_t val) | |||
758 | return; | 639 | return; |
759 | } | 640 | } |
760 | 641 | ||
761 | ADD_STATS(pgd_update_pinned, 1); | ||
762 | ADD_STATS(pgd_update_batched, paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU); | ||
763 | |||
764 | /* If it's pinned, then we can at least batch the kernel and | 642 | /* If it's pinned, then we can at least batch the kernel and |
765 | user updates together. */ | 643 | user updates together. */ |
766 | xen_mc_batch(); | 644 | xen_mc_batch(); |
@@ -1162,14 +1040,14 @@ void xen_mm_unpin_all(void) | |||
1162 | spin_unlock(&pgd_lock); | 1040 | spin_unlock(&pgd_lock); |
1163 | } | 1041 | } |
1164 | 1042 | ||
1165 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) | 1043 | static void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next) |
1166 | { | 1044 | { |
1167 | spin_lock(&next->page_table_lock); | 1045 | spin_lock(&next->page_table_lock); |
1168 | xen_pgd_pin(next); | 1046 | xen_pgd_pin(next); |
1169 | spin_unlock(&next->page_table_lock); | 1047 | spin_unlock(&next->page_table_lock); |
1170 | } | 1048 | } |
1171 | 1049 | ||
1172 | void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) | 1050 | static void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm) |
1173 | { | 1051 | { |
1174 | spin_lock(&mm->page_table_lock); | 1052 | spin_lock(&mm->page_table_lock); |
1175 | xen_pgd_pin(mm); | 1053 | xen_pgd_pin(mm); |
@@ -1256,7 +1134,7 @@ static void xen_drop_mm_ref(struct mm_struct *mm) | |||
1256 | * pagetable because of lazy tlb flushing. This means we need to | 1134 | * pagetable because of lazy tlb flushing. This means we need to |
1257 | * switch all CPUs off this pagetable before we can unpin it. | 1135 | * switch all CPUs off this pagetable before we can unpin it. |
1258 | */ | 1136 | */ |
1259 | void xen_exit_mmap(struct mm_struct *mm) | 1137 | static void xen_exit_mmap(struct mm_struct *mm) |
1260 | { | 1138 | { |
1261 | get_cpu(); /* make sure we don't move around */ | 1139 | get_cpu(); /* make sure we don't move around */ |
1262 | xen_drop_mm_ref(mm); | 1140 | xen_drop_mm_ref(mm); |
@@ -2371,7 +2249,7 @@ static int remap_area_mfn_pte_fn(pte_t *ptep, pgtable_t token, | |||
2371 | struct remap_data *rmd = data; | 2249 | struct remap_data *rmd = data; |
2372 | pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot)); | 2250 | pte_t pte = pte_mkspecial(pfn_pte(rmd->mfn++, rmd->prot)); |
2373 | 2251 | ||
2374 | rmd->mmu_update->ptr = arbitrary_virt_to_machine(ptep).maddr; | 2252 | rmd->mmu_update->ptr = virt_to_machine(ptep).maddr; |
2375 | rmd->mmu_update->val = pte_val_ma(pte); | 2253 | rmd->mmu_update->val = pte_val_ma(pte); |
2376 | rmd->mmu_update++; | 2254 | rmd->mmu_update++; |
2377 | 2255 | ||
@@ -2425,7 +2303,6 @@ out: | |||
2425 | EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); | 2303 | EXPORT_SYMBOL_GPL(xen_remap_domain_mfn_range); |
2426 | 2304 | ||
2427 | #ifdef CONFIG_XEN_DEBUG_FS | 2305 | #ifdef CONFIG_XEN_DEBUG_FS |
2428 | |||
2429 | static int p2m_dump_open(struct inode *inode, struct file *filp) | 2306 | static int p2m_dump_open(struct inode *inode, struct file *filp) |
2430 | { | 2307 | { |
2431 | return single_open(filp, p2m_dump_show, NULL); | 2308 | return single_open(filp, p2m_dump_show, NULL); |
@@ -2437,65 +2314,4 @@ static const struct file_operations p2m_dump_fops = { | |||
2437 | .llseek = seq_lseek, | 2314 | .llseek = seq_lseek, |
2438 | .release = single_release, | 2315 | .release = single_release, |
2439 | }; | 2316 | }; |
2440 | 2317 | #endif /* CONFIG_XEN_DEBUG_FS */ | |
2441 | static struct dentry *d_mmu_debug; | ||
2442 | |||
2443 | static int __init xen_mmu_debugfs(void) | ||
2444 | { | ||
2445 | struct dentry *d_xen = xen_init_debugfs(); | ||
2446 | |||
2447 | if (d_xen == NULL) | ||
2448 | return -ENOMEM; | ||
2449 | |||
2450 | d_mmu_debug = debugfs_create_dir("mmu", d_xen); | ||
2451 | |||
2452 | debugfs_create_u8("zero_stats", 0644, d_mmu_debug, &zero_stats); | ||
2453 | |||
2454 | debugfs_create_u32("pgd_update", 0444, d_mmu_debug, &mmu_stats.pgd_update); | ||
2455 | debugfs_create_u32("pgd_update_pinned", 0444, d_mmu_debug, | ||
2456 | &mmu_stats.pgd_update_pinned); | ||
2457 | debugfs_create_u32("pgd_update_batched", 0444, d_mmu_debug, | ||
2458 | &mmu_stats.pgd_update_pinned); | ||
2459 | |||
2460 | debugfs_create_u32("pud_update", 0444, d_mmu_debug, &mmu_stats.pud_update); | ||
2461 | debugfs_create_u32("pud_update_pinned", 0444, d_mmu_debug, | ||
2462 | &mmu_stats.pud_update_pinned); | ||
2463 | debugfs_create_u32("pud_update_batched", 0444, d_mmu_debug, | ||
2464 | &mmu_stats.pud_update_pinned); | ||
2465 | |||
2466 | debugfs_create_u32("pmd_update", 0444, d_mmu_debug, &mmu_stats.pmd_update); | ||
2467 | debugfs_create_u32("pmd_update_pinned", 0444, d_mmu_debug, | ||
2468 | &mmu_stats.pmd_update_pinned); | ||
2469 | debugfs_create_u32("pmd_update_batched", 0444, d_mmu_debug, | ||
2470 | &mmu_stats.pmd_update_pinned); | ||
2471 | |||
2472 | debugfs_create_u32("pte_update", 0444, d_mmu_debug, &mmu_stats.pte_update); | ||
2473 | // debugfs_create_u32("pte_update_pinned", 0444, d_mmu_debug, | ||
2474 | // &mmu_stats.pte_update_pinned); | ||
2475 | debugfs_create_u32("pte_update_batched", 0444, d_mmu_debug, | ||
2476 | &mmu_stats.pte_update_pinned); | ||
2477 | |||
2478 | debugfs_create_u32("mmu_update", 0444, d_mmu_debug, &mmu_stats.mmu_update); | ||
2479 | debugfs_create_u32("mmu_update_extended", 0444, d_mmu_debug, | ||
2480 | &mmu_stats.mmu_update_extended); | ||
2481 | xen_debugfs_create_u32_array("mmu_update_histo", 0444, d_mmu_debug, | ||
2482 | mmu_stats.mmu_update_histo, 20); | ||
2483 | |||
2484 | debugfs_create_u32("set_pte_at", 0444, d_mmu_debug, &mmu_stats.set_pte_at); | ||
2485 | debugfs_create_u32("set_pte_at_batched", 0444, d_mmu_debug, | ||
2486 | &mmu_stats.set_pte_at_batched); | ||
2487 | debugfs_create_u32("set_pte_at_current", 0444, d_mmu_debug, | ||
2488 | &mmu_stats.set_pte_at_current); | ||
2489 | debugfs_create_u32("set_pte_at_kernel", 0444, d_mmu_debug, | ||
2490 | &mmu_stats.set_pte_at_kernel); | ||
2491 | |||
2492 | debugfs_create_u32("prot_commit", 0444, d_mmu_debug, &mmu_stats.prot_commit); | ||
2493 | debugfs_create_u32("prot_commit_batched", 0444, d_mmu_debug, | ||
2494 | &mmu_stats.prot_commit_batched); | ||
2495 | |||
2496 | debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops); | ||
2497 | return 0; | ||
2498 | } | ||
2499 | fs_initcall(xen_mmu_debugfs); | ||
2500 | |||
2501 | #endif /* CONFIG_XEN_DEBUG_FS */ | ||
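The mmu.c hunks above remove the CONFIG_XEN_DEBUG_FS statistics (the ADD_STATS counters, the mmu_stats histogram and the debugfs tree that exported them), make most of the pv-ops static, and route set_pte/set_pte_at/pte_clear through one helper, xen_batched_set_pte(), which queues a struct mmu_update when the CPU is inside a lazy-MMU section and otherwise lets the caller fall back to a plain native write. A minimal user-space sketch of that control flow, with the multicall machinery reduced to a counter (all names here are illustrative, not the kernel's):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t pte_t;

    static bool lazy_mmu_mode;       /* stands in for paravirt_get_lazy_mode() */
    static unsigned queued_updates;  /* stands in for the pending multicall batch */

    /* In the kernel this builds a struct mmu_update and extends the current
     * multicall; here we just count the queued update and apply the value. */
    static void queue_mmu_update(pte_t *ptep, pte_t val)
    {
            queued_updates++;
            *ptep = val;
    }

    static bool batched_set_pte(pte_t *ptep, pte_t val)
    {
            if (!lazy_mmu_mode)
                    return false;            /* caller must write directly */
            queue_mmu_update(ptep, val);
            return true;
    }

    static void set_pte(pte_t *ptep, pte_t val)
    {
            if (!batched_set_pte(ptep, val))
                    *ptep = val;             /* the native_set_pte() fallback */
    }

    int main(void)
    {
            pte_t pte = 0;

            set_pte(&pte, 0x1000);           /* direct path */
            lazy_mmu_mode = true;
            set_pte(&pte, 0x2000);           /* batched path */
            printf("pte=%#llx, batched updates=%u\n",
                   (unsigned long long)pte, queued_updates);
            return 0;
    }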
diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h index 537bb9aab777..73809bb951b4 100644 --- a/arch/x86/xen/mmu.h +++ b/arch/x86/xen/mmu.h | |||
@@ -15,43 +15,6 @@ bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); | |||
15 | 15 | ||
16 | void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); | 16 | void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); |
17 | 17 | ||
18 | |||
19 | void xen_activate_mm(struct mm_struct *prev, struct mm_struct *next); | ||
20 | void xen_dup_mmap(struct mm_struct *oldmm, struct mm_struct *mm); | ||
21 | void xen_exit_mmap(struct mm_struct *mm); | ||
22 | |||
23 | pteval_t xen_pte_val(pte_t); | ||
24 | pmdval_t xen_pmd_val(pmd_t); | ||
25 | pgdval_t xen_pgd_val(pgd_t); | ||
26 | |||
27 | pte_t xen_make_pte(pteval_t); | ||
28 | pmd_t xen_make_pmd(pmdval_t); | ||
29 | pgd_t xen_make_pgd(pgdval_t); | ||
30 | |||
31 | void xen_set_pte(pte_t *ptep, pte_t pteval); | ||
32 | void xen_set_pte_at(struct mm_struct *mm, unsigned long addr, | ||
33 | pte_t *ptep, pte_t pteval); | ||
34 | |||
35 | #ifdef CONFIG_X86_PAE | ||
36 | void xen_set_pte_atomic(pte_t *ptep, pte_t pte); | ||
37 | void xen_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | ||
38 | void xen_pmd_clear(pmd_t *pmdp); | ||
39 | #endif /* CONFIG_X86_PAE */ | ||
40 | |||
41 | void xen_set_pmd(pmd_t *pmdp, pmd_t pmdval); | ||
42 | void xen_set_pud(pud_t *ptr, pud_t val); | ||
43 | void xen_set_pmd_hyper(pmd_t *pmdp, pmd_t pmdval); | ||
44 | void xen_set_pud_hyper(pud_t *ptr, pud_t val); | ||
45 | |||
46 | #if PAGETABLE_LEVELS == 4 | ||
47 | pudval_t xen_pud_val(pud_t pud); | ||
48 | pud_t xen_make_pud(pudval_t pudval); | ||
49 | void xen_set_pgd(pgd_t *pgdp, pgd_t pgd); | ||
50 | void xen_set_pgd_hyper(pgd_t *pgdp, pgd_t pgd); | ||
51 | #endif | ||
52 | |||
53 | pgd_t *xen_get_user_pgd(pgd_t *pgd); | ||
54 | |||
55 | pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep); | 18 | pte_t xen_ptep_modify_prot_start(struct mm_struct *mm, unsigned long addr, pte_t *ptep); |
56 | void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, | 19 | void xen_ptep_modify_prot_commit(struct mm_struct *mm, unsigned long addr, |
57 | pte_t *ptep, pte_t pte); | 20 | pte_t *ptep, pte_t pte); |
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig index 7c275f5d0df0..5d43c1f8ada8 100644 --- a/arch/xtensa/Kconfig +++ b/arch/xtensa/Kconfig | |||
@@ -20,12 +20,6 @@ config XTENSA | |||
20 | config RWSEM_XCHGADD_ALGORITHM | 20 | config RWSEM_XCHGADD_ALGORITHM |
21 | def_bool y | 21 | def_bool y |
22 | 22 | ||
23 | config GENERIC_FIND_NEXT_BIT | ||
24 | def_bool y | ||
25 | |||
26 | config GENERIC_FIND_BIT_LE | ||
27 | def_bool y | ||
28 | |||
29 | config GENERIC_HWEIGHT | 23 | config GENERIC_HWEIGHT |
30 | def_bool y | 24 | def_bool y |
31 | 25 | ||
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c index 07371cfdfae6..bcaf16ee6ad1 100644 --- a/block/blk-cgroup.c +++ b/block/blk-cgroup.c | |||
@@ -30,10 +30,8 @@ EXPORT_SYMBOL_GPL(blkio_root_cgroup); | |||
30 | 30 | ||
31 | static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *, | 31 | static struct cgroup_subsys_state *blkiocg_create(struct cgroup_subsys *, |
32 | struct cgroup *); | 32 | struct cgroup *); |
33 | static int blkiocg_can_attach(struct cgroup_subsys *, struct cgroup *, | 33 | static int blkiocg_can_attach_task(struct cgroup *, struct task_struct *); |
34 | struct task_struct *, bool); | 34 | static void blkiocg_attach_task(struct cgroup *, struct task_struct *); |
35 | static void blkiocg_attach(struct cgroup_subsys *, struct cgroup *, | ||
36 | struct cgroup *, struct task_struct *, bool); | ||
37 | static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *); | 35 | static void blkiocg_destroy(struct cgroup_subsys *, struct cgroup *); |
38 | static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *); | 36 | static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *); |
39 | 37 | ||
@@ -46,8 +44,8 @@ static int blkiocg_populate(struct cgroup_subsys *, struct cgroup *); | |||
46 | struct cgroup_subsys blkio_subsys = { | 44 | struct cgroup_subsys blkio_subsys = { |
47 | .name = "blkio", | 45 | .name = "blkio", |
48 | .create = blkiocg_create, | 46 | .create = blkiocg_create, |
49 | .can_attach = blkiocg_can_attach, | 47 | .can_attach_task = blkiocg_can_attach_task, |
50 | .attach = blkiocg_attach, | 48 | .attach_task = blkiocg_attach_task, |
51 | .destroy = blkiocg_destroy, | 49 | .destroy = blkiocg_destroy, |
52 | .populate = blkiocg_populate, | 50 | .populate = blkiocg_populate, |
53 | #ifdef CONFIG_BLK_CGROUP | 51 | #ifdef CONFIG_BLK_CGROUP |
@@ -1616,9 +1614,7 @@ done: | |||
1616 | * of the main cic data structures. For now we allow a task to change | 1614 | * of the main cic data structures. For now we allow a task to change |
1617 | * its cgroup only if it's the only owner of its ioc. | 1615 | * its cgroup only if it's the only owner of its ioc. |
1618 | */ | 1616 | */ |
1619 | static int blkiocg_can_attach(struct cgroup_subsys *subsys, | 1617 | static int blkiocg_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) |
1620 | struct cgroup *cgroup, struct task_struct *tsk, | ||
1621 | bool threadgroup) | ||
1622 | { | 1618 | { |
1623 | struct io_context *ioc; | 1619 | struct io_context *ioc; |
1624 | int ret = 0; | 1620 | int ret = 0; |
@@ -1633,9 +1629,7 @@ static int blkiocg_can_attach(struct cgroup_subsys *subsys, | |||
1633 | return ret; | 1629 | return ret; |
1634 | } | 1630 | } |
1635 | 1631 | ||
1636 | static void blkiocg_attach(struct cgroup_subsys *subsys, struct cgroup *cgroup, | 1632 | static void blkiocg_attach_task(struct cgroup *cgrp, struct task_struct *tsk) |
1637 | struct cgroup *prev, struct task_struct *tsk, | ||
1638 | bool threadgroup) | ||
1639 | { | 1633 | { |
1640 | struct io_context *ioc; | 1634 | struct io_context *ioc; |
1641 | 1635 | ||
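The blk-cgroup.c change above tracks a cgroup core API change: instead of the old .can_attach/.attach callbacks, which received the subsystem pointer, the cgroups, the task and a threadgroup flag, the subsystem now registers per-task .can_attach_task/.attach_task hooks that take only the target cgroup and the task. A self-contained sketch of that callback shape, using minimal stand-in types rather than the real kernel structures (every name here is illustrative):

    #include <stdio.h>

    struct cgroup      { const char *name; };    /* stand-in, not the kernel type */
    struct task_struct { int pid; };              /* stand-in, not the kernel type */

    struct cgroup_subsys {
            const char *name;
            int  (*can_attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
            void (*attach_task)(struct cgroup *cgrp, struct task_struct *tsk);
    };

    /* Veto phase: return 0 to allow the move, a negative value to refuse it
     * (blk-cgroup refuses tasks that share their io_context, for example). */
    static int demo_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
    {
            printf("may task %d join %s? yes\n", tsk->pid, cgrp->name);
            return 0;
    }

    /* Commit phase: called once the move is guaranteed to happen. */
    static void demo_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
    {
            printf("task %d attached to %s\n", tsk->pid, cgrp->name);
    }

    static struct cgroup_subsys demo_subsys = {
            .name            = "demo",
            .can_attach_task = demo_can_attach_task,
            .attach_task     = demo_attach_task,
    };

    int main(void)
    {
            struct cgroup cg = { "demo" };
            struct task_struct t = { 42 };

            if (demo_subsys.can_attach_task(&cg, &t) == 0)
                    demo_subsys.attach_task(&cg, &t);
            return 0;
    }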
diff --git a/block/blk-core.c b/block/blk-core.c index c8303e9d919d..d2f8f4049abd 100644 --- a/block/blk-core.c +++ b/block/blk-core.c | |||
@@ -345,6 +345,7 @@ void blk_put_queue(struct request_queue *q) | |||
345 | { | 345 | { |
346 | kobject_put(&q->kobj); | 346 | kobject_put(&q->kobj); |
347 | } | 347 | } |
348 | EXPORT_SYMBOL(blk_put_queue); | ||
348 | 349 | ||
349 | /* | 350 | /* |
350 | * Note: If a driver supplied the queue lock, it should not zap that lock | 351 | * Note: If a driver supplied the queue lock, it should not zap that lock |
@@ -566,6 +567,7 @@ int blk_get_queue(struct request_queue *q) | |||
566 | 567 | ||
567 | return 1; | 568 | return 1; |
568 | } | 569 | } |
570 | EXPORT_SYMBOL(blk_get_queue); | ||
569 | 571 | ||
570 | static inline void blk_free_request(struct request_queue *q, struct request *rq) | 572 | static inline void blk_free_request(struct request_queue *q, struct request *rq) |
571 | { | 573 | { |
@@ -1130,7 +1132,6 @@ static bool bio_attempt_front_merge(struct request_queue *q, | |||
1130 | struct request *req, struct bio *bio) | 1132 | struct request *req, struct bio *bio) |
1131 | { | 1133 | { |
1132 | const int ff = bio->bi_rw & REQ_FAILFAST_MASK; | 1134 | const int ff = bio->bi_rw & REQ_FAILFAST_MASK; |
1133 | sector_t sector; | ||
1134 | 1135 | ||
1135 | if (!ll_front_merge_fn(q, req, bio)) | 1136 | if (!ll_front_merge_fn(q, req, bio)) |
1136 | return false; | 1137 | return false; |
@@ -1140,8 +1141,6 @@ static bool bio_attempt_front_merge(struct request_queue *q, | |||
1140 | if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) | 1141 | if ((req->cmd_flags & REQ_FAILFAST_MASK) != ff) |
1141 | blk_rq_set_mixed_merge(req); | 1142 | blk_rq_set_mixed_merge(req); |
1142 | 1143 | ||
1143 | sector = bio->bi_sector; | ||
1144 | |||
1145 | bio->bi_next = req->bio; | 1144 | bio->bi_next = req->bio; |
1146 | req->bio = bio; | 1145 | req->bio = bio; |
1147 | 1146 | ||
diff --git a/block/genhd.c b/block/genhd.c index 2dd988723d73..95822ae25cfe 100644 --- a/block/genhd.c +++ b/block/genhd.c | |||
@@ -1728,7 +1728,7 @@ static void disk_add_events(struct gendisk *disk) | |||
1728 | { | 1728 | { |
1729 | struct disk_events *ev; | 1729 | struct disk_events *ev; |
1730 | 1730 | ||
1731 | if (!disk->fops->check_events || !(disk->events | disk->async_events)) | 1731 | if (!disk->fops->check_events) |
1732 | return; | 1732 | return; |
1733 | 1733 | ||
1734 | ev = kzalloc(sizeof(*ev), GFP_KERNEL); | 1734 | ev = kzalloc(sizeof(*ev), GFP_KERNEL); |
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c index ffd8797faf4f..471a04013fe0 100644 --- a/drivers/bcma/host_pci.c +++ b/drivers/bcma/host_pci.c | |||
@@ -6,6 +6,7 @@ | |||
6 | */ | 6 | */ |
7 | 7 | ||
8 | #include "bcma_private.h" | 8 | #include "bcma_private.h" |
9 | #include <linux/slab.h> | ||
9 | #include <linux/bcma/bcma.h> | 10 | #include <linux/bcma/bcma.h> |
10 | #include <linux/pci.h> | 11 | #include <linux/pci.h> |
11 | 12 | ||
diff --git a/drivers/block/brd.c b/drivers/block/brd.c index b7f51e4594f8..dba1c32e1ddf 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c | |||
@@ -35,10 +35,6 @@ | |||
35 | */ | 35 | */ |
36 | struct brd_device { | 36 | struct brd_device { |
37 | int brd_number; | 37 | int brd_number; |
38 | int brd_refcnt; | ||
39 | loff_t brd_offset; | ||
40 | loff_t brd_sizelimit; | ||
41 | unsigned brd_blocksize; | ||
42 | 38 | ||
43 | struct request_queue *brd_queue; | 39 | struct request_queue *brd_queue; |
44 | struct gendisk *brd_disk; | 40 | struct gendisk *brd_disk; |
@@ -440,11 +436,11 @@ static int rd_nr; | |||
440 | int rd_size = CONFIG_BLK_DEV_RAM_SIZE; | 436 | int rd_size = CONFIG_BLK_DEV_RAM_SIZE; |
441 | static int max_part; | 437 | static int max_part; |
442 | static int part_shift; | 438 | static int part_shift; |
443 | module_param(rd_nr, int, 0); | 439 | module_param(rd_nr, int, S_IRUGO); |
444 | MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices"); | 440 | MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices"); |
445 | module_param(rd_size, int, 0); | 441 | module_param(rd_size, int, S_IRUGO); |
446 | MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes."); | 442 | MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes."); |
447 | module_param(max_part, int, 0); | 443 | module_param(max_part, int, S_IRUGO); |
448 | MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk"); | 444 | MODULE_PARM_DESC(max_part, "Maximum number of partitions per RAM disk"); |
449 | MODULE_LICENSE("GPL"); | 445 | MODULE_LICENSE("GPL"); |
450 | MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR); | 446 | MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR); |
@@ -552,7 +548,7 @@ static struct kobject *brd_probe(dev_t dev, int *part, void *data) | |||
552 | struct kobject *kobj; | 548 | struct kobject *kobj; |
553 | 549 | ||
554 | mutex_lock(&brd_devices_mutex); | 550 | mutex_lock(&brd_devices_mutex); |
555 | brd = brd_init_one(dev & MINORMASK); | 551 | brd = brd_init_one(MINOR(dev) >> part_shift); |
556 | kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM); | 552 | kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM); |
557 | mutex_unlock(&brd_devices_mutex); | 553 | mutex_unlock(&brd_devices_mutex); |
558 | 554 | ||
@@ -575,25 +571,39 @@ static int __init brd_init(void) | |||
575 | * | 571 | * |
576 | * (1) if rd_nr is specified, create that many upfront, and this | 572 | * (1) if rd_nr is specified, create that many upfront, and this |
577 | * also becomes a hard limit. | 573 | * also becomes a hard limit. |
578 | * (2) if rd_nr is not specified, create 1 rd device on module | 574 | * (2) if rd_nr is not specified, create CONFIG_BLK_DEV_RAM_COUNT |
579 | * load, user can further extend brd device by create dev node | 575 | * (default 16) rd device on module load, user can further |
580 | * themselves and have kernel automatically instantiate actual | 576 | * extend brd device by create dev node themselves and have |
581 | * device on-demand. | 577 | * kernel automatically instantiate actual device on-demand. |
582 | */ | 578 | */ |
583 | 579 | ||
584 | part_shift = 0; | 580 | part_shift = 0; |
585 | if (max_part > 0) | 581 | if (max_part > 0) { |
586 | part_shift = fls(max_part); | 582 | part_shift = fls(max_part); |
587 | 583 | ||
584 | /* | ||
585 | * Adjust max_part according to part_shift as it is exported | ||
586 | * to user space so that the user can decide the correct minor number | ||
587 | * if they want to create more devices. | ||
588 | * | ||
589 | * Note that -1 is required because partition 0 is reserved | ||
590 | * for the whole disk. | ||
591 | */ | ||
592 | max_part = (1UL << part_shift) - 1; | ||
593 | } | ||
594 | |||
595 | if ((1UL << part_shift) > DISK_MAX_PARTS) | ||
596 | return -EINVAL; | ||
597 | |||
588 | if (rd_nr > 1UL << (MINORBITS - part_shift)) | 598 | if (rd_nr > 1UL << (MINORBITS - part_shift)) |
589 | return -EINVAL; | 599 | return -EINVAL; |
590 | 600 | ||
591 | if (rd_nr) { | 601 | if (rd_nr) { |
592 | nr = rd_nr; | 602 | nr = rd_nr; |
593 | range = rd_nr; | 603 | range = rd_nr << part_shift; |
594 | } else { | 604 | } else { |
595 | nr = CONFIG_BLK_DEV_RAM_COUNT; | 605 | nr = CONFIG_BLK_DEV_RAM_COUNT; |
596 | range = 1UL << (MINORBITS - part_shift); | 606 | range = 1UL << MINORBITS; |
597 | } | 607 | } |
598 | 608 | ||
599 | if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) | 609 | if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) |
@@ -632,7 +642,7 @@ static void __exit brd_exit(void) | |||
632 | unsigned long range; | 642 | unsigned long range; |
633 | struct brd_device *brd, *next; | 643 | struct brd_device *brd, *next; |
634 | 644 | ||
635 | range = rd_nr ? rd_nr : 1UL << (MINORBITS - part_shift); | 645 | range = rd_nr ? rd_nr << part_shift : 1UL << MINORBITS; |
636 | 646 | ||
637 | list_for_each_entry_safe(brd, next, &brd_devices, brd_list) | 647 | list_for_each_entry_safe(brd, next, &brd_devices, brd_list) |
638 | brd_del_one(brd); | 648 | brd_del_one(brd); |
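The brd.c changes above (and the matching loop.c hunk below) spell out the minor-number layout: part_shift = fls(max_part) rounds the requested partition count up to the next power of two, max_part is then clamped to (1 << part_shift) - 1 because minor 0 within each block stands for the whole disk, and a device node's minor splits into a disk index (minor >> part_shift) and a partition index. A small worked example as plain user-space C (fls_() reimplements the kernel's 1-based fls() so the snippet builds on its own):

    #include <stdio.h>

    /* fls(): position of the highest set bit, 1-based; 0 for an input of 0. */
    static unsigned int fls_(unsigned int x)
    {
            unsigned int r = 0;

            while (x) {
                    r++;
                    x >>= 1;
            }
            return r;
    }

    int main(void)
    {
            unsigned int max_part = 15;          /* e.g. max_part=15 as module parameter */
            unsigned int part_shift = 0;

            if (max_part > 0) {
                    part_shift = fls_(max_part);         /* 15 -> 4 */
                    max_part = (1U << part_shift) - 1;   /* minus one for partition 0 */
            }

            /* Each disk now owns 1 << part_shift consecutive minors. */
            unsigned int minor = 35;             /* some device node's minor number */
            printf("part_shift=%u max_part=%u -> disk %u, partition %u\n",
                   part_shift, max_part,
                   minor >> part_shift,
                   minor & ((1U << part_shift) - 1));
            return 0;
    }

With max_part=15 this prints "part_shift=4 max_part=15 -> disk 2, partition 3", which is exactly the split brd_probe() now performs with MINOR(dev) >> part_shift.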
diff --git a/drivers/block/loop.c b/drivers/block/loop.c index c59a672a3de0..76c8da78212b 100644 --- a/drivers/block/loop.c +++ b/drivers/block/loop.c | |||
@@ -1540,9 +1540,9 @@ static const struct block_device_operations lo_fops = { | |||
1540 | * And now the modules code and kernel interface. | 1540 | * And now the modules code and kernel interface. |
1541 | */ | 1541 | */ |
1542 | static int max_loop; | 1542 | static int max_loop; |
1543 | module_param(max_loop, int, 0); | 1543 | module_param(max_loop, int, S_IRUGO); |
1544 | MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); | 1544 | MODULE_PARM_DESC(max_loop, "Maximum number of loop devices"); |
1545 | module_param(max_part, int, 0); | 1545 | module_param(max_part, int, S_IRUGO); |
1546 | MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); | 1546 | MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device"); |
1547 | MODULE_LICENSE("GPL"); | 1547 | MODULE_LICENSE("GPL"); |
1548 | MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); | 1548 | MODULE_ALIAS_BLOCKDEV_MAJOR(LOOP_MAJOR); |
@@ -1688,9 +1688,20 @@ static int __init loop_init(void) | |||
1688 | */ | 1688 | */ |
1689 | 1689 | ||
1690 | part_shift = 0; | 1690 | part_shift = 0; |
1691 | if (max_part > 0) | 1691 | if (max_part > 0) { |
1692 | part_shift = fls(max_part); | 1692 | part_shift = fls(max_part); |
1693 | 1693 | ||
1694 | /* | ||
1695 | * Adjust max_part according to part_shift as it is exported | ||
1696 | * to user space so that the user can decide the correct minor number | ||
1697 | * if they want to create more devices. | ||
1698 | * | ||
1699 | * Note that -1 is required because partition 0 is reserved | ||
1700 | * for the whole disk. | ||
1701 | */ | ||
1702 | max_part = (1UL << part_shift) - 1; | ||
1703 | } | ||
1704 | |||
1694 | if ((1UL << part_shift) > DISK_MAX_PARTS) | 1705 | if ((1UL << part_shift) > DISK_MAX_PARTS) |
1695 | return -EINVAL; | 1706 | return -EINVAL; |
1696 | 1707 | ||
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c index 38223e93aa98..58c0e6387cf7 100644 --- a/drivers/char/ipmi/ipmi_msghandler.c +++ b/drivers/char/ipmi/ipmi_msghandler.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <asm/system.h> | 36 | #include <asm/system.h> |
37 | #include <linux/poll.h> | 37 | #include <linux/poll.h> |
38 | #include <linux/sched.h> | 38 | #include <linux/sched.h> |
39 | #include <linux/seq_file.h> | ||
39 | #include <linux/spinlock.h> | 40 | #include <linux/spinlock.h> |
40 | #include <linux/mutex.h> | 41 | #include <linux/mutex.h> |
41 | #include <linux/slab.h> | 42 | #include <linux/slab.h> |
@@ -1896,102 +1897,128 @@ int ipmi_request_supply_msgs(ipmi_user_t user, | |||
1896 | EXPORT_SYMBOL(ipmi_request_supply_msgs); | 1897 | EXPORT_SYMBOL(ipmi_request_supply_msgs); |
1897 | 1898 | ||
1898 | #ifdef CONFIG_PROC_FS | 1899 | #ifdef CONFIG_PROC_FS |
1899 | static int ipmb_file_read_proc(char *page, char **start, off_t off, | 1900 | static int smi_ipmb_proc_show(struct seq_file *m, void *v) |
1900 | int count, int *eof, void *data) | ||
1901 | { | 1901 | { |
1902 | char *out = (char *) page; | 1902 | ipmi_smi_t intf = m->private; |
1903 | ipmi_smi_t intf = data; | ||
1904 | int i; | 1903 | int i; |
1905 | int rv = 0; | ||
1906 | 1904 | ||
1907 | for (i = 0; i < IPMI_MAX_CHANNELS; i++) | 1905 | seq_printf(m, "%x", intf->channels[0].address); |
1908 | rv += sprintf(out+rv, "%x ", intf->channels[i].address); | 1906 | for (i = 1; i < IPMI_MAX_CHANNELS; i++) |
1909 | out[rv-1] = '\n'; /* Replace the final space with a newline */ | 1907 | seq_printf(m, " %x", intf->channels[i].address); |
1910 | out[rv] = '\0'; | 1908 | return seq_putc(m, '\n'); |
1911 | rv++; | ||
1912 | return rv; | ||
1913 | } | 1909 | } |
1914 | 1910 | ||
1915 | static int version_file_read_proc(char *page, char **start, off_t off, | 1911 | static int smi_ipmb_proc_open(struct inode *inode, struct file *file) |
1916 | int count, int *eof, void *data) | ||
1917 | { | 1912 | { |
1918 | char *out = (char *) page; | 1913 | return single_open(file, smi_ipmb_proc_show, PDE(inode)->data); |
1919 | ipmi_smi_t intf = data; | 1914 | } |
1920 | 1915 | ||
1921 | return sprintf(out, "%u.%u\n", | 1916 | static const struct file_operations smi_ipmb_proc_ops = { |
1917 | .open = smi_ipmb_proc_open, | ||
1918 | .read = seq_read, | ||
1919 | .llseek = seq_lseek, | ||
1920 | .release = single_release, | ||
1921 | }; | ||
1922 | |||
1923 | static int smi_version_proc_show(struct seq_file *m, void *v) | ||
1924 | { | ||
1925 | ipmi_smi_t intf = m->private; | ||
1926 | |||
1927 | return seq_printf(m, "%u.%u\n", | ||
1922 | ipmi_version_major(&intf->bmc->id), | 1928 | ipmi_version_major(&intf->bmc->id), |
1923 | ipmi_version_minor(&intf->bmc->id)); | 1929 | ipmi_version_minor(&intf->bmc->id)); |
1924 | } | 1930 | } |
1925 | 1931 | ||
1926 | static int stat_file_read_proc(char *page, char **start, off_t off, | 1932 | static int smi_version_proc_open(struct inode *inode, struct file *file) |
1927 | int count, int *eof, void *data) | ||
1928 | { | 1933 | { |
1929 | char *out = (char *) page; | 1934 | return single_open(file, smi_version_proc_show, PDE(inode)->data); |
1930 | ipmi_smi_t intf = data; | 1935 | } |
1936 | |||
1937 | static const struct file_operations smi_version_proc_ops = { | ||
1938 | .open = smi_version_proc_open, | ||
1939 | .read = seq_read, | ||
1940 | .llseek = seq_lseek, | ||
1941 | .release = single_release, | ||
1942 | }; | ||
1931 | 1943 | ||
1932 | out += sprintf(out, "sent_invalid_commands: %u\n", | 1944 | static int smi_stats_proc_show(struct seq_file *m, void *v) |
1945 | { | ||
1946 | ipmi_smi_t intf = m->private; | ||
1947 | |||
1948 | seq_printf(m, "sent_invalid_commands: %u\n", | ||
1933 | ipmi_get_stat(intf, sent_invalid_commands)); | 1949 | ipmi_get_stat(intf, sent_invalid_commands)); |
1934 | out += sprintf(out, "sent_local_commands: %u\n", | 1950 | seq_printf(m, "sent_local_commands: %u\n", |
1935 | ipmi_get_stat(intf, sent_local_commands)); | 1951 | ipmi_get_stat(intf, sent_local_commands)); |
1936 | out += sprintf(out, "handled_local_responses: %u\n", | 1952 | seq_printf(m, "handled_local_responses: %u\n", |
1937 | ipmi_get_stat(intf, handled_local_responses)); | 1953 | ipmi_get_stat(intf, handled_local_responses)); |
1938 | out += sprintf(out, "unhandled_local_responses: %u\n", | 1954 | seq_printf(m, "unhandled_local_responses: %u\n", |
1939 | ipmi_get_stat(intf, unhandled_local_responses)); | 1955 | ipmi_get_stat(intf, unhandled_local_responses)); |
1940 | out += sprintf(out, "sent_ipmb_commands: %u\n", | 1956 | seq_printf(m, "sent_ipmb_commands: %u\n", |
1941 | ipmi_get_stat(intf, sent_ipmb_commands)); | 1957 | ipmi_get_stat(intf, sent_ipmb_commands)); |
1942 | out += sprintf(out, "sent_ipmb_command_errs: %u\n", | 1958 | seq_printf(m, "sent_ipmb_command_errs: %u\n", |
1943 | ipmi_get_stat(intf, sent_ipmb_command_errs)); | 1959 | ipmi_get_stat(intf, sent_ipmb_command_errs)); |
1944 | out += sprintf(out, "retransmitted_ipmb_commands: %u\n", | 1960 | seq_printf(m, "retransmitted_ipmb_commands: %u\n", |
1945 | ipmi_get_stat(intf, retransmitted_ipmb_commands)); | 1961 | ipmi_get_stat(intf, retransmitted_ipmb_commands)); |
1946 | out += sprintf(out, "timed_out_ipmb_commands: %u\n", | 1962 | seq_printf(m, "timed_out_ipmb_commands: %u\n", |
1947 | ipmi_get_stat(intf, timed_out_ipmb_commands)); | 1963 | ipmi_get_stat(intf, timed_out_ipmb_commands)); |
1948 | out += sprintf(out, "timed_out_ipmb_broadcasts: %u\n", | 1964 | seq_printf(m, "timed_out_ipmb_broadcasts: %u\n", |
1949 | ipmi_get_stat(intf, timed_out_ipmb_broadcasts)); | 1965 | ipmi_get_stat(intf, timed_out_ipmb_broadcasts)); |
1950 | out += sprintf(out, "sent_ipmb_responses: %u\n", | 1966 | seq_printf(m, "sent_ipmb_responses: %u\n", |
1951 | ipmi_get_stat(intf, sent_ipmb_responses)); | 1967 | ipmi_get_stat(intf, sent_ipmb_responses)); |
1952 | out += sprintf(out, "handled_ipmb_responses: %u\n", | 1968 | seq_printf(m, "handled_ipmb_responses: %u\n", |
1953 | ipmi_get_stat(intf, handled_ipmb_responses)); | 1969 | ipmi_get_stat(intf, handled_ipmb_responses)); |
1954 | out += sprintf(out, "invalid_ipmb_responses: %u\n", | 1970 | seq_printf(m, "invalid_ipmb_responses: %u\n", |
1955 | ipmi_get_stat(intf, invalid_ipmb_responses)); | 1971 | ipmi_get_stat(intf, invalid_ipmb_responses)); |
1956 | out += sprintf(out, "unhandled_ipmb_responses: %u\n", | 1972 | seq_printf(m, "unhandled_ipmb_responses: %u\n", |
1957 | ipmi_get_stat(intf, unhandled_ipmb_responses)); | 1973 | ipmi_get_stat(intf, unhandled_ipmb_responses)); |
1958 | out += sprintf(out, "sent_lan_commands: %u\n", | 1974 | seq_printf(m, "sent_lan_commands: %u\n", |
1959 | ipmi_get_stat(intf, sent_lan_commands)); | 1975 | ipmi_get_stat(intf, sent_lan_commands)); |
1960 | out += sprintf(out, "sent_lan_command_errs: %u\n", | 1976 | seq_printf(m, "sent_lan_command_errs: %u\n", |
1961 | ipmi_get_stat(intf, sent_lan_command_errs)); | 1977 | ipmi_get_stat(intf, sent_lan_command_errs)); |
1962 | out += sprintf(out, "retransmitted_lan_commands: %u\n", | 1978 | seq_printf(m, "retransmitted_lan_commands: %u\n", |
1963 | ipmi_get_stat(intf, retransmitted_lan_commands)); | 1979 | ipmi_get_stat(intf, retransmitted_lan_commands)); |
1964 | out += sprintf(out, "timed_out_lan_commands: %u\n", | 1980 | seq_printf(m, "timed_out_lan_commands: %u\n", |
1965 | ipmi_get_stat(intf, timed_out_lan_commands)); | 1981 | ipmi_get_stat(intf, timed_out_lan_commands)); |
1966 | out += sprintf(out, "sent_lan_responses: %u\n", | 1982 | seq_printf(m, "sent_lan_responses: %u\n", |
1967 | ipmi_get_stat(intf, sent_lan_responses)); | 1983 | ipmi_get_stat(intf, sent_lan_responses)); |
1968 | out += sprintf(out, "handled_lan_responses: %u\n", | 1984 | seq_printf(m, "handled_lan_responses: %u\n", |
1969 | ipmi_get_stat(intf, handled_lan_responses)); | 1985 | ipmi_get_stat(intf, handled_lan_responses)); |
1970 | out += sprintf(out, "invalid_lan_responses: %u\n", | 1986 | seq_printf(m, "invalid_lan_responses: %u\n", |
1971 | ipmi_get_stat(intf, invalid_lan_responses)); | 1987 | ipmi_get_stat(intf, invalid_lan_responses)); |
1972 | out += sprintf(out, "unhandled_lan_responses: %u\n", | 1988 | seq_printf(m, "unhandled_lan_responses: %u\n", |
1973 | ipmi_get_stat(intf, unhandled_lan_responses)); | 1989 | ipmi_get_stat(intf, unhandled_lan_responses)); |
1974 | out += sprintf(out, "handled_commands: %u\n", | 1990 | seq_printf(m, "handled_commands: %u\n", |
1975 | ipmi_get_stat(intf, handled_commands)); | 1991 | ipmi_get_stat(intf, handled_commands)); |
1976 | out += sprintf(out, "invalid_commands: %u\n", | 1992 | seq_printf(m, "invalid_commands: %u\n", |
1977 | ipmi_get_stat(intf, invalid_commands)); | 1993 | ipmi_get_stat(intf, invalid_commands)); |
1978 | out += sprintf(out, "unhandled_commands: %u\n", | 1994 | seq_printf(m, "unhandled_commands: %u\n", |
1979 | ipmi_get_stat(intf, unhandled_commands)); | 1995 | ipmi_get_stat(intf, unhandled_commands)); |
1980 | out += sprintf(out, "invalid_events: %u\n", | 1996 | seq_printf(m, "invalid_events: %u\n", |
1981 | ipmi_get_stat(intf, invalid_events)); | 1997 | ipmi_get_stat(intf, invalid_events)); |
1982 | out += sprintf(out, "events: %u\n", | 1998 | seq_printf(m, "events: %u\n", |
1983 | ipmi_get_stat(intf, events)); | 1999 | ipmi_get_stat(intf, events)); |
1984 | out += sprintf(out, "failed rexmit LAN msgs: %u\n", | 2000 | seq_printf(m, "failed rexmit LAN msgs: %u\n", |
1985 | ipmi_get_stat(intf, dropped_rexmit_lan_commands)); | 2001 | ipmi_get_stat(intf, dropped_rexmit_lan_commands)); |
1986 | out += sprintf(out, "failed rexmit IPMB msgs: %u\n", | 2002 | seq_printf(m, "failed rexmit IPMB msgs: %u\n", |
1987 | ipmi_get_stat(intf, dropped_rexmit_ipmb_commands)); | 2003 | ipmi_get_stat(intf, dropped_rexmit_ipmb_commands)); |
2004 | return 0; | ||
2005 | } | ||
1988 | 2006 | ||
1989 | return (out - ((char *) page)); | 2007 | static int smi_stats_proc_open(struct inode *inode, struct file *file) |
2008 | { | ||
2009 | return single_open(file, smi_stats_proc_show, PDE(inode)->data); | ||
1990 | } | 2010 | } |
2011 | |||
2012 | static const struct file_operations smi_stats_proc_ops = { | ||
2013 | .open = smi_stats_proc_open, | ||
2014 | .read = seq_read, | ||
2015 | .llseek = seq_lseek, | ||
2016 | .release = single_release, | ||
2017 | }; | ||
1991 | #endif /* CONFIG_PROC_FS */ | 2018 | #endif /* CONFIG_PROC_FS */ |
1992 | 2019 | ||
1993 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | 2020 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, |
1994 | read_proc_t *read_proc, | 2021 | const struct file_operations *proc_ops, |
1995 | void *data) | 2022 | void *data) |
1996 | { | 2023 | { |
1997 | int rv = 0; | 2024 | int rv = 0; |
@@ -2010,15 +2037,12 @@ int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | |||
2010 | } | 2037 | } |
2011 | strcpy(entry->name, name); | 2038 | strcpy(entry->name, name); |
2012 | 2039 | ||
2013 | file = create_proc_entry(name, 0, smi->proc_dir); | 2040 | file = proc_create_data(name, 0, smi->proc_dir, proc_ops, data); |
2014 | if (!file) { | 2041 | if (!file) { |
2015 | kfree(entry->name); | 2042 | kfree(entry->name); |
2016 | kfree(entry); | 2043 | kfree(entry); |
2017 | rv = -ENOMEM; | 2044 | rv = -ENOMEM; |
2018 | } else { | 2045 | } else { |
2019 | file->data = data; | ||
2020 | file->read_proc = read_proc; | ||
2021 | |||
2022 | mutex_lock(&smi->proc_entry_lock); | 2046 | mutex_lock(&smi->proc_entry_lock); |
2023 | /* Stick it on the list. */ | 2047 | /* Stick it on the list. */ |
2024 | entry->next = smi->proc_entries; | 2048 | entry->next = smi->proc_entries; |
@@ -2043,17 +2067,17 @@ static int add_proc_entries(ipmi_smi_t smi, int num) | |||
2043 | 2067 | ||
2044 | if (rv == 0) | 2068 | if (rv == 0) |
2045 | rv = ipmi_smi_add_proc_entry(smi, "stats", | 2069 | rv = ipmi_smi_add_proc_entry(smi, "stats", |
2046 | stat_file_read_proc, | 2070 | &smi_stats_proc_ops, |
2047 | smi); | 2071 | smi); |
2048 | 2072 | ||
2049 | if (rv == 0) | 2073 | if (rv == 0) |
2050 | rv = ipmi_smi_add_proc_entry(smi, "ipmb", | 2074 | rv = ipmi_smi_add_proc_entry(smi, "ipmb", |
2051 | ipmb_file_read_proc, | 2075 | &smi_ipmb_proc_ops, |
2052 | smi); | 2076 | smi); |
2053 | 2077 | ||
2054 | if (rv == 0) | 2078 | if (rv == 0) |
2055 | rv = ipmi_smi_add_proc_entry(smi, "version", | 2079 | rv = ipmi_smi_add_proc_entry(smi, "version", |
2056 | version_file_read_proc, | 2080 | &smi_version_proc_ops, |
2057 | smi); | 2081 | smi); |
2058 | #endif /* CONFIG_PROC_FS */ | 2082 | #endif /* CONFIG_PROC_FS */ |
2059 | 2083 | ||
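The ipmi_msghandler.c hunks above (and the ipmi_si_intf.c ones that follow) convert the old read_proc page-buffer handlers into seq_file single_open() instances registered through proc_create_data(), which is why ipmi_smi_add_proc_entry() now takes a struct file_operations pointer instead of a read_proc_t. The pattern, condensed into a skeleton as it would look in a driver of this kernel generation (struct my_dev and the my_* names are placeholders, not real kernel symbols):

    #include <linux/proc_fs.h>
    #include <linux/seq_file.h>

    struct my_dev { unsigned int events; };         /* illustrative driver state */

    /* All formatting goes through the seq_file, so there is no page/offset/eof
     * bookkeeping as there was with the old read_proc interface. */
    static int my_stats_show(struct seq_file *m, void *v)
    {
            struct my_dev *dev = m->private;        /* was read_proc's "data" argument */

            seq_printf(m, "events: %u\n", dev->events);
            return 0;
    }

    static int my_stats_open(struct inode *inode, struct file *file)
    {
            return single_open(file, my_stats_show, PDE(inode)->data);
    }

    static const struct file_operations my_stats_proc_ops = {
            .open    = my_stats_open,
            .read    = seq_read,
            .llseek  = seq_lseek,
            .release = single_release,
    };

    /* Registration, typically from probe/init code:
     *     proc_create_data("stats", 0, parent_dir, &my_stats_proc_ops, dev);
     */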
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c index 64c6b8530615..9397ab49b72e 100644 --- a/drivers/char/ipmi/ipmi_si_intf.c +++ b/drivers/char/ipmi/ipmi_si_intf.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include <linux/moduleparam.h> | 43 | #include <linux/moduleparam.h> |
44 | #include <asm/system.h> | 44 | #include <asm/system.h> |
45 | #include <linux/sched.h> | 45 | #include <linux/sched.h> |
46 | #include <linux/seq_file.h> | ||
46 | #include <linux/timer.h> | 47 | #include <linux/timer.h> |
47 | #include <linux/errno.h> | 48 | #include <linux/errno.h> |
48 | #include <linux/spinlock.h> | 49 | #include <linux/spinlock.h> |
@@ -2805,54 +2806,73 @@ static int try_enable_event_buffer(struct smi_info *smi_info) | |||
2805 | return rv; | 2806 | return rv; |
2806 | } | 2807 | } |
2807 | 2808 | ||
2808 | static int type_file_read_proc(char *page, char **start, off_t off, | 2809 | static int smi_type_proc_show(struct seq_file *m, void *v) |
2809 | int count, int *eof, void *data) | ||
2810 | { | 2810 | { |
2811 | struct smi_info *smi = data; | 2811 | struct smi_info *smi = m->private; |
2812 | 2812 | ||
2813 | return sprintf(page, "%s\n", si_to_str[smi->si_type]); | 2813 | return seq_printf(m, "%s\n", si_to_str[smi->si_type]); |
2814 | } | 2814 | } |
2815 | 2815 | ||
2816 | static int stat_file_read_proc(char *page, char **start, off_t off, | 2816 | static int smi_type_proc_open(struct inode *inode, struct file *file) |
2817 | int count, int *eof, void *data) | ||
2818 | { | 2817 | { |
2819 | char *out = (char *) page; | 2818 | return single_open(file, smi_type_proc_show, PDE(inode)->data); |
2820 | struct smi_info *smi = data; | 2819 | } |
2820 | |||
2821 | static const struct file_operations smi_type_proc_ops = { | ||
2822 | .open = smi_type_proc_open, | ||
2823 | .read = seq_read, | ||
2824 | .llseek = seq_lseek, | ||
2825 | .release = single_release, | ||
2826 | }; | ||
2827 | |||
2828 | static int smi_si_stats_proc_show(struct seq_file *m, void *v) | ||
2829 | { | ||
2830 | struct smi_info *smi = m->private; | ||
2821 | 2831 | ||
2822 | out += sprintf(out, "interrupts_enabled: %d\n", | 2832 | seq_printf(m, "interrupts_enabled: %d\n", |
2823 | smi->irq && !smi->interrupt_disabled); | 2833 | smi->irq && !smi->interrupt_disabled); |
2824 | out += sprintf(out, "short_timeouts: %u\n", | 2834 | seq_printf(m, "short_timeouts: %u\n", |
2825 | smi_get_stat(smi, short_timeouts)); | 2835 | smi_get_stat(smi, short_timeouts)); |
2826 | out += sprintf(out, "long_timeouts: %u\n", | 2836 | seq_printf(m, "long_timeouts: %u\n", |
2827 | smi_get_stat(smi, long_timeouts)); | 2837 | smi_get_stat(smi, long_timeouts)); |
2828 | out += sprintf(out, "idles: %u\n", | 2838 | seq_printf(m, "idles: %u\n", |
2829 | smi_get_stat(smi, idles)); | 2839 | smi_get_stat(smi, idles)); |
2830 | out += sprintf(out, "interrupts: %u\n", | 2840 | seq_printf(m, "interrupts: %u\n", |
2831 | smi_get_stat(smi, interrupts)); | 2841 | smi_get_stat(smi, interrupts)); |
2832 | out += sprintf(out, "attentions: %u\n", | 2842 | seq_printf(m, "attentions: %u\n", |
2833 | smi_get_stat(smi, attentions)); | 2843 | smi_get_stat(smi, attentions)); |
2834 | out += sprintf(out, "flag_fetches: %u\n", | 2844 | seq_printf(m, "flag_fetches: %u\n", |
2835 | smi_get_stat(smi, flag_fetches)); | 2845 | smi_get_stat(smi, flag_fetches)); |
2836 | out += sprintf(out, "hosed_count: %u\n", | 2846 | seq_printf(m, "hosed_count: %u\n", |
2837 | smi_get_stat(smi, hosed_count)); | 2847 | smi_get_stat(smi, hosed_count)); |
2838 | out += sprintf(out, "complete_transactions: %u\n", | 2848 | seq_printf(m, "complete_transactions: %u\n", |
2839 | smi_get_stat(smi, complete_transactions)); | 2849 | smi_get_stat(smi, complete_transactions)); |
2840 | out += sprintf(out, "events: %u\n", | 2850 | seq_printf(m, "events: %u\n", |
2841 | smi_get_stat(smi, events)); | 2851 | smi_get_stat(smi, events)); |
2842 | out += sprintf(out, "watchdog_pretimeouts: %u\n", | 2852 | seq_printf(m, "watchdog_pretimeouts: %u\n", |
2843 | smi_get_stat(smi, watchdog_pretimeouts)); | 2853 | smi_get_stat(smi, watchdog_pretimeouts)); |
2844 | out += sprintf(out, "incoming_messages: %u\n", | 2854 | seq_printf(m, "incoming_messages: %u\n", |
2845 | smi_get_stat(smi, incoming_messages)); | 2855 | smi_get_stat(smi, incoming_messages)); |
2856 | return 0; | ||
2857 | } | ||
2846 | 2858 | ||
2847 | return out - page; | 2859 | static int smi_si_stats_proc_open(struct inode *inode, struct file *file) |
2860 | { | ||
2861 | return single_open(file, smi_si_stats_proc_show, PDE(inode)->data); | ||
2848 | } | 2862 | } |
2849 | 2863 | ||
2850 | static int param_read_proc(char *page, char **start, off_t off, | 2864 | static const struct file_operations smi_si_stats_proc_ops = { |
2851 | int count, int *eof, void *data) | 2865 | .open = smi_si_stats_proc_open, |
2866 | .read = seq_read, | ||
2867 | .llseek = seq_lseek, | ||
2868 | .release = single_release, | ||
2869 | }; | ||
2870 | |||
2871 | static int smi_params_proc_show(struct seq_file *m, void *v) | ||
2852 | { | 2872 | { |
2853 | struct smi_info *smi = data; | 2873 | struct smi_info *smi = m->private; |
2854 | 2874 | ||
2855 | return sprintf(page, | 2875 | return seq_printf(m, |
2856 | "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", | 2876 | "%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n", |
2857 | si_to_str[smi->si_type], | 2877 | si_to_str[smi->si_type], |
2858 | addr_space_to_str[smi->io.addr_type], | 2878 | addr_space_to_str[smi->io.addr_type], |
@@ -2864,6 +2884,18 @@ static int param_read_proc(char *page, char **start, off_t off, | |||
2864 | smi->slave_addr); | 2884 | smi->slave_addr); |
2865 | } | 2885 | } |
2866 | 2886 | ||
2887 | static int smi_params_proc_open(struct inode *inode, struct file *file) | ||
2888 | { | ||
2889 | return single_open(file, smi_params_proc_show, PDE(inode)->data); | ||
2890 | } | ||
2891 | |||
2892 | static const struct file_operations smi_params_proc_ops = { | ||
2893 | .open = smi_params_proc_open, | ||
2894 | .read = seq_read, | ||
2895 | .llseek = seq_lseek, | ||
2896 | .release = single_release, | ||
2897 | }; | ||
2898 | |||
2867 | /* | 2899 | /* |
2868 | * oem_data_avail_to_receive_msg_avail | 2900 | * oem_data_avail_to_receive_msg_avail |
2869 | * @info - smi_info structure with msg_flags set | 2901 | * @info - smi_info structure with msg_flags set |
@@ -3257,7 +3289,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3257 | } | 3289 | } |
3258 | 3290 | ||
3259 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", | 3291 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "type", |
3260 | type_file_read_proc, | 3292 | &smi_type_proc_ops, |
3261 | new_smi); | 3293 | new_smi); |
3262 | if (rv) { | 3294 | if (rv) { |
3263 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); | 3295 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); |
@@ -3265,7 +3297,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3265 | } | 3297 | } |
3266 | 3298 | ||
3267 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", | 3299 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "si_stats", |
3268 | stat_file_read_proc, | 3300 | &smi_si_stats_proc_ops, |
3269 | new_smi); | 3301 | new_smi); |
3270 | if (rv) { | 3302 | if (rv) { |
3271 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); | 3303 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); |
@@ -3273,7 +3305,7 @@ static int try_smi_init(struct smi_info *new_smi) | |||
3273 | } | 3305 | } |
3274 | 3306 | ||
3275 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", | 3307 | rv = ipmi_smi_add_proc_entry(new_smi->intf, "params", |
3276 | param_read_proc, | 3308 | &smi_params_proc_ops, |
3277 | new_smi); | 3309 | new_smi); |
3278 | if (rv) { | 3310 | if (rv) { |
3279 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); | 3311 | dev_err(new_smi->dev, "Unable to create proc entry: %d\n", rv); |
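The ipmi_si hunks above convert the driver's /proc files from the old read_proc() callbacks to the seq_file interface via single_open(). A minimal sketch of the same pattern follows; the example_stats_* and struct my_dev names are invented for illustration, while seq_printf(), single_open(), seq_read(), seq_lseek(), single_release() and PDE(inode)->data are the interfaces the patch itself uses.

/*
 * Minimal sketch of the seq_file pattern used above; example_stats_* and
 * struct my_dev are hypothetical.
 */
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

struct my_dev {
	unsigned int events;
};

static int example_stats_show(struct seq_file *m, void *v)
{
	struct my_dev *dev = m->private;	/* handed over by single_open() */

	seq_printf(m, "events: %u\n", dev->events);
	return 0;
}

static int example_stats_open(struct inode *inode, struct file *file)
{
	/* the proc entry's private data pointer, as the patch accesses it */
	return single_open(file, example_stats_show, PDE(inode)->data);
}

static const struct file_operations example_stats_ops = {
	.open		= example_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

The seq_file core handles buffering and partial reads, so the manual page/offset bookkeeping of the old read_proc() callbacks disappears along with the "out - page" return value.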
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c index 966a95bc974b..25d139c9dbed 100644 --- a/drivers/char/mspec.c +++ b/drivers/char/mspec.c | |||
@@ -271,14 +271,13 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma, | |||
271 | pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; | 271 | pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; |
272 | vdata_size = sizeof(struct vma_data) + pages * sizeof(long); | 272 | vdata_size = sizeof(struct vma_data) + pages * sizeof(long); |
273 | if (vdata_size <= PAGE_SIZE) | 273 | if (vdata_size <= PAGE_SIZE) |
274 | vdata = kmalloc(vdata_size, GFP_KERNEL); | 274 | vdata = kzalloc(vdata_size, GFP_KERNEL); |
275 | else { | 275 | else { |
276 | vdata = vmalloc(vdata_size); | 276 | vdata = vzalloc(vdata_size); |
277 | flags = VMD_VMALLOCED; | 277 | flags = VMD_VMALLOCED; |
278 | } | 278 | } |
279 | if (!vdata) | 279 | if (!vdata) |
280 | return -ENOMEM; | 280 | return -ENOMEM; |
281 | memset(vdata, 0, vdata_size); | ||
282 | 281 | ||
283 | vdata->vm_start = vma->vm_start; | 282 | vdata->vm_start = vma->vm_start; |
284 | vdata->vm_end = vma->vm_end; | 283 | vdata->vm_end = vma->vm_end; |
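The mspec change replaces kmalloc()/vmalloc() followed by memset() with kzalloc()/vzalloc(), which hand back already-zeroed memory. A hedged sketch of the same split, with a hypothetical alloc_table() wrapper:

/*
 * Illustrative only: alloc_table() is a hypothetical wrapper.  kzalloc()
 * and vzalloc() both return zeroed memory, so the trailing memset() from
 * the old code is no longer needed.  The caller must still remember which
 * allocator was used (kfree() vs. vfree()), as mspec does with its
 * VMD_VMALLOCED flag.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *alloc_table(size_t size)
{
	if (size <= PAGE_SIZE)
		return kzalloc(size, GFP_KERNEL);	/* small: slab allocation */
	return vzalloc(size);				/* large: vmalloc area */
}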
diff --git a/drivers/char/ppdev.c b/drivers/char/ppdev.c index f176dbaeb15a..3fcf80ff12f2 100644 --- a/drivers/char/ppdev.c +++ b/drivers/char/ppdev.c | |||
@@ -457,6 +457,7 @@ static int pp_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg) | |||
457 | return -ENODEV; | 457 | return -ENODEV; |
458 | 458 | ||
459 | modes = port->modes; | 459 | modes = port->modes; |
460 | parport_put_port(port); | ||
460 | if (copy_to_user (argp, &modes, sizeof (modes))) { | 461 | if (copy_to_user (argp, &modes, sizeof (modes))) { |
461 | return -EFAULT; | 462 | return -EFAULT; |
462 | } | 463 | } |
diff --git a/drivers/dma/timb_dma.c b/drivers/dma/timb_dma.c index d2c75feff7df..f69f90a61873 100644 --- a/drivers/dma/timb_dma.c +++ b/drivers/dma/timb_dma.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/io.h> | 27 | #include <linux/io.h> |
28 | #include <linux/module.h> | 28 | #include <linux/module.h> |
29 | #include <linux/platform_device.h> | 29 | #include <linux/platform_device.h> |
30 | #include <linux/mfd/core.h> | ||
31 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
32 | 31 | ||
33 | #include <linux/timb_dma.h> | 32 | #include <linux/timb_dma.h> |
@@ -685,7 +684,7 @@ static irqreturn_t td_irq(int irq, void *devid) | |||
685 | 684 | ||
686 | static int __devinit td_probe(struct platform_device *pdev) | 685 | static int __devinit td_probe(struct platform_device *pdev) |
687 | { | 686 | { |
688 | struct timb_dma_platform_data *pdata = mfd_get_data(pdev); | 687 | struct timb_dma_platform_data *pdata = pdev->dev.platform_data; |
689 | struct timb_dma *td; | 688 | struct timb_dma *td; |
690 | struct resource *iomem; | 689 | struct resource *iomem; |
691 | int irq; | 690 | int irq; |
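The timb_dma change (like the similar GPIO driver changes further down) drops the mfd_get_data() helper and reads the configuration straight from pdev->dev.platform_data, where the MFD core now attaches it. A hedged probe() fragment showing the lookup, with hypothetical foo_* names:

/*
 * Hypothetical probe() fragment.  struct foo_platform_data and foo_probe()
 * stand in for the timb_dma specifics; the pdev->dev.platform_data access
 * is the one introduced by the patch.
 */
#include <linux/platform_device.h>

struct foo_platform_data {
	int nr_channels;
};

static int __devinit foo_probe(struct platform_device *pdev)
{
	struct foo_platform_data *pdata = pdev->dev.platform_data;

	if (!pdata) {
		dev_err(&pdev->dev, "no platform data\n");
		return -ENODEV;
	}

	dev_info(&pdev->dev, "%d channels\n", pdata->nr_channels);
	return 0;
}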
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c index cace0a7b707a..e47e73bbbcc5 100644 --- a/drivers/edac/amd76x_edac.c +++ b/drivers/edac/amd76x_edac.c | |||
@@ -19,7 +19,7 @@ | |||
19 | #include <linux/edac.h> | 19 | #include <linux/edac.h> |
20 | #include "edac_core.h" | 20 | #include "edac_core.h" |
21 | 21 | ||
22 | #define AMD76X_REVISION " Ver: 2.0.2 " __DATE__ | 22 | #define AMD76X_REVISION " Ver: 2.0.2" |
23 | #define EDAC_MOD_STR "amd76x_edac" | 23 | #define EDAC_MOD_STR "amd76x_edac" |
24 | 24 | ||
25 | #define amd76x_printk(level, fmt, arg...) \ | 25 | #define amd76x_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/amd8111_edac.c b/drivers/edac/amd8111_edac.c index 35b78d04bbfa..ddd890052ce2 100644 --- a/drivers/edac/amd8111_edac.c +++ b/drivers/edac/amd8111_edac.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include "edac_module.h" | 33 | #include "edac_module.h" |
34 | #include "amd8111_edac.h" | 34 | #include "amd8111_edac.h" |
35 | 35 | ||
36 | #define AMD8111_EDAC_REVISION " Ver: 1.0.0 " __DATE__ | 36 | #define AMD8111_EDAC_REVISION " Ver: 1.0.0" |
37 | #define AMD8111_EDAC_MOD_STR "amd8111_edac" | 37 | #define AMD8111_EDAC_MOD_STR "amd8111_edac" |
38 | 38 | ||
39 | #define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 | 39 | #define PCI_DEVICE_ID_AMD_8111_PCI 0x7460 |
diff --git a/drivers/edac/amd8131_edac.c b/drivers/edac/amd8131_edac.c index b432d60c622a..a5c680561c73 100644 --- a/drivers/edac/amd8131_edac.c +++ b/drivers/edac/amd8131_edac.c | |||
@@ -33,7 +33,7 @@ | |||
33 | #include "edac_module.h" | 33 | #include "edac_module.h" |
34 | #include "amd8131_edac.h" | 34 | #include "amd8131_edac.h" |
35 | 35 | ||
36 | #define AMD8131_EDAC_REVISION " Ver: 1.0.0 " __DATE__ | 36 | #define AMD8131_EDAC_REVISION " Ver: 1.0.0" |
37 | #define AMD8131_EDAC_MOD_STR "amd8131_edac" | 37 | #define AMD8131_EDAC_MOD_STR "amd8131_edac" |
38 | 38 | ||
39 | /* Wrapper functions for accessing PCI configuration space */ | 39 | /* Wrapper functions for accessing PCI configuration space */ |
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c index 837ad8f85b48..a687a0d16962 100644 --- a/drivers/edac/cpc925_edac.c +++ b/drivers/edac/cpc925_edac.c | |||
@@ -30,7 +30,7 @@ | |||
30 | #include "edac_core.h" | 30 | #include "edac_core.h" |
31 | #include "edac_module.h" | 31 | #include "edac_module.h" |
32 | 32 | ||
33 | #define CPC925_EDAC_REVISION " Ver: 1.0.0 " __DATE__ | 33 | #define CPC925_EDAC_REVISION " Ver: 1.0.0" |
34 | #define CPC925_EDAC_MOD_STR "cpc925_edac" | 34 | #define CPC925_EDAC_MOD_STR "cpc925_edac" |
35 | 35 | ||
36 | #define cpc925_printk(level, fmt, arg...) \ | 36 | #define cpc925_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c index ec302d426589..1af531a11d21 100644 --- a/drivers/edac/e752x_edac.c +++ b/drivers/edac/e752x_edac.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <linux/edac.h> | 24 | #include <linux/edac.h> |
25 | #include "edac_core.h" | 25 | #include "edac_core.h" |
26 | 26 | ||
27 | #define E752X_REVISION " Ver: 2.0.2 " __DATE__ | 27 | #define E752X_REVISION " Ver: 2.0.2" |
28 | #define EDAC_MOD_STR "e752x_edac" | 28 | #define EDAC_MOD_STR "e752x_edac" |
29 | 29 | ||
30 | static int report_non_memory_errors; | 30 | static int report_non_memory_errors; |
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c index 1731d7245816..6ffb6d23281f 100644 --- a/drivers/edac/e7xxx_edac.c +++ b/drivers/edac/e7xxx_edac.c | |||
@@ -29,7 +29,7 @@ | |||
29 | #include <linux/edac.h> | 29 | #include <linux/edac.h> |
30 | #include "edac_core.h" | 30 | #include "edac_core.h" |
31 | 31 | ||
32 | #define E7XXX_REVISION " Ver: 2.0.2 " __DATE__ | 32 | #define E7XXX_REVISION " Ver: 2.0.2" |
33 | #define EDAC_MOD_STR "e7xxx_edac" | 33 | #define EDAC_MOD_STR "e7xxx_edac" |
34 | 34 | ||
35 | #define e7xxx_printk(level, fmt, arg...) \ | 35 | #define e7xxx_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h index eefa3501916b..55b8278bb172 100644 --- a/drivers/edac/edac_core.h +++ b/drivers/edac/edac_core.h | |||
@@ -421,10 +421,6 @@ struct mem_ctl_info { | |||
421 | u32 ce_count; /* Total Correctable Errors for this MC */ | 421 | u32 ce_count; /* Total Correctable Errors for this MC */ |
422 | unsigned long start_time; /* mci load start time (in jiffies) */ | 422 | unsigned long start_time; /* mci load start time (in jiffies) */ |
423 | 423 | ||
424 | /* this stuff is for safe removal of mc devices from global list while | ||
425 | * NMI handlers may be traversing list | ||
426 | */ | ||
427 | struct rcu_head rcu; | ||
428 | struct completion complete; | 424 | struct completion complete; |
429 | 425 | ||
430 | /* edac sysfs device control */ | 426 | /* edac sysfs device control */ |
@@ -620,10 +616,6 @@ struct edac_device_ctl_info { | |||
620 | 616 | ||
621 | unsigned long start_time; /* edac_device load start time (jiffies) */ | 617 | unsigned long start_time; /* edac_device load start time (jiffies) */ |
622 | 618 | ||
623 | /* these are for safe removal of mc devices from global list while | ||
624 | * NMI handlers may be traversing list | ||
625 | */ | ||
626 | struct rcu_head rcu; | ||
627 | struct completion removal_complete; | 619 | struct completion removal_complete; |
628 | 620 | ||
629 | /* sysfs top name under 'edac' directory | 621 | /* sysfs top name under 'edac' directory |
@@ -722,10 +714,6 @@ struct edac_pci_ctl_info { | |||
722 | 714 | ||
723 | unsigned long start_time; /* edac_pci load start time (jiffies) */ | 715 | unsigned long start_time; /* edac_pci load start time (jiffies) */ |
724 | 716 | ||
725 | /* these are for safe removal of devices from global list while | ||
726 | * NMI handlers may be traversing list | ||
727 | */ | ||
728 | struct rcu_head rcu; | ||
729 | struct completion complete; | 717 | struct completion complete; |
730 | 718 | ||
731 | /* sysfs top name under 'edac' directory | 719 | /* sysfs top name under 'edac' directory |
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c index a7408cf86f37..c3f67437afb6 100644 --- a/drivers/edac/edac_device.c +++ b/drivers/edac/edac_device.c | |||
@@ -346,30 +346,18 @@ fail1: | |||
346 | } | 346 | } |
347 | 347 | ||
348 | /* | 348 | /* |
349 | * complete_edac_device_list_del | ||
350 | * | ||
351 | * callback function when reference count is zero | ||
352 | */ | ||
353 | static void complete_edac_device_list_del(struct rcu_head *head) | ||
354 | { | ||
355 | struct edac_device_ctl_info *edac_dev; | ||
356 | |||
357 | edac_dev = container_of(head, struct edac_device_ctl_info, rcu); | ||
358 | INIT_LIST_HEAD(&edac_dev->link); | ||
359 | } | ||
360 | |||
361 | /* | ||
362 | * del_edac_device_from_global_list | 349 | * del_edac_device_from_global_list |
363 | * | ||
364 | * remove the RCU, setup for a callback call, | ||
365 | * then wait for the callback to occur | ||
366 | */ | 350 | */ |
367 | static void del_edac_device_from_global_list(struct edac_device_ctl_info | 351 | static void del_edac_device_from_global_list(struct edac_device_ctl_info |
368 | *edac_device) | 352 | *edac_device) |
369 | { | 353 | { |
370 | list_del_rcu(&edac_device->link); | 354 | list_del_rcu(&edac_device->link); |
371 | call_rcu(&edac_device->rcu, complete_edac_device_list_del); | 355 | |
372 | rcu_barrier(); | 356 | /* these are for safe removal of devices from global list while |
357 | * NMI handlers may be traversing list | ||
358 | */ | ||
359 | synchronize_rcu(); | ||
360 | INIT_LIST_HEAD(&edac_device->link); | ||
373 | } | 361 | } |
374 | 362 | ||
375 | /* | 363 | /* |
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c index 1d8056049072..d69144a09043 100644 --- a/drivers/edac/edac_mc.c +++ b/drivers/edac/edac_mc.c | |||
@@ -447,20 +447,16 @@ fail1: | |||
447 | return 1; | 447 | return 1; |
448 | } | 448 | } |
449 | 449 | ||
450 | static void complete_mc_list_del(struct rcu_head *head) | ||
451 | { | ||
452 | struct mem_ctl_info *mci; | ||
453 | |||
454 | mci = container_of(head, struct mem_ctl_info, rcu); | ||
455 | INIT_LIST_HEAD(&mci->link); | ||
456 | } | ||
457 | |||
458 | static void del_mc_from_global_list(struct mem_ctl_info *mci) | 450 | static void del_mc_from_global_list(struct mem_ctl_info *mci) |
459 | { | 451 | { |
460 | atomic_dec(&edac_handlers); | 452 | atomic_dec(&edac_handlers); |
461 | list_del_rcu(&mci->link); | 453 | list_del_rcu(&mci->link); |
462 | call_rcu(&mci->rcu, complete_mc_list_del); | 454 | |
463 | rcu_barrier(); | 455 | /* these are for safe removal of devices from global list while |
456 | * NMI handlers may be traversing list | ||
457 | */ | ||
458 | synchronize_rcu(); | ||
459 | INIT_LIST_HEAD(&mci->link); | ||
464 | } | 460 | } |
465 | 461 | ||
466 | /** | 462 | /** |
diff --git a/drivers/edac/edac_module.c b/drivers/edac/edac_module.c index be4b075c3098..5ddaa86d6a6e 100644 --- a/drivers/edac/edac_module.c +++ b/drivers/edac/edac_module.c | |||
@@ -15,7 +15,7 @@ | |||
15 | #include "edac_core.h" | 15 | #include "edac_core.h" |
16 | #include "edac_module.h" | 16 | #include "edac_module.h" |
17 | 17 | ||
18 | #define EDAC_VERSION "Ver: 2.1.0 " __DATE__ | 18 | #define EDAC_VERSION "Ver: 2.1.0" |
19 | 19 | ||
20 | #ifdef CONFIG_EDAC_DEBUG | 20 | #ifdef CONFIG_EDAC_DEBUG |
21 | /* Values of 0 to 4 will generate output */ | 21 | /* Values of 0 to 4 will generate output */ |
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c index efb5d5650783..2b378207d571 100644 --- a/drivers/edac/edac_pci.c +++ b/drivers/edac/edac_pci.c | |||
@@ -164,19 +164,6 @@ fail1: | |||
164 | } | 164 | } |
165 | 165 | ||
166 | /* | 166 | /* |
167 | * complete_edac_pci_list_del | ||
168 | * | ||
169 | * RCU completion callback to indicate item is deleted | ||
170 | */ | ||
171 | static void complete_edac_pci_list_del(struct rcu_head *head) | ||
172 | { | ||
173 | struct edac_pci_ctl_info *pci; | ||
174 | |||
175 | pci = container_of(head, struct edac_pci_ctl_info, rcu); | ||
176 | INIT_LIST_HEAD(&pci->link); | ||
177 | } | ||
178 | |||
179 | /* | ||
180 | * del_edac_pci_from_global_list | 167 | * del_edac_pci_from_global_list |
181 | * | 168 | * |
182 | * remove the PCI control struct from the global list | 169 | * remove the PCI control struct from the global list |
@@ -184,8 +171,12 @@ static void complete_edac_pci_list_del(struct rcu_head *head) | |||
184 | static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci) | 171 | static void del_edac_pci_from_global_list(struct edac_pci_ctl_info *pci) |
185 | { | 172 | { |
186 | list_del_rcu(&pci->link); | 173 | list_del_rcu(&pci->link); |
187 | call_rcu(&pci->rcu, complete_edac_pci_list_del); | 174 | |
188 | rcu_barrier(); | 175 | /* these are for safe removal of devices from global list while |
176 | * NMI handlers may be traversing list | ||
177 | */ | ||
178 | synchronize_rcu(); | ||
179 | INIT_LIST_HEAD(&pci->link); | ||
189 | } | 180 | } |
190 | 181 | ||
191 | #if 0 | 182 | #if 0 |
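All three EDAC removal paths above (edac_device, edac_mc, edac_pci) replace the call_rcu() callback plus rcu_barrier() combination with a plain synchronize_rcu(): the removal functions may sleep, so they can simply wait out any readers (the comments mention NMI handlers) still traversing the global list and then reinitialise the list head inline. A minimal sketch of the pattern, using a hypothetical struct item:

/*
 * Minimal sketch of the removal pattern adopted above; struct item and
 * remove_item() are hypothetical.  list_del_rcu() unlinks the entry,
 * synchronize_rcu() waits for all pre-existing readers, and only then is
 * the entry reinitialised (or freed).
 */
#include <linux/rculist.h>
#include <linux/rcupdate.h>

struct item {
	struct list_head link;
};

static void remove_item(struct item *it)
{
	list_del_rcu(&it->link);
	synchronize_rcu();		/* all readers of the old list are done */
	INIT_LIST_HEAD(&it->link);	/* safe to reuse the entry now */
}

Compared with call_rcu() plus rcu_barrier(), which waits for every outstanding RCU callback in the system, this waits only for the grace period the caller actually needs.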
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index 87f427c2ce5c..4dc3ac25a422 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c | |||
@@ -27,7 +27,7 @@ | |||
27 | /* | 27 | /* |
28 | * Alter this version for the I5000 module when modifications are made | 28 | * Alter this version for the I5000 module when modifications are made |
29 | */ | 29 | */ |
30 | #define I5000_REVISION " Ver: 2.0.12 " __DATE__ | 30 | #define I5000_REVISION " Ver: 2.0.12" |
31 | #define EDAC_MOD_STR "i5000_edac" | 31 | #define EDAC_MOD_STR "i5000_edac" |
32 | 32 | ||
33 | #define i5000_printk(level, fmt, arg...) \ | 33 | #define i5000_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c index 80a465efbae8..74d6ec342afb 100644 --- a/drivers/edac/i5400_edac.c +++ b/drivers/edac/i5400_edac.c | |||
@@ -33,7 +33,7 @@ | |||
33 | /* | 33 | /* |
34 | * Alter this version for the I5400 module when modifications are made | 34 | * Alter this version for the I5400 module when modifications are made |
35 | */ | 35 | */ |
36 | #define I5400_REVISION " Ver: 1.0.0 " __DATE__ | 36 | #define I5400_REVISION " Ver: 1.0.0" |
37 | 37 | ||
38 | #define EDAC_MOD_STR "i5400_edac" | 38 | #define EDAC_MOD_STR "i5400_edac" |
39 | 39 | ||
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c index 363cc1602944..a76fe8366b68 100644 --- a/drivers/edac/i7300_edac.c +++ b/drivers/edac/i7300_edac.c | |||
@@ -31,7 +31,7 @@ | |||
31 | /* | 31 | /* |
32 | * Alter this version for the I7300 module when modifications are made | 32 | * Alter this version for the I7300 module when modifications are made |
33 | */ | 33 | */ |
34 | #define I7300_REVISION " Ver: 1.0.0 " __DATE__ | 34 | #define I7300_REVISION " Ver: 1.0.0" |
35 | 35 | ||
36 | #define EDAC_MOD_STR "i7300_edac" | 36 | #define EDAC_MOD_STR "i7300_edac" |
37 | 37 | ||
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c index 465cbc25149f..04f1e7ce02b1 100644 --- a/drivers/edac/i7core_edac.c +++ b/drivers/edac/i7core_edac.c | |||
@@ -59,7 +59,7 @@ MODULE_PARM_DESC(use_pci_fixup, "Enable PCI fixup to seek for hidden devices"); | |||
59 | /* | 59 | /* |
60 | * Alter this version for the module when modifications are made | 60 | * Alter this version for the module when modifications are made |
61 | */ | 61 | */ |
62 | #define I7CORE_REVISION " Ver: 1.0.0 " __DATE__ | 62 | #define I7CORE_REVISION " Ver: 1.0.0" |
63 | #define EDAC_MOD_STR "i7core_edac" | 63 | #define EDAC_MOD_STR "i7core_edac" |
64 | 64 | ||
65 | /* | 65 | /* |
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c index b8a95cf50718..931a05775049 100644 --- a/drivers/edac/i82860_edac.c +++ b/drivers/edac/i82860_edac.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/edac.h> | 16 | #include <linux/edac.h> |
17 | #include "edac_core.h" | 17 | #include "edac_core.h" |
18 | 18 | ||
19 | #define I82860_REVISION " Ver: 2.0.2 " __DATE__ | 19 | #define I82860_REVISION " Ver: 2.0.2" |
20 | #define EDAC_MOD_STR "i82860_edac" | 20 | #define EDAC_MOD_STR "i82860_edac" |
21 | 21 | ||
22 | #define i82860_printk(level, fmt, arg...) \ | 22 | #define i82860_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c index b2fd1e899142..33864c63c684 100644 --- a/drivers/edac/i82875p_edac.c +++ b/drivers/edac/i82875p_edac.c | |||
@@ -20,7 +20,7 @@ | |||
20 | #include <linux/edac.h> | 20 | #include <linux/edac.h> |
21 | #include "edac_core.h" | 21 | #include "edac_core.h" |
22 | 22 | ||
23 | #define I82875P_REVISION " Ver: 2.0.2 " __DATE__ | 23 | #define I82875P_REVISION " Ver: 2.0.2" |
24 | #define EDAC_MOD_STR "i82875p_edac" | 24 | #define EDAC_MOD_STR "i82875p_edac" |
25 | 25 | ||
26 | #define i82875p_printk(level, fmt, arg...) \ | 26 | #define i82875p_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c index 92e65e7038e9..a5da732fe5b2 100644 --- a/drivers/edac/i82975x_edac.c +++ b/drivers/edac/i82975x_edac.c | |||
@@ -16,7 +16,7 @@ | |||
16 | #include <linux/edac.h> | 16 | #include <linux/edac.h> |
17 | #include "edac_core.h" | 17 | #include "edac_core.h" |
18 | 18 | ||
19 | #define I82975X_REVISION " Ver: 1.0.0 " __DATE__ | 19 | #define I82975X_REVISION " Ver: 1.0.0" |
20 | #define EDAC_MOD_STR "i82975x_edac" | 20 | #define EDAC_MOD_STR "i82975x_edac" |
21 | 21 | ||
22 | #define i82975x_printk(level, fmt, arg...) \ | 22 | #define i82975x_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/mpc85xx_edac.h b/drivers/edac/mpc85xx_edac.h index cb24df839460..932016f2cf06 100644 --- a/drivers/edac/mpc85xx_edac.h +++ b/drivers/edac/mpc85xx_edac.h | |||
@@ -11,7 +11,7 @@ | |||
11 | #ifndef _MPC85XX_EDAC_H_ | 11 | #ifndef _MPC85XX_EDAC_H_ |
12 | #define _MPC85XX_EDAC_H_ | 12 | #define _MPC85XX_EDAC_H_ |
13 | 13 | ||
14 | #define MPC85XX_REVISION " Ver: 2.0.0 " __DATE__ | 14 | #define MPC85XX_REVISION " Ver: 2.0.0" |
15 | #define EDAC_MOD_STR "MPC85xx_edac" | 15 | #define EDAC_MOD_STR "MPC85xx_edac" |
16 | 16 | ||
17 | #define mpc85xx_printk(level, fmt, arg...) \ | 17 | #define mpc85xx_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/mv64x60_edac.h b/drivers/edac/mv64x60_edac.h index e042e2daa8f4..c7f209c92a1a 100644 --- a/drivers/edac/mv64x60_edac.h +++ b/drivers/edac/mv64x60_edac.h | |||
@@ -12,7 +12,7 @@ | |||
12 | #ifndef _MV64X60_EDAC_H_ | 12 | #ifndef _MV64X60_EDAC_H_ |
13 | #define _MV64X60_EDAC_H_ | 13 | #define _MV64X60_EDAC_H_ |
14 | 14 | ||
15 | #define MV64x60_REVISION " Ver: 2.0.0 " __DATE__ | 15 | #define MV64x60_REVISION " Ver: 2.0.0" |
16 | #define EDAC_MOD_STR "MV64x60_edac" | 16 | #define EDAC_MOD_STR "MV64x60_edac" |
17 | 17 | ||
18 | #define mv64x60_printk(level, fmt, arg...) \ | 18 | #define mv64x60_printk(level, fmt, arg...) \ |
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c index af8e7b1aa290..0de7d8770891 100644 --- a/drivers/edac/ppc4xx_edac.c +++ b/drivers/edac/ppc4xx_edac.c | |||
@@ -113,7 +113,7 @@ | |||
113 | #define EDAC_OPSTATE_UNKNOWN_STR "unknown" | 113 | #define EDAC_OPSTATE_UNKNOWN_STR "unknown" |
114 | 114 | ||
115 | #define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac" | 115 | #define PPC4XX_EDAC_MODULE_NAME "ppc4xx_edac" |
116 | #define PPC4XX_EDAC_MODULE_REVISION "v1.0.0 " __DATE__ | 116 | #define PPC4XX_EDAC_MODULE_REVISION "v1.0.0" |
117 | 117 | ||
118 | #define PPC4XX_EDAC_MESSAGE_SIZE 256 | 118 | #define PPC4XX_EDAC_MESSAGE_SIZE 256 |
119 | 119 | ||
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c index 678513738c33..b153674431f1 100644 --- a/drivers/edac/r82600_edac.c +++ b/drivers/edac/r82600_edac.c | |||
@@ -22,7 +22,7 @@ | |||
22 | #include <linux/edac.h> | 22 | #include <linux/edac.h> |
23 | #include "edac_core.h" | 23 | #include "edac_core.h" |
24 | 24 | ||
25 | #define R82600_REVISION " Ver: 2.0.2 " __DATE__ | 25 | #define R82600_REVISION " Ver: 2.0.2" |
26 | #define EDAC_MOD_STR "r82600_edac" | 26 | #define EDAC_MOD_STR "r82600_edac" |
27 | 27 | ||
28 | #define r82600_printk(level, fmt, arg...) \ | 28 | #define r82600_printk(level, fmt, arg...) \ |
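Every EDAC driver touched above also drops __DATE__ from its revision string; a build timestamp there defeats reproducible builds and changes the object file on every recompile without adding information. A trivial illustration with a hypothetical module:

/*
 * Hypothetical module: the revision string is now a fixed literal, so
 * rebuilding from the same source yields the same output.
 */
#include <linux/kernel.h>
#include <linux/module.h>

#define EXAMPLE_EDAC_REVISION " Ver: 2.0.2"

static int __init example_edac_init(void)
{
	pr_info("example_edac" EXAMPLE_EDAC_REVISION " loaded\n");
	return 0;
}
module_init(example_edac_init);

static void __exit example_edac_exit(void)
{
}
module_exit(example_edac_exit);

MODULE_LICENSE("GPL");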
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig index d3b295305542..d21364603755 100644 --- a/drivers/gpio/Kconfig +++ b/drivers/gpio/Kconfig | |||
@@ -1,5 +1,5 @@ | |||
1 | # | 1 | # |
2 | # platform-neutral GPIO infrastructure and expanders | 2 | # GPIO infrastructure and drivers |
3 | # | 3 | # |
4 | 4 | ||
5 | config ARCH_WANT_OPTIONAL_GPIOLIB | 5 | config ARCH_WANT_OPTIONAL_GPIOLIB |
@@ -31,7 +31,7 @@ menuconfig GPIOLIB | |||
31 | help | 31 | help |
32 | This enables GPIO support through the generic GPIO library. | 32 | This enables GPIO support through the generic GPIO library. |
33 | You only need to enable this, if you also want to enable | 33 | You only need to enable this, if you also want to enable |
34 | one or more of the GPIO expansion card drivers below. | 34 | one or more of the GPIO drivers below. |
35 | 35 | ||
36 | If unsure, say N. | 36 | If unsure, say N. |
37 | 37 | ||
@@ -63,21 +63,26 @@ config GPIO_SYSFS | |||
63 | Kernel drivers may also request that a particular GPIO be | 63 | Kernel drivers may also request that a particular GPIO be |
64 | exported to userspace; this can be useful when debugging. | 64 | exported to userspace; this can be useful when debugging. |
65 | 65 | ||
66 | # put expanders in the right section, in alphabetical order | 66 | # put drivers in the right section, in alphabetical order |
67 | 67 | ||
68 | config GPIO_MAX730X | 68 | config GPIO_MAX730X |
69 | tristate | 69 | tristate |
70 | 70 | ||
71 | comment "Memory mapped GPIO expanders:" | 71 | comment "Memory mapped GPIO drivers:" |
72 | |||
73 | config GPIO_BASIC_MMIO_CORE | ||
74 | tristate | ||
75 | help | ||
76 | Provides core functionality for basic memory-mapped GPIO controllers. | ||
72 | 77 | ||
73 | config GPIO_BASIC_MMIO | 78 | config GPIO_BASIC_MMIO |
74 | tristate "Basic memory-mapped GPIO controllers support" | 79 | tristate "Basic memory-mapped GPIO controllers support" |
80 | select GPIO_BASIC_MMIO_CORE | ||
75 | help | 81 | help |
76 | Say yes here to support basic memory-mapped GPIO controllers. | 82 | Say yes here to support basic memory-mapped GPIO controllers. |
77 | 83 | ||
78 | config GPIO_IT8761E | 84 | config GPIO_IT8761E |
79 | tristate "IT8761E GPIO support" | 85 | tristate "IT8761E GPIO support" |
80 | depends on GPIOLIB | ||
81 | help | 86 | help |
82 | Say yes here to support GPIO functionality of IT8761E super I/O chip. | 87 | Say yes here to support GPIO functionality of IT8761E super I/O chip. |
83 | 88 | ||
@@ -101,7 +106,7 @@ config GPIO_VR41XX | |||
101 | 106 | ||
102 | config GPIO_SCH | 107 | config GPIO_SCH |
103 | tristate "Intel SCH/TunnelCreek GPIO" | 108 | tristate "Intel SCH/TunnelCreek GPIO" |
104 | depends on GPIOLIB && PCI && X86 | 109 | depends on PCI && X86 |
105 | select MFD_CORE | 110 | select MFD_CORE |
106 | select LPC_SCH | 111 | select LPC_SCH |
107 | help | 112 | help |
@@ -121,7 +126,7 @@ config GPIO_SCH | |||
121 | 126 | ||
122 | config GPIO_VX855 | 127 | config GPIO_VX855 |
123 | tristate "VIA VX855/VX875 GPIO" | 128 | tristate "VIA VX855/VX875 GPIO" |
124 | depends on GPIOLIB && MFD_SUPPORT && PCI | 129 | depends on MFD_SUPPORT && PCI |
125 | select MFD_CORE | 130 | select MFD_CORE |
126 | select MFD_VX855 | 131 | select MFD_VX855 |
127 | help | 132 | help |
@@ -347,13 +352,13 @@ config GPIO_ML_IOH | |||
347 | 352 | ||
348 | config GPIO_TIMBERDALE | 353 | config GPIO_TIMBERDALE |
349 | bool "Support for timberdale GPIO IP" | 354 | bool "Support for timberdale GPIO IP" |
350 | depends on MFD_TIMBERDALE && GPIOLIB && HAS_IOMEM | 355 | depends on MFD_TIMBERDALE && HAS_IOMEM |
351 | ---help--- | 356 | ---help--- |
352 | Add support for the GPIO IP in the timberdale FPGA. | 357 | Add support for the GPIO IP in the timberdale FPGA. |
353 | 358 | ||
354 | config GPIO_RDC321X | 359 | config GPIO_RDC321X |
355 | tristate "RDC R-321x GPIO support" | 360 | tristate "RDC R-321x GPIO support" |
356 | depends on PCI && GPIOLIB | 361 | depends on PCI |
357 | select MFD_SUPPORT | 362 | select MFD_SUPPORT |
358 | select MFD_CORE | 363 | select MFD_CORE |
359 | select MFD_RDC321X | 364 | select MFD_RDC321X |
@@ -419,4 +424,11 @@ config AB8500_GPIO | |||
419 | depends on AB8500_CORE && BROKEN | 424 | depends on AB8500_CORE && BROKEN |
420 | help | 425 | help |
421 | Select this to enable the AB8500 IC GPIO driver | 426 | Select this to enable the AB8500 IC GPIO driver |
427 | |||
428 | config GPIO_TPS65910 | ||
429 | bool "TPS65910 GPIO" | ||
430 | depends on MFD_TPS65910 | ||
431 | help | ||
432 | Select this option to enable GPIO driver for the TPS65910 | ||
433 | chip family. | ||
422 | endif | 434 | endif |
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile index becef5954356..6a3387acc0e5 100644 --- a/drivers/gpio/Makefile +++ b/drivers/gpio/Makefile | |||
@@ -1,8 +1,4 @@ | |||
1 | # generic gpio support: dedicated expander chips, etc | 1 | # generic gpio support: platform drivers, dedicated expander chips, etc |
2 | # | ||
3 | # NOTE: platform-specific GPIO drivers don't belong in the | ||
4 | # drivers/gpio directory; put them with other platform setup | ||
5 | # code, IRQ controllers, board init, etc. | ||
6 | 2 | ||
7 | ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG | 3 | ccflags-$(CONFIG_DEBUG_GPIO) += -DDEBUG |
8 | 4 | ||
@@ -10,6 +6,7 @@ obj-$(CONFIG_GPIOLIB) += gpiolib.o | |||
10 | 6 | ||
11 | obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o | 7 | obj-$(CONFIG_GPIO_ADP5520) += adp5520-gpio.o |
12 | obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o | 8 | obj-$(CONFIG_GPIO_ADP5588) += adp5588-gpio.o |
9 | obj-$(CONFIG_GPIO_BASIC_MMIO_CORE) += basic_mmio_gpio.o | ||
13 | obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o | 10 | obj-$(CONFIG_GPIO_BASIC_MMIO) += basic_mmio_gpio.o |
14 | obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o | 11 | obj-$(CONFIG_GPIO_LANGWELL) += langwell_gpio.o |
15 | obj-$(CONFIG_GPIO_MAX730X) += max730x.o | 12 | obj-$(CONFIG_GPIO_MAX730X) += max730x.o |
@@ -43,3 +40,4 @@ obj-$(CONFIG_GPIO_SX150X) += sx150x.o | |||
43 | obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o | 40 | obj-$(CONFIG_GPIO_VX855) += vx855_gpio.o |
44 | obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o | 41 | obj-$(CONFIG_GPIO_ML_IOH) += ml_ioh_gpio.o |
45 | obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o | 42 | obj-$(CONFIG_AB8500_GPIO) += ab8500-gpio.o |
43 | obj-$(CONFIG_GPIO_TPS65910) += tps65910-gpio.o | ||
diff --git a/drivers/gpio/basic_mmio_gpio.c b/drivers/gpio/basic_mmio_gpio.c index 3addea65894e..8152e9f516b0 100644 --- a/drivers/gpio/basic_mmio_gpio.c +++ b/drivers/gpio/basic_mmio_gpio.c | |||
@@ -45,6 +45,7 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.` | |||
45 | */ | 45 | */ |
46 | 46 | ||
47 | #include <linux/init.h> | 47 | #include <linux/init.h> |
48 | #include <linux/err.h> | ||
48 | #include <linux/bug.h> | 49 | #include <linux/bug.h> |
49 | #include <linux/kernel.h> | 50 | #include <linux/kernel.h> |
50 | #include <linux/module.h> | 51 | #include <linux/module.h> |
@@ -61,102 +62,101 @@ o ` ~~~~\___/~~~~ ` controller in FPGA is ,.` | |||
61 | #include <linux/mod_devicetable.h> | 62 | #include <linux/mod_devicetable.h> |
62 | #include <linux/basic_mmio_gpio.h> | 63 | #include <linux/basic_mmio_gpio.h> |
63 | 64 | ||
64 | struct bgpio_chip { | 65 | static void bgpio_write8(void __iomem *reg, unsigned long data) |
65 | struct gpio_chip gc; | 66 | { |
66 | void __iomem *reg_dat; | 67 | writeb(data, reg); |
67 | void __iomem *reg_set; | 68 | } |
68 | void __iomem *reg_clr; | ||
69 | |||
70 | /* Number of bits (GPIOs): <register width> * 8. */ | ||
71 | int bits; | ||
72 | |||
73 | /* | ||
74 | * Some GPIO controllers work with the big-endian bits notation, | ||
75 | * e.g. in a 8-bits register, GPIO7 is the least significant bit. | ||
76 | */ | ||
77 | int big_endian_bits; | ||
78 | |||
79 | /* | ||
80 | * Used to lock bgpio_chip->data. Also, this is needed to keep | ||
81 | * shadowed and real data registers writes together. | ||
82 | */ | ||
83 | spinlock_t lock; | ||
84 | |||
85 | /* Shadowed data register to clear/set bits safely. */ | ||
86 | unsigned long data; | ||
87 | }; | ||
88 | 69 | ||
89 | static struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc) | 70 | static unsigned long bgpio_read8(void __iomem *reg) |
90 | { | 71 | { |
91 | return container_of(gc, struct bgpio_chip, gc); | 72 | return readb(reg); |
92 | } | 73 | } |
93 | 74 | ||
94 | static unsigned long bgpio_in(struct bgpio_chip *bgc) | 75 | static void bgpio_write16(void __iomem *reg, unsigned long data) |
95 | { | 76 | { |
96 | switch (bgc->bits) { | 77 | writew(data, reg); |
97 | case 8: | ||
98 | return __raw_readb(bgc->reg_dat); | ||
99 | case 16: | ||
100 | return __raw_readw(bgc->reg_dat); | ||
101 | case 32: | ||
102 | return __raw_readl(bgc->reg_dat); | ||
103 | #if BITS_PER_LONG >= 64 | ||
104 | case 64: | ||
105 | return __raw_readq(bgc->reg_dat); | ||
106 | #endif | ||
107 | } | ||
108 | return -EINVAL; | ||
109 | } | 78 | } |
110 | 79 | ||
111 | static void bgpio_out(struct bgpio_chip *bgc, void __iomem *reg, | 80 | static unsigned long bgpio_read16(void __iomem *reg) |
112 | unsigned long data) | ||
113 | { | 81 | { |
114 | switch (bgc->bits) { | 82 | return readw(reg); |
115 | case 8: | 83 | } |
116 | __raw_writeb(data, reg); | 84 | |
117 | return; | 85 | static void bgpio_write32(void __iomem *reg, unsigned long data) |
118 | case 16: | 86 | { |
119 | __raw_writew(data, reg); | 87 | writel(data, reg); |
120 | return; | 88 | } |
121 | case 32: | 89 | |
122 | __raw_writel(data, reg); | 90 | static unsigned long bgpio_read32(void __iomem *reg) |
123 | return; | 91 | { |
92 | return readl(reg); | ||
93 | } | ||
94 | |||
124 | #if BITS_PER_LONG >= 64 | 95 | #if BITS_PER_LONG >= 64 |
125 | case 64: | 96 | static void bgpio_write64(void __iomem *reg, unsigned long data) |
126 | __raw_writeq(data, reg); | 97 | { |
127 | return; | 98 | writeq(data, reg); |
128 | #endif | ||
129 | } | ||
130 | } | 99 | } |
131 | 100 | ||
101 | static unsigned long bgpio_read64(void __iomem *reg) | ||
102 | { | ||
103 | return readq(reg); | ||
104 | } | ||
105 | #endif /* BITS_PER_LONG >= 64 */ | ||
106 | |||
132 | static unsigned long bgpio_pin2mask(struct bgpio_chip *bgc, unsigned int pin) | 107 | static unsigned long bgpio_pin2mask(struct bgpio_chip *bgc, unsigned int pin) |
133 | { | 108 | { |
134 | if (bgc->big_endian_bits) | 109 | return 1 << pin; |
135 | return 1 << (bgc->bits - 1 - pin); | 110 | } |
136 | else | 111 | |
137 | return 1 << pin; | 112 | static unsigned long bgpio_pin2mask_be(struct bgpio_chip *bgc, |
113 | unsigned int pin) | ||
114 | { | ||
115 | return 1 << (bgc->bits - 1 - pin); | ||
138 | } | 116 | } |
139 | 117 | ||
140 | static int bgpio_get(struct gpio_chip *gc, unsigned int gpio) | 118 | static int bgpio_get(struct gpio_chip *gc, unsigned int gpio) |
141 | { | 119 | { |
142 | struct bgpio_chip *bgc = to_bgpio_chip(gc); | 120 | struct bgpio_chip *bgc = to_bgpio_chip(gc); |
143 | 121 | ||
144 | return bgpio_in(bgc) & bgpio_pin2mask(bgc, gpio); | 122 | return bgc->read_reg(bgc->reg_dat) & bgc->pin2mask(bgc, gpio); |
145 | } | 123 | } |
146 | 124 | ||
147 | static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) | 125 | static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) |
148 | { | 126 | { |
149 | struct bgpio_chip *bgc = to_bgpio_chip(gc); | 127 | struct bgpio_chip *bgc = to_bgpio_chip(gc); |
150 | unsigned long mask = bgpio_pin2mask(bgc, gpio); | 128 | unsigned long mask = bgc->pin2mask(bgc, gpio); |
151 | unsigned long flags; | 129 | unsigned long flags; |
152 | 130 | ||
153 | if (bgc->reg_set) { | 131 | spin_lock_irqsave(&bgc->lock, flags); |
154 | if (val) | 132 | |
155 | bgpio_out(bgc, bgc->reg_set, mask); | 133 | if (val) |
156 | else | 134 | bgc->data |= mask; |
157 | bgpio_out(bgc, bgc->reg_clr, mask); | 135 | else |
158 | return; | 136 | bgc->data &= ~mask; |
159 | } | 137 | |
138 | bgc->write_reg(bgc->reg_dat, bgc->data); | ||
139 | |||
140 | spin_unlock_irqrestore(&bgc->lock, flags); | ||
141 | } | ||
142 | |||
143 | static void bgpio_set_with_clear(struct gpio_chip *gc, unsigned int gpio, | ||
144 | int val) | ||
145 | { | ||
146 | struct bgpio_chip *bgc = to_bgpio_chip(gc); | ||
147 | unsigned long mask = bgc->pin2mask(bgc, gpio); | ||
148 | |||
149 | if (val) | ||
150 | bgc->write_reg(bgc->reg_set, mask); | ||
151 | else | ||
152 | bgc->write_reg(bgc->reg_clr, mask); | ||
153 | } | ||
154 | |||
155 | static void bgpio_set_set(struct gpio_chip *gc, unsigned int gpio, int val) | ||
156 | { | ||
157 | struct bgpio_chip *bgc = to_bgpio_chip(gc); | ||
158 | unsigned long mask = bgc->pin2mask(bgc, gpio); | ||
159 | unsigned long flags; | ||
160 | 160 | ||
161 | spin_lock_irqsave(&bgc->lock, flags); | 161 | spin_lock_irqsave(&bgc->lock, flags); |
162 | 162 | ||
@@ -165,103 +165,352 @@ static void bgpio_set(struct gpio_chip *gc, unsigned int gpio, int val) | |||
165 | else | 165 | else |
166 | bgc->data &= ~mask; | 166 | bgc->data &= ~mask; |
167 | 167 | ||
168 | bgpio_out(bgc, bgc->reg_dat, bgc->data); | 168 | bgc->write_reg(bgc->reg_set, bgc->data); |
169 | 169 | ||
170 | spin_unlock_irqrestore(&bgc->lock, flags); | 170 | spin_unlock_irqrestore(&bgc->lock, flags); |
171 | } | 171 | } |
172 | 172 | ||
173 | static int bgpio_simple_dir_in(struct gpio_chip *gc, unsigned int gpio) | ||
174 | { | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | static int bgpio_simple_dir_out(struct gpio_chip *gc, unsigned int gpio, | ||
179 | int val) | ||
180 | { | ||
181 | gc->set(gc, gpio, val); | ||
182 | |||
183 | return 0; | ||
184 | } | ||
185 | |||
173 | static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio) | 186 | static int bgpio_dir_in(struct gpio_chip *gc, unsigned int gpio) |
174 | { | 187 | { |
188 | struct bgpio_chip *bgc = to_bgpio_chip(gc); | ||
189 | unsigned long flags; | ||
190 | |||
191 | spin_lock_irqsave(&bgc->lock, flags); | ||
192 | |||
193 | bgc->dir &= ~bgc->pin2mask(bgc, gpio); | ||
194 | bgc->write_reg(bgc->reg_dir, bgc->dir); | ||
195 | |||
196 | spin_unlock_irqrestore(&bgc->lock, flags); | ||
197 | |||
175 | return 0; | 198 | return 0; |
176 | } | 199 | } |
177 | 200 | ||
178 | static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) | 201 | static int bgpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) |
179 | { | 202 | { |
180 | bgpio_set(gc, gpio, val); | 203 | struct bgpio_chip *bgc = to_bgpio_chip(gc); |
204 | unsigned long flags; | ||
205 | |||
206 | gc->set(gc, gpio, val); | ||
207 | |||
208 | spin_lock_irqsave(&bgc->lock, flags); | ||
209 | |||
210 | bgc->dir |= bgc->pin2mask(bgc, gpio); | ||
211 | bgc->write_reg(bgc->reg_dir, bgc->dir); | ||
212 | |||
213 | spin_unlock_irqrestore(&bgc->lock, flags); | ||
214 | |||
181 | return 0; | 215 | return 0; |
182 | } | 216 | } |
183 | 217 | ||
184 | static int __devinit bgpio_probe(struct platform_device *pdev) | 218 | static int bgpio_dir_in_inv(struct gpio_chip *gc, unsigned int gpio) |
185 | { | 219 | { |
186 | const struct platform_device_id *platid = platform_get_device_id(pdev); | 220 | struct bgpio_chip *bgc = to_bgpio_chip(gc); |
187 | struct device *dev = &pdev->dev; | 221 | unsigned long flags; |
188 | struct bgpio_pdata *pdata = dev_get_platdata(dev); | ||
189 | struct bgpio_chip *bgc; | ||
190 | struct resource *res_dat; | ||
191 | struct resource *res_set; | ||
192 | struct resource *res_clr; | ||
193 | resource_size_t dat_sz; | ||
194 | int bits; | ||
195 | int ret; | ||
196 | 222 | ||
197 | res_dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat"); | 223 | spin_lock_irqsave(&bgc->lock, flags); |
198 | if (!res_dat) | ||
199 | return -EINVAL; | ||
200 | 224 | ||
201 | dat_sz = resource_size(res_dat); | 225 | bgc->dir |= bgc->pin2mask(bgc, gpio); |
202 | if (!is_power_of_2(dat_sz)) | 226 | bgc->write_reg(bgc->reg_dir, bgc->dir); |
203 | return -EINVAL; | 227 | |
228 | spin_unlock_irqrestore(&bgc->lock, flags); | ||
229 | |||
230 | return 0; | ||
231 | } | ||
204 | 232 | ||
205 | bits = dat_sz * 8; | 233 | static int bgpio_dir_out_inv(struct gpio_chip *gc, unsigned int gpio, int val) |
206 | if (bits > BITS_PER_LONG) | 234 | { |
235 | struct bgpio_chip *bgc = to_bgpio_chip(gc); | ||
236 | unsigned long flags; | ||
237 | |||
238 | gc->set(gc, gpio, val); | ||
239 | |||
240 | spin_lock_irqsave(&bgc->lock, flags); | ||
241 | |||
242 | bgc->dir &= ~bgc->pin2mask(bgc, gpio); | ||
243 | bgc->write_reg(bgc->reg_dir, bgc->dir); | ||
244 | |||
245 | spin_unlock_irqrestore(&bgc->lock, flags); | ||
246 | |||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | static int bgpio_setup_accessors(struct device *dev, | ||
251 | struct bgpio_chip *bgc, | ||
252 | bool be) | ||
253 | { | ||
254 | |||
255 | switch (bgc->bits) { | ||
256 | case 8: | ||
257 | bgc->read_reg = bgpio_read8; | ||
258 | bgc->write_reg = bgpio_write8; | ||
259 | break; | ||
260 | case 16: | ||
261 | bgc->read_reg = bgpio_read16; | ||
262 | bgc->write_reg = bgpio_write16; | ||
263 | break; | ||
264 | case 32: | ||
265 | bgc->read_reg = bgpio_read32; | ||
266 | bgc->write_reg = bgpio_write32; | ||
267 | break; | ||
268 | #if BITS_PER_LONG >= 64 | ||
269 | case 64: | ||
270 | bgc->read_reg = bgpio_read64; | ||
271 | bgc->write_reg = bgpio_write64; | ||
272 | break; | ||
273 | #endif /* BITS_PER_LONG >= 64 */ | ||
274 | default: | ||
275 | dev_err(dev, "unsupported data width %u bits\n", bgc->bits); | ||
207 | return -EINVAL; | 276 | return -EINVAL; |
277 | } | ||
208 | 278 | ||
209 | bgc = devm_kzalloc(dev, sizeof(*bgc), GFP_KERNEL); | 279 | bgc->pin2mask = be ? bgpio_pin2mask_be : bgpio_pin2mask; |
210 | if (!bgc) | 280 | |
211 | return -ENOMEM; | 281 | return 0; |
282 | } | ||
283 | |||
284 | /* | ||
285 | * Create the device and allocate the resources. For setting GPIO's there are | ||
286 | * three supported configurations: | ||
287 | * | ||
288 | * - single input/output register resource (named "dat"). | ||
289 | * - set/clear pair (named "set" and "clr"). | ||
290 | * - single output register resource and single input resource ("set" and | ||
291 | * dat"). | ||
292 | * | ||
293 | * For the single output register, this drives a 1 by setting a bit and a zero | ||
294 | * by clearing a bit. For the set clr pair, this drives a 1 by setting a bit | ||
295 | * in the set register and clears it by setting a bit in the clear register. | ||
296 | * The configuration is detected by which resources are present. | ||
297 | * | ||
298 | * For setting the GPIO direction, there are three supported configurations: | ||
299 | * | ||
300 | * - simple bidirectional GPIO that requires no configuration. | ||
301 | * - an output direction register (named "dirout") where a 1 bit | ||
302 | * indicates the GPIO is an output. | ||
303 | * - an input direction register (named "dirin") where a 1 bit indicates | ||
304 | * the GPIO is an input. | ||
305 | */ | ||
306 | static int bgpio_setup_io(struct bgpio_chip *bgc, | ||
307 | void __iomem *dat, | ||
308 | void __iomem *set, | ||
309 | void __iomem *clr) | ||
310 | { | ||
212 | 311 | ||
213 | bgc->reg_dat = devm_ioremap(dev, res_dat->start, dat_sz); | 312 | bgc->reg_dat = dat; |
214 | if (!bgc->reg_dat) | 313 | if (!bgc->reg_dat) |
215 | return -ENOMEM; | 314 | return -EINVAL; |
315 | |||
316 | if (set && clr) { | ||
317 | bgc->reg_set = set; | ||
318 | bgc->reg_clr = clr; | ||
319 | bgc->gc.set = bgpio_set_with_clear; | ||
320 | } else if (set && !clr) { | ||
321 | bgc->reg_set = set; | ||
322 | bgc->gc.set = bgpio_set_set; | ||
323 | } else { | ||
324 | bgc->gc.set = bgpio_set; | ||
325 | } | ||
326 | |||
327 | bgc->gc.get = bgpio_get; | ||
328 | |||
329 | return 0; | ||
330 | } | ||
216 | 331 | ||
217 | res_set = platform_get_resource_byname(pdev, IORESOURCE_MEM, "set"); | 332 | static int bgpio_setup_direction(struct bgpio_chip *bgc, |
218 | res_clr = platform_get_resource_byname(pdev, IORESOURCE_MEM, "clr"); | 333 | void __iomem *dirout, |
219 | if (res_set && res_clr) { | 334 | void __iomem *dirin) |
220 | if (resource_size(res_set) != resource_size(res_clr) || | 335 | { |
221 | resource_size(res_set) != dat_sz) | 336 | if (dirout && dirin) { |
222 | return -EINVAL; | ||
223 | |||
224 | bgc->reg_set = devm_ioremap(dev, res_set->start, dat_sz); | ||
225 | bgc->reg_clr = devm_ioremap(dev, res_clr->start, dat_sz); | ||
226 | if (!bgc->reg_set || !bgc->reg_clr) | ||
227 | return -ENOMEM; | ||
228 | } else if (res_set || res_clr) { | ||
229 | return -EINVAL; | 337 | return -EINVAL; |
338 | } else if (dirout) { | ||
339 | bgc->reg_dir = dirout; | ||
340 | bgc->gc.direction_output = bgpio_dir_out; | ||
341 | bgc->gc.direction_input = bgpio_dir_in; | ||
342 | } else if (dirin) { | ||
343 | bgc->reg_dir = dirin; | ||
344 | bgc->gc.direction_output = bgpio_dir_out_inv; | ||
345 | bgc->gc.direction_input = bgpio_dir_in_inv; | ||
346 | } else { | ||
347 | bgc->gc.direction_output = bgpio_simple_dir_out; | ||
348 | bgc->gc.direction_input = bgpio_simple_dir_in; | ||
230 | } | 349 | } |
231 | 350 | ||
232 | spin_lock_init(&bgc->lock); | 351 | return 0; |
352 | } | ||
233 | 353 | ||
234 | bgc->bits = bits; | 354 | int __devexit bgpio_remove(struct bgpio_chip *bgc) |
235 | bgc->big_endian_bits = !strcmp(platid->name, "basic-mmio-gpio-be"); | 355 | { |
236 | bgc->data = bgpio_in(bgc); | 356 | int err = gpiochip_remove(&bgc->gc); |
237 | 357 | ||
238 | bgc->gc.ngpio = bits; | 358 | kfree(bgc); |
239 | bgc->gc.direction_input = bgpio_dir_in; | 359 | |
240 | bgc->gc.direction_output = bgpio_dir_out; | 360 | return err; |
241 | bgc->gc.get = bgpio_get; | 361 | } |
242 | bgc->gc.set = bgpio_set; | 362 | EXPORT_SYMBOL_GPL(bgpio_remove); |
363 | |||
364 | int __devinit bgpio_init(struct bgpio_chip *bgc, | ||
365 | struct device *dev, | ||
366 | unsigned long sz, | ||
367 | void __iomem *dat, | ||
368 | void __iomem *set, | ||
369 | void __iomem *clr, | ||
370 | void __iomem *dirout, | ||
371 | void __iomem *dirin, | ||
372 | bool big_endian) | ||
373 | { | ||
374 | int ret; | ||
375 | |||
376 | if (!is_power_of_2(sz)) | ||
377 | return -EINVAL; | ||
378 | |||
379 | bgc->bits = sz * 8; | ||
380 | if (bgc->bits > BITS_PER_LONG) | ||
381 | return -EINVAL; | ||
382 | |||
383 | spin_lock_init(&bgc->lock); | ||
243 | bgc->gc.dev = dev; | 384 | bgc->gc.dev = dev; |
244 | bgc->gc.label = dev_name(dev); | 385 | bgc->gc.label = dev_name(dev); |
386 | bgc->gc.base = -1; | ||
387 | bgc->gc.ngpio = bgc->bits; | ||
245 | 388 | ||
246 | if (pdata) | 389 | ret = bgpio_setup_io(bgc, dat, set, clr); |
247 | bgc->gc.base = pdata->base; | 390 | if (ret) |
248 | else | 391 | return ret; |
249 | bgc->gc.base = -1; | ||
250 | 392 | ||
251 | dev_set_drvdata(dev, bgc); | 393 | ret = bgpio_setup_accessors(dev, bgc, big_endian); |
394 | if (ret) | ||
395 | return ret; | ||
252 | 396 | ||
253 | ret = gpiochip_add(&bgc->gc); | 397 | ret = bgpio_setup_direction(bgc, dirout, dirin); |
254 | if (ret) | 398 | if (ret) |
255 | dev_err(dev, "gpiochip_add() failed: %d\n", ret); | 399 | return ret; |
400 | |||
401 | bgc->data = bgc->read_reg(bgc->reg_dat); | ||
256 | 402 | ||
257 | return ret; | 403 | return ret; |
258 | } | 404 | } |
405 | EXPORT_SYMBOL_GPL(bgpio_init); | ||
406 | |||
407 | #ifdef CONFIG_GPIO_BASIC_MMIO | ||
259 | 408 | ||
260 | static int __devexit bgpio_remove(struct platform_device *pdev) | 409 | static void __iomem *bgpio_map(struct platform_device *pdev, |
410 | const char *name, | ||
411 | resource_size_t sane_sz, | ||
412 | int *err) | ||
261 | { | 413 | { |
262 | struct bgpio_chip *bgc = dev_get_drvdata(&pdev->dev); | 414 | struct device *dev = &pdev->dev; |
415 | struct resource *r; | ||
416 | resource_size_t start; | ||
417 | resource_size_t sz; | ||
418 | void __iomem *ret; | ||
419 | |||
420 | *err = 0; | ||
421 | |||
422 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, name); | ||
423 | if (!r) | ||
424 | return NULL; | ||
263 | 425 | ||
264 | return gpiochip_remove(&bgc->gc); | 426 | sz = resource_size(r); |
427 | if (sz != sane_sz) { | ||
428 | *err = -EINVAL; | ||
429 | return NULL; | ||
430 | } | ||
431 | |||
432 | start = r->start; | ||
433 | if (!devm_request_mem_region(dev, start, sz, r->name)) { | ||
434 | *err = -EBUSY; | ||
435 | return NULL; | ||
436 | } | ||
437 | |||
438 | ret = devm_ioremap(dev, start, sz); | ||
439 | if (!ret) { | ||
440 | *err = -ENOMEM; | ||
441 | return NULL; | ||
442 | } | ||
443 | |||
444 | return ret; | ||
445 | } | ||
446 | |||
447 | static int __devinit bgpio_pdev_probe(struct platform_device *pdev) | ||
448 | { | ||
449 | struct device *dev = &pdev->dev; | ||
450 | struct resource *r; | ||
451 | void __iomem *dat; | ||
452 | void __iomem *set; | ||
453 | void __iomem *clr; | ||
454 | void __iomem *dirout; | ||
455 | void __iomem *dirin; | ||
456 | unsigned long sz; | ||
457 | bool be; | ||
458 | int err; | ||
459 | struct bgpio_chip *bgc; | ||
460 | struct bgpio_pdata *pdata = dev_get_platdata(dev); | ||
461 | |||
462 | r = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat"); | ||
463 | if (!r) | ||
464 | return -EINVAL; | ||
465 | |||
466 | sz = resource_size(r); | ||
467 | |||
468 | dat = bgpio_map(pdev, "dat", sz, &err); | ||
469 | if (!dat) | ||
470 | return err ? err : -EINVAL; | ||
471 | |||
472 | set = bgpio_map(pdev, "set", sz, &err); | ||
473 | if (err) | ||
474 | return err; | ||
475 | |||
476 | clr = bgpio_map(pdev, "clr", sz, &err); | ||
477 | if (err) | ||
478 | return err; | ||
479 | |||
480 | dirout = bgpio_map(pdev, "dirout", sz, &err); | ||
481 | if (err) | ||
482 | return err; | ||
483 | |||
484 | dirin = bgpio_map(pdev, "dirin", sz, &err); | ||
485 | if (err) | ||
486 | return err; | ||
487 | |||
488 | be = !strcmp(platform_get_device_id(pdev)->name, "basic-mmio-gpio-be"); | ||
489 | |||
490 | bgc = devm_kzalloc(&pdev->dev, sizeof(*bgc), GFP_KERNEL); | ||
491 | if (!bgc) | ||
492 | return -ENOMEM; | ||
493 | |||
494 | err = bgpio_init(bgc, dev, sz, dat, set, clr, dirout, dirin, be); | ||
495 | if (err) | ||
496 | return err; | ||
497 | |||
498 | if (pdata) { | ||
499 | bgc->gc.base = pdata->base; | ||
500 | if (pdata->ngpio > 0) | ||
501 | bgc->gc.ngpio = pdata->ngpio; | ||
502 | } | ||
503 | |||
504 | platform_set_drvdata(pdev, bgc); | ||
505 | |||
506 | return gpiochip_add(&bgc->gc); | ||
507 | } | ||
508 | |||
509 | static int __devexit bgpio_pdev_remove(struct platform_device *pdev) | ||
510 | { | ||
511 | struct bgpio_chip *bgc = platform_get_drvdata(pdev); | ||
512 | |||
513 | return bgpio_remove(bgc); | ||
265 | } | 514 | } |
266 | 515 | ||
267 | static const struct platform_device_id bgpio_id_table[] = { | 516 | static const struct platform_device_id bgpio_id_table[] = { |
@@ -276,21 +525,23 @@ static struct platform_driver bgpio_driver = { | |||
276 | .name = "basic-mmio-gpio", | 525 | .name = "basic-mmio-gpio", |
277 | }, | 526 | }, |
278 | .id_table = bgpio_id_table, | 527 | .id_table = bgpio_id_table, |
279 | .probe = bgpio_probe, | 528 | .probe = bgpio_pdev_probe, |
280 | .remove = __devexit_p(bgpio_remove), | 529 | .remove = __devexit_p(bgpio_pdev_remove), |
281 | }; | 530 | }; |
282 | 531 | ||
283 | static int __init bgpio_init(void) | 532 | static int __init bgpio_platform_init(void) |
284 | { | 533 | { |
285 | return platform_driver_register(&bgpio_driver); | 534 | return platform_driver_register(&bgpio_driver); |
286 | } | 535 | } |
287 | module_init(bgpio_init); | 536 | module_init(bgpio_platform_init); |
288 | 537 | ||
289 | static void __exit bgpio_exit(void) | 538 | static void __exit bgpio_platform_exit(void) |
290 | { | 539 | { |
291 | platform_driver_unregister(&bgpio_driver); | 540 | platform_driver_unregister(&bgpio_driver); |
292 | } | 541 | } |
293 | module_exit(bgpio_exit); | 542 | module_exit(bgpio_platform_exit); |
543 | |||
544 | #endif /* CONFIG_GPIO_BASIC_MMIO */ | ||
294 | 545 | ||
295 | MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers"); | 546 | MODULE_DESCRIPTION("Driver for basic memory-mapped GPIO controllers"); |
296 | MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>"); | 547 | MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>"); |
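basic_mmio_gpio.c is rewritten above as a small library: bgpio_init()/bgpio_remove() are exported for other drivers (selected via GPIO_BASIC_MMIO_CORE), and the original platform driver becomes an optional wrapper around them. A hedged sketch of a consumer of the new entry point follows; the foo_* names, the 4-byte register width and the dat/dirout layout are invented for illustration, while the bgpio_init() signature is the one added by the patch.

/*
 * Hypothetical platform driver built on the new bgpio library.  The
 * register layout (data at +0x0, direction-out at +0x4, 32-bit wide,
 * little-endian bit numbering) is invented for the example.
 */
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/basic_mmio_gpio.h>

static int __devinit foo_gpio_probe(struct platform_device *pdev)
{
	struct bgpio_chip *bgc;
	struct resource *res;
	void __iomem *base;
	int err;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	base = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	bgc = kzalloc(sizeof(*bgc), GFP_KERNEL);
	if (!bgc)
		return -ENOMEM;

	/* 4-byte data register, direction-out register, no set/clr pair */
	err = bgpio_init(bgc, &pdev->dev, 4, base, NULL, NULL,
			 base + 0x4, NULL, false);
	if (err)
		goto err_free;

	platform_set_drvdata(pdev, bgc);

	err = gpiochip_add(&bgc->gc);
	if (err)
		goto err_free;

	return 0;

err_free:
	kfree(bgc);
	return err;
}

A matching remove() would call bgpio_remove(platform_get_drvdata(pdev)), which unregisters the gpio_chip and frees the structure, much as the wrapper driver in the patch does.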
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c index 36a2974815b7..137a8ca67822 100644 --- a/drivers/gpio/gpiolib.c +++ b/drivers/gpio/gpiolib.c | |||
@@ -12,6 +12,8 @@ | |||
12 | #include <linux/idr.h> | 12 | #include <linux/idr.h> |
13 | #include <linux/slab.h> | 13 | #include <linux/slab.h> |
14 | 14 | ||
15 | #define CREATE_TRACE_POINTS | ||
16 | #include <trace/events/gpio.h> | ||
15 | 17 | ||
16 | /* Optional implementation infrastructure for GPIO interfaces. | 18 | /* Optional implementation infrastructure for GPIO interfaces. |
17 | * | 19 | * |
@@ -1165,6 +1167,7 @@ struct gpio_chip *gpiochip_find(void *data, | |||
1165 | 1167 | ||
1166 | return chip; | 1168 | return chip; |
1167 | } | 1169 | } |
1170 | EXPORT_SYMBOL_GPL(gpiochip_find); | ||
1168 | 1171 | ||
1169 | /* These "optional" allocation calls help prevent drivers from stomping | 1172 | /* These "optional" allocation calls help prevent drivers from stomping |
1170 | * on each other, and help provide better diagnostics in debugfs. | 1173 | * on each other, and help provide better diagnostics in debugfs. |
@@ -1404,6 +1407,8 @@ int gpio_direction_input(unsigned gpio) | |||
1404 | status = chip->direction_input(chip, gpio); | 1407 | status = chip->direction_input(chip, gpio); |
1405 | if (status == 0) | 1408 | if (status == 0) |
1406 | clear_bit(FLAG_IS_OUT, &desc->flags); | 1409 | clear_bit(FLAG_IS_OUT, &desc->flags); |
1410 | |||
1411 | trace_gpio_direction(chip->base + gpio, 1, status); | ||
1407 | lose: | 1412 | lose: |
1408 | return status; | 1413 | return status; |
1409 | fail: | 1414 | fail: |
@@ -1457,6 +1462,8 @@ int gpio_direction_output(unsigned gpio, int value) | |||
1457 | status = chip->direction_output(chip, gpio, value); | 1462 | status = chip->direction_output(chip, gpio, value); |
1458 | if (status == 0) | 1463 | if (status == 0) |
1459 | set_bit(FLAG_IS_OUT, &desc->flags); | 1464 | set_bit(FLAG_IS_OUT, &desc->flags); |
1465 | trace_gpio_value(chip->base + gpio, 0, value); | ||
1466 | trace_gpio_direction(chip->base + gpio, 0, status); | ||
1460 | lose: | 1467 | lose: |
1461 | return status; | 1468 | return status; |
1462 | fail: | 1469 | fail: |
@@ -1546,10 +1553,13 @@ EXPORT_SYMBOL_GPL(gpio_set_debounce); | |||
1546 | int __gpio_get_value(unsigned gpio) | 1553 | int __gpio_get_value(unsigned gpio) |
1547 | { | 1554 | { |
1548 | struct gpio_chip *chip; | 1555 | struct gpio_chip *chip; |
1556 | int value; | ||
1549 | 1557 | ||
1550 | chip = gpio_to_chip(gpio); | 1558 | chip = gpio_to_chip(gpio); |
1551 | WARN_ON(chip->can_sleep); | 1559 | WARN_ON(chip->can_sleep); |
1552 | return chip->get ? chip->get(chip, gpio - chip->base) : 0; | 1560 | value = chip->get ? chip->get(chip, gpio - chip->base) : 0; |
1561 | trace_gpio_value(gpio, 1, value); | ||
1562 | return value; | ||
1553 | } | 1563 | } |
1554 | EXPORT_SYMBOL_GPL(__gpio_get_value); | 1564 | EXPORT_SYMBOL_GPL(__gpio_get_value); |
1555 | 1565 | ||
@@ -1568,6 +1578,7 @@ void __gpio_set_value(unsigned gpio, int value) | |||
1568 | 1578 | ||
1569 | chip = gpio_to_chip(gpio); | 1579 | chip = gpio_to_chip(gpio); |
1570 | WARN_ON(chip->can_sleep); | 1580 | WARN_ON(chip->can_sleep); |
1581 | trace_gpio_value(gpio, 0, value); | ||
1571 | chip->set(chip, gpio - chip->base, value); | 1582 | chip->set(chip, gpio - chip->base, value); |
1572 | } | 1583 | } |
1573 | EXPORT_SYMBOL_GPL(__gpio_set_value); | 1584 | EXPORT_SYMBOL_GPL(__gpio_set_value); |
@@ -1618,10 +1629,13 @@ EXPORT_SYMBOL_GPL(__gpio_to_irq); | |||
1618 | int gpio_get_value_cansleep(unsigned gpio) | 1629 | int gpio_get_value_cansleep(unsigned gpio) |
1619 | { | 1630 | { |
1620 | struct gpio_chip *chip; | 1631 | struct gpio_chip *chip; |
1632 | int value; | ||
1621 | 1633 | ||
1622 | might_sleep_if(extra_checks); | 1634 | might_sleep_if(extra_checks); |
1623 | chip = gpio_to_chip(gpio); | 1635 | chip = gpio_to_chip(gpio); |
1624 | return chip->get ? chip->get(chip, gpio - chip->base) : 0; | 1636 | value = chip->get ? chip->get(chip, gpio - chip->base) : 0; |
1637 | trace_gpio_value(gpio, 1, value); | ||
1638 | return value; | ||
1625 | } | 1639 | } |
1626 | EXPORT_SYMBOL_GPL(gpio_get_value_cansleep); | 1640 | EXPORT_SYMBOL_GPL(gpio_get_value_cansleep); |
1627 | 1641 | ||
@@ -1631,6 +1645,7 @@ void gpio_set_value_cansleep(unsigned gpio, int value) | |||
1631 | 1645 | ||
1632 | might_sleep_if(extra_checks); | 1646 | might_sleep_if(extra_checks); |
1633 | chip = gpio_to_chip(gpio); | 1647 | chip = gpio_to_chip(gpio); |
1648 | trace_gpio_value(gpio, 0, value); | ||
1634 | chip->set(chip, gpio - chip->base, value); | 1649 | chip->set(chip, gpio - chip->base, value); |
1635 | } | 1650 | } |
1636 | EXPORT_SYMBOL_GPL(gpio_set_value_cansleep); | 1651 | EXPORT_SYMBOL_GPL(gpio_set_value_cansleep); |
diff --git a/drivers/gpio/janz-ttl.c b/drivers/gpio/janz-ttl.c index 2514fb075f4a..813ac077e5d7 100644 --- a/drivers/gpio/janz-ttl.c +++ b/drivers/gpio/janz-ttl.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/mfd/core.h> | ||
19 | #include <linux/io.h> | 18 | #include <linux/io.h> |
20 | #include <linux/gpio.h> | 19 | #include <linux/gpio.h> |
21 | #include <linux/slab.h> | 20 | #include <linux/slab.h> |
@@ -150,7 +149,7 @@ static int __devinit ttl_probe(struct platform_device *pdev) | |||
150 | struct resource *res; | 149 | struct resource *res; |
151 | int ret; | 150 | int ret; |
152 | 151 | ||
153 | pdata = mfd_get_data(pdev); | 152 | pdata = pdev->dev.platform_data; |
154 | if (!pdata) { | 153 | if (!pdata) { |
155 | dev_err(dev, "no platform data\n"); | 154 | dev_err(dev, "no platform data\n"); |
156 | ret = -ENXIO; | 155 | ret = -ENXIO; |
diff --git a/drivers/gpio/pca953x.c b/drivers/gpio/pca953x.c index 7630ab7b9bec..78a843947d82 100644 --- a/drivers/gpio/pca953x.c +++ b/drivers/gpio/pca953x.c | |||
@@ -397,7 +397,7 @@ static int pca953x_irq_setup(struct pca953x_chip *chip, | |||
397 | 397 | ||
398 | irq_set_chip_data(irq, chip); | 398 | irq_set_chip_data(irq, chip); |
399 | irq_set_chip_and_handler(irq, &pca953x_irq_chip, | 399 | irq_set_chip_and_handler(irq, &pca953x_irq_chip, |
400 | handle_edge_irq); | 400 | handle_simple_irq); |
401 | #ifdef CONFIG_ARM | 401 | #ifdef CONFIG_ARM |
402 | set_irq_flags(irq, IRQF_VALID); | 402 | set_irq_flags(irq, IRQF_VALID); |
403 | #else | 403 | #else |
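The pca953x change switches the expander's child interrupts from handle_edge_irq to handle_simple_irq: those interrupts are demultiplexed from the chip's own threaded I2C handler and have no per-IRQ hardware ack for an edge flow handler to drive, so the simple flow handler is the appropriate one. A rough sketch of that kind of virtual-IRQ setup, with hypothetical foo_* names:

/*
 * Hypothetical helper for wiring up one demultiplexed expander interrupt.
 * The flow handler does no hardware ack/mask work; the expander's threaded
 * handler is what actually services and clears the source.
 */
#include <linux/interrupt.h>
#include <linux/irq.h>

static void foo_setup_child_irq(struct irq_chip *chip_ops, void *chip_data,
				unsigned int virq)
{
	irq_set_chip_data(virq, chip_data);
	irq_set_chip_and_handler(virq, chip_ops, handle_simple_irq);
#ifdef CONFIG_ARM
	set_irq_flags(virq, IRQF_VALID);	/* mark the descriptor usable */
#else
	irq_set_noprobe(virq);
#endif
}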
diff --git a/drivers/gpio/rdc321x-gpio.c b/drivers/gpio/rdc321x-gpio.c index a9bda881935a..2762698e0204 100644 --- a/drivers/gpio/rdc321x-gpio.c +++ b/drivers/gpio/rdc321x-gpio.c | |||
@@ -27,7 +27,6 @@ | |||
27 | #include <linux/pci.h> | 27 | #include <linux/pci.h> |
28 | #include <linux/gpio.h> | 28 | #include <linux/gpio.h> |
29 | #include <linux/mfd/rdc321x.h> | 29 | #include <linux/mfd/rdc321x.h> |
30 | #include <linux/mfd/core.h> | ||
31 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
32 | 31 | ||
33 | struct rdc321x_gpio { | 32 | struct rdc321x_gpio { |
@@ -136,7 +135,7 @@ static int __devinit rdc321x_gpio_probe(struct platform_device *pdev) | |||
136 | struct rdc321x_gpio *rdc321x_gpio_dev; | 135 | struct rdc321x_gpio *rdc321x_gpio_dev; |
137 | struct rdc321x_gpio_pdata *pdata; | 136 | struct rdc321x_gpio_pdata *pdata; |
138 | 137 | ||
139 | pdata = mfd_get_data(pdev); | 138 | pdata = pdev->dev.platform_data; |
140 | if (!pdata) { | 139 | if (!pdata) { |
141 | dev_err(&pdev->dev, "no platform data supplied\n"); | 140 | dev_err(&pdev->dev, "no platform data supplied\n"); |
142 | return -ENODEV; | 141 | return -ENODEV; |
diff --git a/drivers/gpio/timbgpio.c b/drivers/gpio/timbgpio.c index edbe1eae531f..0265872e57d1 100644 --- a/drivers/gpio/timbgpio.c +++ b/drivers/gpio/timbgpio.c | |||
@@ -23,7 +23,6 @@ | |||
23 | #include <linux/module.h> | 23 | #include <linux/module.h> |
24 | #include <linux/gpio.h> | 24 | #include <linux/gpio.h> |
25 | #include <linux/platform_device.h> | 25 | #include <linux/platform_device.h> |
26 | #include <linux/mfd/core.h> | ||
27 | #include <linux/irq.h> | 26 | #include <linux/irq.h> |
28 | #include <linux/io.h> | 27 | #include <linux/io.h> |
29 | #include <linux/timb_gpio.h> | 28 | #include <linux/timb_gpio.h> |
@@ -229,7 +228,7 @@ static int __devinit timbgpio_probe(struct platform_device *pdev) | |||
229 | struct gpio_chip *gc; | 228 | struct gpio_chip *gc; |
230 | struct timbgpio *tgpio; | 229 | struct timbgpio *tgpio; |
231 | struct resource *iomem; | 230 | struct resource *iomem; |
232 | struct timbgpio_platform_data *pdata = mfd_get_data(pdev); | 231 | struct timbgpio_platform_data *pdata = pdev->dev.platform_data; |
233 | int irq = platform_get_irq(pdev, 0); | 232 | int irq = platform_get_irq(pdev, 0); |
234 | 233 | ||
235 | if (!pdata || pdata->nr_pins > 32) { | 234 | if (!pdata || pdata->nr_pins > 32) { |
@@ -320,13 +319,14 @@ err_mem: | |||
320 | static int __devexit timbgpio_remove(struct platform_device *pdev) | 319 | static int __devexit timbgpio_remove(struct platform_device *pdev) |
321 | { | 320 | { |
322 | int err; | 321 | int err; |
322 | struct timbgpio_platform_data *pdata = pdev->dev.platform_data; | ||
323 | struct timbgpio *tgpio = platform_get_drvdata(pdev); | 323 | struct timbgpio *tgpio = platform_get_drvdata(pdev); |
324 | struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 324 | struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
325 | int irq = platform_get_irq(pdev, 0); | 325 | int irq = platform_get_irq(pdev, 0); |
326 | 326 | ||
327 | if (irq >= 0 && tgpio->irq_base > 0) { | 327 | if (irq >= 0 && tgpio->irq_base > 0) { |
328 | int i; | 328 | int i; |
329 | for (i = 0; i < tgpio->gpio.ngpio; i++) { | 329 | for (i = 0; i < pdata->nr_pins; i++) { |
330 | irq_set_chip(tgpio->irq_base + i, NULL); | 330 | irq_set_chip(tgpio->irq_base + i, NULL); |
331 | irq_set_chip_data(tgpio->irq_base + i, NULL); | 331 | irq_set_chip_data(tgpio->irq_base + i, NULL); |
332 | } | 332 | } |
diff --git a/drivers/gpio/tps65910-gpio.c b/drivers/gpio/tps65910-gpio.c new file mode 100644 index 000000000000..8d1ddfdd63eb --- /dev/null +++ b/drivers/gpio/tps65910-gpio.c | |||
@@ -0,0 +1,100 @@ | |||
1 | /* | ||
2 | * tps65910-gpio.c -- TI TPS6591x | ||
3 | * | ||
4 | * Copyright 2010 Texas Instruments Inc. | ||
5 | * | ||
6 | * Author: Graeme Gregory <gg@slimlogic.co.uk> | ||
7 | * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the | ||
11 | * Free Software Foundation; either version 2 of the License, or (at your | ||
12 | * option) any later version. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/gpio.h> | ||
20 | #include <linux/i2c.h> | ||
21 | #include <linux/mfd/tps65910.h> | ||
22 | |||
23 | static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset) | ||
24 | { | ||
25 | struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio); | ||
26 | uint8_t val; | ||
27 | |||
28 | tps65910->read(tps65910, TPS65910_GPIO0 + offset, 1, &val); | ||
29 | |||
30 | if (val & GPIO_STS_MASK) | ||
31 | return 1; | ||
32 | |||
33 | return 0; | ||
34 | } | ||
35 | |||
36 | static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset, | ||
37 | int value) | ||
38 | { | ||
39 | struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio); | ||
40 | |||
41 | if (value) | ||
42 | tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset, | ||
43 | GPIO_SET_MASK); | ||
44 | else | ||
45 | tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset, | ||
46 | GPIO_SET_MASK); | ||
47 | } | ||
48 | |||
49 | static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset, | ||
50 | int value) | ||
51 | { | ||
52 | struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio); | ||
53 | |||
54 | /* Set the initial value */ | ||
55 | tps65910_gpio_set(gc, 0, value); | ||
56 | |||
57 | return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset, | ||
58 | GPIO_CFG_MASK); | ||
59 | } | ||
60 | |||
61 | static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset) | ||
62 | { | ||
63 | struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio); | ||
64 | |||
65 | return tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset, | ||
66 | GPIO_CFG_MASK); | ||
67 | } | ||
68 | |||
69 | void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base) | ||
70 | { | ||
71 | int ret; | ||
72 | |||
73 | if (!gpio_base) | ||
74 | return; | ||
75 | |||
76 | tps65910->gpio.owner = THIS_MODULE; | ||
77 | tps65910->gpio.label = tps65910->i2c_client->name; | ||
78 | tps65910->gpio.dev = tps65910->dev; | ||
79 | tps65910->gpio.base = gpio_base; | ||
80 | |||
81 | switch(tps65910_chip_id(tps65910)) { | ||
82 | case TPS65910: | ||
83 | tps65910->gpio.ngpio = 6; | ||
84 | case TPS65911: | ||
85 | tps65910->gpio.ngpio = 9; | ||
86 | default: | ||
87 | return; | ||
88 | } | ||
89 | tps65910->gpio.can_sleep = 1; | ||
90 | |||
91 | tps65910->gpio.direction_input = tps65910_gpio_input; | ||
92 | tps65910->gpio.direction_output = tps65910_gpio_output; | ||
93 | tps65910->gpio.set = tps65910_gpio_set; | ||
94 | tps65910->gpio.get = tps65910_gpio_get; | ||
95 | |||
96 | ret = gpiochip_add(&tps65910->gpio); | ||
97 | |||
98 | if (ret) | ||
99 | dev_warn(tps65910->dev, "GPIO registration failed: %d\n", ret); | ||
100 | } | ||
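One thing worth flagging in the new tps65910_gpio_init(): the switch on tps65910_chip_id() has no break statements, so the TPS65910 case falls through TPS65911 into default and returns before gpiochip_add() is ever reached, for either chip. (The literal 0 passed to tps65910_gpio_set() in tps65910_gpio_output() also looks like it should be offset.) Assuming the intent is simply to pick ngpio per chip, the switch presumably wants to read as follows (a sketch, not the committed code):

	switch (tps65910_chip_id(tps65910)) {
	case TPS65910:
		tps65910->gpio.ngpio = 6;
		break;
	case TPS65911:
		tps65910->gpio.ngpio = 9;
		break;
	default:
		return;
	}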
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c index 9577c432e77f..de3d2465fe24 100644 --- a/drivers/hwmon/coretemp.c +++ b/drivers/hwmon/coretemp.c | |||
@@ -350,6 +350,7 @@ static int get_pkg_tjmax(unsigned int cpu, struct device *dev) | |||
350 | 350 | ||
351 | static int create_name_attr(struct platform_data *pdata, struct device *dev) | 351 | static int create_name_attr(struct platform_data *pdata, struct device *dev) |
352 | { | 352 | { |
353 | sysfs_attr_init(&pdata->name_attr.attr); | ||
353 | pdata->name_attr.attr.name = "name"; | 354 | pdata->name_attr.attr.name = "name"; |
354 | pdata->name_attr.attr.mode = S_IRUGO; | 355 | pdata->name_attr.attr.mode = S_IRUGO; |
355 | pdata->name_attr.show = show_name; | 356 | pdata->name_attr.show = show_name; |
@@ -372,6 +373,7 @@ static int create_core_attrs(struct temp_data *tdata, struct device *dev, | |||
372 | for (i = 0; i < MAX_ATTRS; i++) { | 373 | for (i = 0; i < MAX_ATTRS; i++) { |
373 | snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], | 374 | snprintf(tdata->attr_name[i], CORETEMP_NAME_LENGTH, names[i], |
374 | attr_no); | 375 | attr_no); |
376 | sysfs_attr_init(&tdata->sd_attrs[i].dev_attr.attr); | ||
375 | tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; | 377 | tdata->sd_attrs[i].dev_attr.attr.name = tdata->attr_name[i]; |
376 | tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; | 378 | tdata->sd_attrs[i].dev_attr.attr.mode = S_IRUGO; |
377 | tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; | 379 | tdata->sd_attrs[i].dev_attr.show = rd_ptr[i]; |
@@ -422,7 +424,7 @@ static void update_ttarget(__u8 cpu_model, struct temp_data *tdata, | |||
422 | } | 424 | } |
423 | } | 425 | } |
424 | 426 | ||
425 | static int chk_ucode_version(struct platform_device *pdev) | 427 | static int __devinit chk_ucode_version(struct platform_device *pdev) |
426 | { | 428 | { |
427 | struct cpuinfo_x86 *c = &cpu_data(pdev->id); | 429 | struct cpuinfo_x86 *c = &cpu_data(pdev->id); |
428 | int err; | 430 | int err; |
@@ -509,8 +511,8 @@ static int create_core_data(struct platform_data *pdata, | |||
509 | /* | 511 | /* |
510 | * Provide a single set of attributes for all HT siblings of a core | 512 | * Provide a single set of attributes for all HT siblings of a core |
511 | * to avoid duplicate sensors (the processor ID and core ID of all | 513 | * to avoid duplicate sensors (the processor ID and core ID of all |
512 | * HT siblings of a core is the same). | 514 | * HT siblings of a core are the same). |
513 | * Skip if a HT sibling of this core is already online. | 515 | * Skip if a HT sibling of this core is already registered. |
514 | * This is not an error. | 516 | * This is not an error. |
515 | */ | 517 | */ |
516 | if (pdata->core_data[attr_no] != NULL) | 518 | if (pdata->core_data[attr_no] != NULL) |
@@ -770,10 +772,10 @@ static void __cpuinit put_core_offline(unsigned int cpu) | |||
770 | coretemp_remove_core(pdata, &pdev->dev, indx); | 772 | coretemp_remove_core(pdata, &pdev->dev, indx); |
771 | 773 | ||
772 | /* | 774 | /* |
773 | * If a core is taken offline, but a HT sibling of the same core is | 775 | * If a HT sibling of a core is taken offline, but another HT sibling |
774 | * still online, register the alternate sibling. This ensures that | 776 | * of the same core is still online, register the alternate sibling. |
775 | * exactly one set of attributes is provided as long as at least one | 777 | * This ensures that exactly one set of attributes is provided as long |
776 | * HT sibling of a core is online. | 778 | * as at least one HT sibling of a core is online. |
777 | */ | 779 | */ |
778 | for_each_sibling(i, cpu) { | 780 | for_each_sibling(i, cpu) { |
779 | if (i != cpu) { | 781 | if (i != cpu) { |
diff --git a/drivers/hwmon/pmbus_core.c b/drivers/hwmon/pmbus_core.c index 98799bab69ce..354770ed3186 100644 --- a/drivers/hwmon/pmbus_core.c +++ b/drivers/hwmon/pmbus_core.c | |||
@@ -707,6 +707,7 @@ do { \ | |||
707 | struct sensor_device_attribute *a \ | 707 | struct sensor_device_attribute *a \ |
708 | = &data->_type##s[data->num_##_type##s].attribute; \ | 708 | = &data->_type##s[data->num_##_type##s].attribute; \ |
709 | BUG_ON(data->num_attributes >= data->max_attributes); \ | 709 | BUG_ON(data->num_attributes >= data->max_attributes); \ |
710 | sysfs_attr_init(&a->dev_attr.attr); \ | ||
710 | a->dev_attr.attr.name = _name; \ | 711 | a->dev_attr.attr.name = _name; \ |
711 | a->dev_attr.attr.mode = _mode; \ | 712 | a->dev_attr.attr.mode = _mode; \ |
712 | a->dev_attr.show = _show; \ | 713 | a->dev_attr.show = _show; \ |
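The coretemp and pmbus hunks both add sysfs_attr_init() on attributes that are built at runtime. With CONFIG_DEBUG_LOCK_ALLOC enabled, lockdep requires every dynamically allocated sysfs attribute to carry its own lock class key, and skipping the init triggers a lockdep warning when the file is registered. A minimal sketch of the pattern for a dynamically allocated device attribute (names are illustrative):

	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/slab.h>
	#include <linux/stat.h>
	#include <linux/sysfs.h>

	static ssize_t foo_show(struct device *dev, struct device_attribute *attr,
				char *buf)
	{
		return sprintf(buf, "42\n");
	}

	static int foo_add_attr(struct device *dev)
	{
		struct device_attribute *da;
		int ret;

		da = kzalloc(sizeof(*da), GFP_KERNEL);
		if (!da)
			return -ENOMEM;

		/* Without this, lockdep complains about a key not in .data. */
		sysfs_attr_init(&da->attr);
		da->attr.name = "foo";
		da->attr.mode = S_IRUGO;
		da->show = foo_show;

		ret = device_create_file(dev, da);
		if (ret)
			kfree(da);
		return ret;
	}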
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c index fee1a2613861..1b46a9d9f907 100644 --- a/drivers/i2c/busses/i2c-ocores.c +++ b/drivers/i2c/busses/i2c-ocores.c | |||
@@ -49,7 +49,6 @@ | |||
49 | #include <linux/init.h> | 49 | #include <linux/init.h> |
50 | #include <linux/errno.h> | 50 | #include <linux/errno.h> |
51 | #include <linux/platform_device.h> | 51 | #include <linux/platform_device.h> |
52 | #include <linux/mfd/core.h> | ||
53 | #include <linux/i2c.h> | 52 | #include <linux/i2c.h> |
54 | #include <linux/interrupt.h> | 53 | #include <linux/interrupt.h> |
55 | #include <linux/wait.h> | 54 | #include <linux/wait.h> |
@@ -306,7 +305,7 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev) | |||
306 | return -EIO; | 305 | return -EIO; |
307 | } | 306 | } |
308 | 307 | ||
309 | pdata = mfd_get_data(pdev); | 308 | pdata = pdev->dev.platform_data; |
310 | if (pdata) { | 309 | if (pdata) { |
311 | i2c->regstep = pdata->regstep; | 310 | i2c->regstep = pdata->regstep; |
312 | i2c->clock_khz = pdata->clock_khz; | 311 | i2c->clock_khz = pdata->clock_khz; |
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index e9d5ff4d1496..4bb68f35caf2 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c | |||
@@ -34,7 +34,6 @@ | |||
34 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
35 | #include <linux/delay.h> | 35 | #include <linux/delay.h> |
36 | #include <linux/platform_device.h> | 36 | #include <linux/platform_device.h> |
37 | #include <linux/mfd/core.h> | ||
38 | #include <linux/i2c.h> | 37 | #include <linux/i2c.h> |
39 | #include <linux/interrupt.h> | 38 | #include <linux/interrupt.h> |
40 | #include <linux/wait.h> | 39 | #include <linux/wait.h> |
@@ -705,7 +704,7 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev) | |||
705 | if (irq < 0) | 704 | if (irq < 0) |
706 | goto resource_missing; | 705 | goto resource_missing; |
707 | 706 | ||
708 | pdata = mfd_get_data(pdev); | 707 | pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data; |
709 | if (!pdata) | 708 | if (!pdata) |
710 | return -EINVAL; | 709 | return -EINVAL; |
711 | 710 | ||
diff --git a/drivers/infiniband/Kconfig b/drivers/infiniband/Kconfig index 6e35eccc9caa..0f9a84c1046a 100644 --- a/drivers/infiniband/Kconfig +++ b/drivers/infiniband/Kconfig | |||
@@ -2,6 +2,7 @@ menuconfig INFINIBAND | |||
2 | tristate "InfiniBand support" | 2 | tristate "InfiniBand support" |
3 | depends on PCI || BROKEN | 3 | depends on PCI || BROKEN |
4 | depends on HAS_IOMEM | 4 | depends on HAS_IOMEM |
5 | depends on NET | ||
5 | ---help--- | 6 | ---help--- |
6 | Core support for InfiniBand (IB). Make sure to also select | 7 | Core support for InfiniBand (IB). Make sure to also select |
7 | any protocols you wish to use as well as drivers for your | 8 | any protocols you wish to use as well as drivers for your |
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index cb1ab3ea4998..c8bbaef1becb 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile | |||
@@ -8,7 +8,7 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ | |||
8 | $(user_access-y) | 8 | $(user_access-y) |
9 | 9 | ||
10 | ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ | 10 | ib_core-y := packer.o ud_header.o verbs.o sysfs.o \ |
11 | device.o fmr_pool.o cache.o | 11 | device.o fmr_pool.o cache.o netlink.o |
12 | ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o | 12 | ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o |
13 | 13 | ||
14 | ib_mad-y := mad.o smi.o agent.o mad_rmpp.o | 14 | ib_mad-y := mad.o smi.o agent.o mad_rmpp.o |
diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index f804e28e1ebb..f62f52fb9ece 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c | |||
@@ -3639,8 +3639,16 @@ static struct kobj_type cm_port_obj_type = { | |||
3639 | .release = cm_release_port_obj | 3639 | .release = cm_release_port_obj |
3640 | }; | 3640 | }; |
3641 | 3641 | ||
3642 | static char *cm_devnode(struct device *dev, mode_t *mode) | ||
3643 | { | ||
3644 | *mode = 0666; | ||
3645 | return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); | ||
3646 | } | ||
3647 | |||
3642 | struct class cm_class = { | 3648 | struct class cm_class = { |
3649 | .owner = THIS_MODULE, | ||
3643 | .name = "infiniband_cm", | 3650 | .name = "infiniband_cm", |
3651 | .devnode = cm_devnode, | ||
3644 | }; | 3652 | }; |
3645 | EXPORT_SYMBOL(cm_class); | 3653 | EXPORT_SYMBOL(cm_class); |
3646 | 3654 | ||
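The cm.c hunk adds a devnode callback to the infiniband_cm class, so devtmpfs/udev creates its character devices under /dev/infiniband/<name> with mode 0666 instead of the default location and permissions. The callback shape is the standard struct class hook of this kernel generation; a stripped-down sketch for a hypothetical class:

	#include <linux/device.h>
	#include <linux/kernel.h>
	#include <linux/module.h>
	#include <linux/slab.h>

	/* Put nodes under /dev/foo/ and make them world read/writable. */
	static char *foo_devnode(struct device *dev, mode_t *mode)
	{
		if (mode)
			*mode = 0666;
		return kasprintf(GFP_KERNEL, "foo/%s", dev_name(dev));
	}

	static struct class foo_class = {
		.owner   = THIS_MODULE,
		.name    = "foo",
		.devnode = foo_devnode,
	};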
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 99dde874fbbd..b6a33b3c516d 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c | |||
@@ -47,6 +47,7 @@ | |||
47 | 47 | ||
48 | #include <rdma/rdma_cm.h> | 48 | #include <rdma/rdma_cm.h> |
49 | #include <rdma/rdma_cm_ib.h> | 49 | #include <rdma/rdma_cm_ib.h> |
50 | #include <rdma/rdma_netlink.h> | ||
50 | #include <rdma/ib_cache.h> | 51 | #include <rdma/ib_cache.h> |
51 | #include <rdma/ib_cm.h> | 52 | #include <rdma/ib_cm.h> |
52 | #include <rdma/ib_sa.h> | 53 | #include <rdma/ib_sa.h> |
@@ -89,20 +90,6 @@ struct cma_device { | |||
89 | struct list_head id_list; | 90 | struct list_head id_list; |
90 | }; | 91 | }; |
91 | 92 | ||
92 | enum cma_state { | ||
93 | CMA_IDLE, | ||
94 | CMA_ADDR_QUERY, | ||
95 | CMA_ADDR_RESOLVED, | ||
96 | CMA_ROUTE_QUERY, | ||
97 | CMA_ROUTE_RESOLVED, | ||
98 | CMA_CONNECT, | ||
99 | CMA_DISCONNECT, | ||
100 | CMA_ADDR_BOUND, | ||
101 | CMA_LISTEN, | ||
102 | CMA_DEVICE_REMOVAL, | ||
103 | CMA_DESTROYING | ||
104 | }; | ||
105 | |||
106 | struct rdma_bind_list { | 93 | struct rdma_bind_list { |
107 | struct idr *ps; | 94 | struct idr *ps; |
108 | struct hlist_head owners; | 95 | struct hlist_head owners; |
@@ -126,7 +113,7 @@ struct rdma_id_private { | |||
126 | struct list_head mc_list; | 113 | struct list_head mc_list; |
127 | 114 | ||
128 | int internal_id; | 115 | int internal_id; |
129 | enum cma_state state; | 116 | enum rdma_cm_state state; |
130 | spinlock_t lock; | 117 | spinlock_t lock; |
131 | struct mutex qp_mutex; | 118 | struct mutex qp_mutex; |
132 | 119 | ||
@@ -146,6 +133,7 @@ struct rdma_id_private { | |||
146 | u32 seq_num; | 133 | u32 seq_num; |
147 | u32 qkey; | 134 | u32 qkey; |
148 | u32 qp_num; | 135 | u32 qp_num; |
136 | pid_t owner; | ||
149 | u8 srq; | 137 | u8 srq; |
150 | u8 tos; | 138 | u8 tos; |
151 | u8 reuseaddr; | 139 | u8 reuseaddr; |
@@ -165,8 +153,8 @@ struct cma_multicast { | |||
165 | struct cma_work { | 153 | struct cma_work { |
166 | struct work_struct work; | 154 | struct work_struct work; |
167 | struct rdma_id_private *id; | 155 | struct rdma_id_private *id; |
168 | enum cma_state old_state; | 156 | enum rdma_cm_state old_state; |
169 | enum cma_state new_state; | 157 | enum rdma_cm_state new_state; |
170 | struct rdma_cm_event event; | 158 | struct rdma_cm_event event; |
171 | }; | 159 | }; |
172 | 160 | ||
@@ -217,7 +205,7 @@ struct sdp_hah { | |||
217 | #define CMA_VERSION 0x00 | 205 | #define CMA_VERSION 0x00 |
218 | #define SDP_MAJ_VERSION 0x2 | 206 | #define SDP_MAJ_VERSION 0x2 |
219 | 207 | ||
220 | static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp) | 208 | static int cma_comp(struct rdma_id_private *id_priv, enum rdma_cm_state comp) |
221 | { | 209 | { |
222 | unsigned long flags; | 210 | unsigned long flags; |
223 | int ret; | 211 | int ret; |
@@ -229,7 +217,7 @@ static int cma_comp(struct rdma_id_private *id_priv, enum cma_state comp) | |||
229 | } | 217 | } |
230 | 218 | ||
231 | static int cma_comp_exch(struct rdma_id_private *id_priv, | 219 | static int cma_comp_exch(struct rdma_id_private *id_priv, |
232 | enum cma_state comp, enum cma_state exch) | 220 | enum rdma_cm_state comp, enum rdma_cm_state exch) |
233 | { | 221 | { |
234 | unsigned long flags; | 222 | unsigned long flags; |
235 | int ret; | 223 | int ret; |
@@ -241,11 +229,11 @@ static int cma_comp_exch(struct rdma_id_private *id_priv, | |||
241 | return ret; | 229 | return ret; |
242 | } | 230 | } |
243 | 231 | ||
244 | static enum cma_state cma_exch(struct rdma_id_private *id_priv, | 232 | static enum rdma_cm_state cma_exch(struct rdma_id_private *id_priv, |
245 | enum cma_state exch) | 233 | enum rdma_cm_state exch) |
246 | { | 234 | { |
247 | unsigned long flags; | 235 | unsigned long flags; |
248 | enum cma_state old; | 236 | enum rdma_cm_state old; |
249 | 237 | ||
250 | spin_lock_irqsave(&id_priv->lock, flags); | 238 | spin_lock_irqsave(&id_priv->lock, flags); |
251 | old = id_priv->state; | 239 | old = id_priv->state; |
@@ -279,11 +267,6 @@ static inline void sdp_set_ip_ver(struct sdp_hh *hh, u8 ip_ver) | |||
279 | hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF); | 267 | hh->ip_version = (ip_ver << 4) | (hh->ip_version & 0xF); |
280 | } | 268 | } |
281 | 269 | ||
282 | static inline int cma_is_ud_ps(enum rdma_port_space ps) | ||
283 | { | ||
284 | return (ps == RDMA_PS_UDP || ps == RDMA_PS_IPOIB); | ||
285 | } | ||
286 | |||
287 | static void cma_attach_to_dev(struct rdma_id_private *id_priv, | 270 | static void cma_attach_to_dev(struct rdma_id_private *id_priv, |
288 | struct cma_device *cma_dev) | 271 | struct cma_device *cma_dev) |
289 | { | 272 | { |
@@ -413,7 +396,7 @@ static void cma_deref_id(struct rdma_id_private *id_priv) | |||
413 | } | 396 | } |
414 | 397 | ||
415 | static int cma_disable_callback(struct rdma_id_private *id_priv, | 398 | static int cma_disable_callback(struct rdma_id_private *id_priv, |
416 | enum cma_state state) | 399 | enum rdma_cm_state state) |
417 | { | 400 | { |
418 | mutex_lock(&id_priv->handler_mutex); | 401 | mutex_lock(&id_priv->handler_mutex); |
419 | if (id_priv->state != state) { | 402 | if (id_priv->state != state) { |
@@ -429,7 +412,8 @@ static int cma_has_cm_dev(struct rdma_id_private *id_priv) | |||
429 | } | 412 | } |
430 | 413 | ||
431 | struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, | 414 | struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, |
432 | void *context, enum rdma_port_space ps) | 415 | void *context, enum rdma_port_space ps, |
416 | enum ib_qp_type qp_type) | ||
433 | { | 417 | { |
434 | struct rdma_id_private *id_priv; | 418 | struct rdma_id_private *id_priv; |
435 | 419 | ||
@@ -437,10 +421,12 @@ struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, | |||
437 | if (!id_priv) | 421 | if (!id_priv) |
438 | return ERR_PTR(-ENOMEM); | 422 | return ERR_PTR(-ENOMEM); |
439 | 423 | ||
440 | id_priv->state = CMA_IDLE; | 424 | id_priv->owner = task_pid_nr(current); |
425 | id_priv->state = RDMA_CM_IDLE; | ||
441 | id_priv->id.context = context; | 426 | id_priv->id.context = context; |
442 | id_priv->id.event_handler = event_handler; | 427 | id_priv->id.event_handler = event_handler; |
443 | id_priv->id.ps = ps; | 428 | id_priv->id.ps = ps; |
429 | id_priv->id.qp_type = qp_type; | ||
444 | spin_lock_init(&id_priv->lock); | 430 | spin_lock_init(&id_priv->lock); |
445 | mutex_init(&id_priv->qp_mutex); | 431 | mutex_init(&id_priv->qp_mutex); |
446 | init_completion(&id_priv->comp); | 432 | init_completion(&id_priv->comp); |
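This hunk gives rdma_create_id() an explicit ib_qp_type argument and records the calling task's PID on the id, so the CM no longer has to infer datagram vs. connected behaviour from the port space alone (the cma_is_ud_ps() helper removed earlier). Every caller now states the QP type it intends to use; a hedged example of the caller-side change, with an illustrative ULP function name:

	#include <linux/err.h>
	#include <rdma/ib_verbs.h>
	#include <rdma/rdma_cm.h>

	static int foo_create_listener(rdma_cm_event_handler handler, void *ctx,
				       struct rdma_cm_id **out)
	{
		struct rdma_cm_id *id;

		/* Reliable-connected service on the TCP port space. */
		id = rdma_create_id(handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
		if (IS_ERR(id))
			return PTR_ERR(id);

		/* rdma_bind_addr()/rdma_listen() follow as before. */
		*out = id;
		return 0;
	}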
@@ -508,7 +494,7 @@ int rdma_create_qp(struct rdma_cm_id *id, struct ib_pd *pd, | |||
508 | if (IS_ERR(qp)) | 494 | if (IS_ERR(qp)) |
509 | return PTR_ERR(qp); | 495 | return PTR_ERR(qp); |
510 | 496 | ||
511 | if (cma_is_ud_ps(id_priv->id.ps)) | 497 | if (id->qp_type == IB_QPT_UD) |
512 | ret = cma_init_ud_qp(id_priv, qp); | 498 | ret = cma_init_ud_qp(id_priv, qp); |
513 | else | 499 | else |
514 | ret = cma_init_conn_qp(id_priv, qp); | 500 | ret = cma_init_conn_qp(id_priv, qp); |
@@ -636,7 +622,7 @@ static int cma_ib_init_qp_attr(struct rdma_id_private *id_priv, | |||
636 | qp_attr->port_num = id_priv->id.port_num; | 622 | qp_attr->port_num = id_priv->id.port_num; |
637 | *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; | 623 | *qp_attr_mask = IB_QP_STATE | IB_QP_PKEY_INDEX | IB_QP_PORT; |
638 | 624 | ||
639 | if (cma_is_ud_ps(id_priv->id.ps)) { | 625 | if (id_priv->id.qp_type == IB_QPT_UD) { |
640 | ret = cma_set_qkey(id_priv); | 626 | ret = cma_set_qkey(id_priv); |
641 | if (ret) | 627 | if (ret) |
642 | return ret; | 628 | return ret; |
@@ -659,7 +645,7 @@ int rdma_init_qp_attr(struct rdma_cm_id *id, struct ib_qp_attr *qp_attr, | |||
659 | id_priv = container_of(id, struct rdma_id_private, id); | 645 | id_priv = container_of(id, struct rdma_id_private, id); |
660 | switch (rdma_node_get_transport(id_priv->id.device->node_type)) { | 646 | switch (rdma_node_get_transport(id_priv->id.device->node_type)) { |
661 | case RDMA_TRANSPORT_IB: | 647 | case RDMA_TRANSPORT_IB: |
662 | if (!id_priv->cm_id.ib || cma_is_ud_ps(id_priv->id.ps)) | 648 | if (!id_priv->cm_id.ib || (id_priv->id.qp_type == IB_QPT_UD)) |
663 | ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); | 649 | ret = cma_ib_init_qp_attr(id_priv, qp_attr, qp_attr_mask); |
664 | else | 650 | else |
665 | ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, | 651 | ret = ib_cm_init_qp_attr(id_priv->cm_id.ib, qp_attr, |
@@ -858,16 +844,16 @@ static void cma_cancel_listens(struct rdma_id_private *id_priv) | |||
858 | } | 844 | } |
859 | 845 | ||
860 | static void cma_cancel_operation(struct rdma_id_private *id_priv, | 846 | static void cma_cancel_operation(struct rdma_id_private *id_priv, |
861 | enum cma_state state) | 847 | enum rdma_cm_state state) |
862 | { | 848 | { |
863 | switch (state) { | 849 | switch (state) { |
864 | case CMA_ADDR_QUERY: | 850 | case RDMA_CM_ADDR_QUERY: |
865 | rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); | 851 | rdma_addr_cancel(&id_priv->id.route.addr.dev_addr); |
866 | break; | 852 | break; |
867 | case CMA_ROUTE_QUERY: | 853 | case RDMA_CM_ROUTE_QUERY: |
868 | cma_cancel_route(id_priv); | 854 | cma_cancel_route(id_priv); |
869 | break; | 855 | break; |
870 | case CMA_LISTEN: | 856 | case RDMA_CM_LISTEN: |
871 | if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr) | 857 | if (cma_any_addr((struct sockaddr *) &id_priv->id.route.addr.src_addr) |
872 | && !id_priv->cma_dev) | 858 | && !id_priv->cma_dev) |
873 | cma_cancel_listens(id_priv); | 859 | cma_cancel_listens(id_priv); |
@@ -918,10 +904,10 @@ static void cma_leave_mc_groups(struct rdma_id_private *id_priv) | |||
918 | void rdma_destroy_id(struct rdma_cm_id *id) | 904 | void rdma_destroy_id(struct rdma_cm_id *id) |
919 | { | 905 | { |
920 | struct rdma_id_private *id_priv; | 906 | struct rdma_id_private *id_priv; |
921 | enum cma_state state; | 907 | enum rdma_cm_state state; |
922 | 908 | ||
923 | id_priv = container_of(id, struct rdma_id_private, id); | 909 | id_priv = container_of(id, struct rdma_id_private, id); |
924 | state = cma_exch(id_priv, CMA_DESTROYING); | 910 | state = cma_exch(id_priv, RDMA_CM_DESTROYING); |
925 | cma_cancel_operation(id_priv, state); | 911 | cma_cancel_operation(id_priv, state); |
926 | 912 | ||
927 | /* | 913 | /* |
@@ -1015,9 +1001,9 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
1015 | int ret = 0; | 1001 | int ret = 0; |
1016 | 1002 | ||
1017 | if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && | 1003 | if ((ib_event->event != IB_CM_TIMEWAIT_EXIT && |
1018 | cma_disable_callback(id_priv, CMA_CONNECT)) || | 1004 | cma_disable_callback(id_priv, RDMA_CM_CONNECT)) || |
1019 | (ib_event->event == IB_CM_TIMEWAIT_EXIT && | 1005 | (ib_event->event == IB_CM_TIMEWAIT_EXIT && |
1020 | cma_disable_callback(id_priv, CMA_DISCONNECT))) | 1006 | cma_disable_callback(id_priv, RDMA_CM_DISCONNECT))) |
1021 | return 0; | 1007 | return 0; |
1022 | 1008 | ||
1023 | memset(&event, 0, sizeof event); | 1009 | memset(&event, 0, sizeof event); |
@@ -1048,7 +1034,8 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
1048 | event.status = -ETIMEDOUT; /* fall through */ | 1034 | event.status = -ETIMEDOUT; /* fall through */ |
1049 | case IB_CM_DREQ_RECEIVED: | 1035 | case IB_CM_DREQ_RECEIVED: |
1050 | case IB_CM_DREP_RECEIVED: | 1036 | case IB_CM_DREP_RECEIVED: |
1051 | if (!cma_comp_exch(id_priv, CMA_CONNECT, CMA_DISCONNECT)) | 1037 | if (!cma_comp_exch(id_priv, RDMA_CM_CONNECT, |
1038 | RDMA_CM_DISCONNECT)) | ||
1052 | goto out; | 1039 | goto out; |
1053 | event.event = RDMA_CM_EVENT_DISCONNECTED; | 1040 | event.event = RDMA_CM_EVENT_DISCONNECTED; |
1054 | break; | 1041 | break; |
@@ -1075,7 +1062,7 @@ static int cma_ib_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
1075 | if (ret) { | 1062 | if (ret) { |
1076 | /* Destroy the CM ID by returning a non-zero value. */ | 1063 | /* Destroy the CM ID by returning a non-zero value. */ |
1077 | id_priv->cm_id.ib = NULL; | 1064 | id_priv->cm_id.ib = NULL; |
1078 | cma_exch(id_priv, CMA_DESTROYING); | 1065 | cma_exch(id_priv, RDMA_CM_DESTROYING); |
1079 | mutex_unlock(&id_priv->handler_mutex); | 1066 | mutex_unlock(&id_priv->handler_mutex); |
1080 | rdma_destroy_id(&id_priv->id); | 1067 | rdma_destroy_id(&id_priv->id); |
1081 | return ret; | 1068 | return ret; |
@@ -1101,7 +1088,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, | |||
1101 | goto err; | 1088 | goto err; |
1102 | 1089 | ||
1103 | id = rdma_create_id(listen_id->event_handler, listen_id->context, | 1090 | id = rdma_create_id(listen_id->event_handler, listen_id->context, |
1104 | listen_id->ps); | 1091 | listen_id->ps, ib_event->param.req_rcvd.qp_type); |
1105 | if (IS_ERR(id)) | 1092 | if (IS_ERR(id)) |
1106 | goto err; | 1093 | goto err; |
1107 | 1094 | ||
@@ -1132,7 +1119,7 @@ static struct rdma_id_private *cma_new_conn_id(struct rdma_cm_id *listen_id, | |||
1132 | rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); | 1119 | rdma_addr_set_dgid(&rt->addr.dev_addr, &rt->path_rec[0].dgid); |
1133 | 1120 | ||
1134 | id_priv = container_of(id, struct rdma_id_private, id); | 1121 | id_priv = container_of(id, struct rdma_id_private, id); |
1135 | id_priv->state = CMA_CONNECT; | 1122 | id_priv->state = RDMA_CM_CONNECT; |
1136 | return id_priv; | 1123 | return id_priv; |
1137 | 1124 | ||
1138 | destroy_id: | 1125 | destroy_id: |
@@ -1152,7 +1139,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, | |||
1152 | int ret; | 1139 | int ret; |
1153 | 1140 | ||
1154 | id = rdma_create_id(listen_id->event_handler, listen_id->context, | 1141 | id = rdma_create_id(listen_id->event_handler, listen_id->context, |
1155 | listen_id->ps); | 1142 | listen_id->ps, IB_QPT_UD); |
1156 | if (IS_ERR(id)) | 1143 | if (IS_ERR(id)) |
1157 | return NULL; | 1144 | return NULL; |
1158 | 1145 | ||
@@ -1172,7 +1159,7 @@ static struct rdma_id_private *cma_new_udp_id(struct rdma_cm_id *listen_id, | |||
1172 | } | 1159 | } |
1173 | 1160 | ||
1174 | id_priv = container_of(id, struct rdma_id_private, id); | 1161 | id_priv = container_of(id, struct rdma_id_private, id); |
1175 | id_priv->state = CMA_CONNECT; | 1162 | id_priv->state = RDMA_CM_CONNECT; |
1176 | return id_priv; | 1163 | return id_priv; |
1177 | err: | 1164 | err: |
1178 | rdma_destroy_id(id); | 1165 | rdma_destroy_id(id); |
@@ -1201,13 +1188,13 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
1201 | int offset, ret; | 1188 | int offset, ret; |
1202 | 1189 | ||
1203 | listen_id = cm_id->context; | 1190 | listen_id = cm_id->context; |
1204 | if (cma_disable_callback(listen_id, CMA_LISTEN)) | 1191 | if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) |
1205 | return -ECONNABORTED; | 1192 | return -ECONNABORTED; |
1206 | 1193 | ||
1207 | memset(&event, 0, sizeof event); | 1194 | memset(&event, 0, sizeof event); |
1208 | offset = cma_user_data_offset(listen_id->id.ps); | 1195 | offset = cma_user_data_offset(listen_id->id.ps); |
1209 | event.event = RDMA_CM_EVENT_CONNECT_REQUEST; | 1196 | event.event = RDMA_CM_EVENT_CONNECT_REQUEST; |
1210 | if (cma_is_ud_ps(listen_id->id.ps)) { | 1197 | if (listen_id->id.qp_type == IB_QPT_UD) { |
1211 | conn_id = cma_new_udp_id(&listen_id->id, ib_event); | 1198 | conn_id = cma_new_udp_id(&listen_id->id, ib_event); |
1212 | event.param.ud.private_data = ib_event->private_data + offset; | 1199 | event.param.ud.private_data = ib_event->private_data + offset; |
1213 | event.param.ud.private_data_len = | 1200 | event.param.ud.private_data_len = |
@@ -1243,8 +1230,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
1243 | * while we're accessing the cm_id. | 1230 | * while we're accessing the cm_id. |
1244 | */ | 1231 | */ |
1245 | mutex_lock(&lock); | 1232 | mutex_lock(&lock); |
1246 | if (cma_comp(conn_id, CMA_CONNECT) && | 1233 | if (cma_comp(conn_id, RDMA_CM_CONNECT) && (conn_id->id.qp_type != IB_QPT_UD)) |
1247 | !cma_is_ud_ps(conn_id->id.ps)) | ||
1248 | ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); | 1234 | ib_send_cm_mra(cm_id, CMA_CM_MRA_SETTING, NULL, 0); |
1249 | mutex_unlock(&lock); | 1235 | mutex_unlock(&lock); |
1250 | mutex_unlock(&conn_id->handler_mutex); | 1236 | mutex_unlock(&conn_id->handler_mutex); |
@@ -1257,7 +1243,7 @@ static int cma_req_handler(struct ib_cm_id *cm_id, struct ib_cm_event *ib_event) | |||
1257 | conn_id->cm_id.ib = NULL; | 1243 | conn_id->cm_id.ib = NULL; |
1258 | 1244 | ||
1259 | release_conn_id: | 1245 | release_conn_id: |
1260 | cma_exch(conn_id, CMA_DESTROYING); | 1246 | cma_exch(conn_id, RDMA_CM_DESTROYING); |
1261 | mutex_unlock(&conn_id->handler_mutex); | 1247 | mutex_unlock(&conn_id->handler_mutex); |
1262 | rdma_destroy_id(&conn_id->id); | 1248 | rdma_destroy_id(&conn_id->id); |
1263 | 1249 | ||
@@ -1328,7 +1314,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) | |||
1328 | struct sockaddr_in *sin; | 1314 | struct sockaddr_in *sin; |
1329 | int ret = 0; | 1315 | int ret = 0; |
1330 | 1316 | ||
1331 | if (cma_disable_callback(id_priv, CMA_CONNECT)) | 1317 | if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) |
1332 | return 0; | 1318 | return 0; |
1333 | 1319 | ||
1334 | memset(&event, 0, sizeof event); | 1320 | memset(&event, 0, sizeof event); |
@@ -1371,7 +1357,7 @@ static int cma_iw_handler(struct iw_cm_id *iw_id, struct iw_cm_event *iw_event) | |||
1371 | if (ret) { | 1357 | if (ret) { |
1372 | /* Destroy the CM ID by returning a non-zero value. */ | 1358 | /* Destroy the CM ID by returning a non-zero value. */ |
1373 | id_priv->cm_id.iw = NULL; | 1359 | id_priv->cm_id.iw = NULL; |
1374 | cma_exch(id_priv, CMA_DESTROYING); | 1360 | cma_exch(id_priv, RDMA_CM_DESTROYING); |
1375 | mutex_unlock(&id_priv->handler_mutex); | 1361 | mutex_unlock(&id_priv->handler_mutex); |
1376 | rdma_destroy_id(&id_priv->id); | 1362 | rdma_destroy_id(&id_priv->id); |
1377 | return ret; | 1363 | return ret; |
@@ -1393,20 +1379,20 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, | |||
1393 | struct ib_device_attr attr; | 1379 | struct ib_device_attr attr; |
1394 | 1380 | ||
1395 | listen_id = cm_id->context; | 1381 | listen_id = cm_id->context; |
1396 | if (cma_disable_callback(listen_id, CMA_LISTEN)) | 1382 | if (cma_disable_callback(listen_id, RDMA_CM_LISTEN)) |
1397 | return -ECONNABORTED; | 1383 | return -ECONNABORTED; |
1398 | 1384 | ||
1399 | /* Create a new RDMA id for the new IW CM ID */ | 1385 | /* Create a new RDMA id for the new IW CM ID */ |
1400 | new_cm_id = rdma_create_id(listen_id->id.event_handler, | 1386 | new_cm_id = rdma_create_id(listen_id->id.event_handler, |
1401 | listen_id->id.context, | 1387 | listen_id->id.context, |
1402 | RDMA_PS_TCP); | 1388 | RDMA_PS_TCP, IB_QPT_RC); |
1403 | if (IS_ERR(new_cm_id)) { | 1389 | if (IS_ERR(new_cm_id)) { |
1404 | ret = -ENOMEM; | 1390 | ret = -ENOMEM; |
1405 | goto out; | 1391 | goto out; |
1406 | } | 1392 | } |
1407 | conn_id = container_of(new_cm_id, struct rdma_id_private, id); | 1393 | conn_id = container_of(new_cm_id, struct rdma_id_private, id); |
1408 | mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); | 1394 | mutex_lock_nested(&conn_id->handler_mutex, SINGLE_DEPTH_NESTING); |
1409 | conn_id->state = CMA_CONNECT; | 1395 | conn_id->state = RDMA_CM_CONNECT; |
1410 | 1396 | ||
1411 | dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr); | 1397 | dev = ip_dev_find(&init_net, iw_event->local_addr.sin_addr.s_addr); |
1412 | if (!dev) { | 1398 | if (!dev) { |
@@ -1461,7 +1447,7 @@ static int iw_conn_req_handler(struct iw_cm_id *cm_id, | |||
1461 | if (ret) { | 1447 | if (ret) { |
1462 | /* User wants to destroy the CM ID */ | 1448 | /* User wants to destroy the CM ID */ |
1463 | conn_id->cm_id.iw = NULL; | 1449 | conn_id->cm_id.iw = NULL; |
1464 | cma_exch(conn_id, CMA_DESTROYING); | 1450 | cma_exch(conn_id, RDMA_CM_DESTROYING); |
1465 | mutex_unlock(&conn_id->handler_mutex); | 1451 | mutex_unlock(&conn_id->handler_mutex); |
1466 | cma_deref_id(conn_id); | 1452 | cma_deref_id(conn_id); |
1467 | rdma_destroy_id(&conn_id->id); | 1453 | rdma_destroy_id(&conn_id->id); |
@@ -1548,13 +1534,14 @@ static void cma_listen_on_dev(struct rdma_id_private *id_priv, | |||
1548 | struct rdma_cm_id *id; | 1534 | struct rdma_cm_id *id; |
1549 | int ret; | 1535 | int ret; |
1550 | 1536 | ||
1551 | id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps); | 1537 | id = rdma_create_id(cma_listen_handler, id_priv, id_priv->id.ps, |
1538 | id_priv->id.qp_type); | ||
1552 | if (IS_ERR(id)) | 1539 | if (IS_ERR(id)) |
1553 | return; | 1540 | return; |
1554 | 1541 | ||
1555 | dev_id_priv = container_of(id, struct rdma_id_private, id); | 1542 | dev_id_priv = container_of(id, struct rdma_id_private, id); |
1556 | 1543 | ||
1557 | dev_id_priv->state = CMA_ADDR_BOUND; | 1544 | dev_id_priv->state = RDMA_CM_ADDR_BOUND; |
1558 | memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr, | 1545 | memcpy(&id->route.addr.src_addr, &id_priv->id.route.addr.src_addr, |
1559 | ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr)); | 1546 | ip_addr_size((struct sockaddr *) &id_priv->id.route.addr.src_addr)); |
1560 | 1547 | ||
@@ -1601,8 +1588,8 @@ static void cma_query_handler(int status, struct ib_sa_path_rec *path_rec, | |||
1601 | route->num_paths = 1; | 1588 | route->num_paths = 1; |
1602 | *route->path_rec = *path_rec; | 1589 | *route->path_rec = *path_rec; |
1603 | } else { | 1590 | } else { |
1604 | work->old_state = CMA_ROUTE_QUERY; | 1591 | work->old_state = RDMA_CM_ROUTE_QUERY; |
1605 | work->new_state = CMA_ADDR_RESOLVED; | 1592 | work->new_state = RDMA_CM_ADDR_RESOLVED; |
1606 | work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; | 1593 | work->event.event = RDMA_CM_EVENT_ROUTE_ERROR; |
1607 | work->event.status = status; | 1594 | work->event.status = status; |
1608 | } | 1595 | } |
@@ -1660,7 +1647,7 @@ static void cma_work_handler(struct work_struct *_work) | |||
1660 | goto out; | 1647 | goto out; |
1661 | 1648 | ||
1662 | if (id_priv->id.event_handler(&id_priv->id, &work->event)) { | 1649 | if (id_priv->id.event_handler(&id_priv->id, &work->event)) { |
1663 | cma_exch(id_priv, CMA_DESTROYING); | 1650 | cma_exch(id_priv, RDMA_CM_DESTROYING); |
1664 | destroy = 1; | 1651 | destroy = 1; |
1665 | } | 1652 | } |
1666 | out: | 1653 | out: |
@@ -1678,12 +1665,12 @@ static void cma_ndev_work_handler(struct work_struct *_work) | |||
1678 | int destroy = 0; | 1665 | int destroy = 0; |
1679 | 1666 | ||
1680 | mutex_lock(&id_priv->handler_mutex); | 1667 | mutex_lock(&id_priv->handler_mutex); |
1681 | if (id_priv->state == CMA_DESTROYING || | 1668 | if (id_priv->state == RDMA_CM_DESTROYING || |
1682 | id_priv->state == CMA_DEVICE_REMOVAL) | 1669 | id_priv->state == RDMA_CM_DEVICE_REMOVAL) |
1683 | goto out; | 1670 | goto out; |
1684 | 1671 | ||
1685 | if (id_priv->id.event_handler(&id_priv->id, &work->event)) { | 1672 | if (id_priv->id.event_handler(&id_priv->id, &work->event)) { |
1686 | cma_exch(id_priv, CMA_DESTROYING); | 1673 | cma_exch(id_priv, RDMA_CM_DESTROYING); |
1687 | destroy = 1; | 1674 | destroy = 1; |
1688 | } | 1675 | } |
1689 | 1676 | ||
@@ -1707,8 +1694,8 @@ static int cma_resolve_ib_route(struct rdma_id_private *id_priv, int timeout_ms) | |||
1707 | 1694 | ||
1708 | work->id = id_priv; | 1695 | work->id = id_priv; |
1709 | INIT_WORK(&work->work, cma_work_handler); | 1696 | INIT_WORK(&work->work, cma_work_handler); |
1710 | work->old_state = CMA_ROUTE_QUERY; | 1697 | work->old_state = RDMA_CM_ROUTE_QUERY; |
1711 | work->new_state = CMA_ROUTE_RESOLVED; | 1698 | work->new_state = RDMA_CM_ROUTE_RESOLVED; |
1712 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; | 1699 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; |
1713 | 1700 | ||
1714 | route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); | 1701 | route->path_rec = kmalloc(sizeof *route->path_rec, GFP_KERNEL); |
@@ -1737,7 +1724,8 @@ int rdma_set_ib_paths(struct rdma_cm_id *id, | |||
1737 | int ret; | 1724 | int ret; |
1738 | 1725 | ||
1739 | id_priv = container_of(id, struct rdma_id_private, id); | 1726 | id_priv = container_of(id, struct rdma_id_private, id); |
1740 | if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_RESOLVED)) | 1727 | if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, |
1728 | RDMA_CM_ROUTE_RESOLVED)) | ||
1741 | return -EINVAL; | 1729 | return -EINVAL; |
1742 | 1730 | ||
1743 | id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths, | 1731 | id->route.path_rec = kmemdup(path_rec, sizeof *path_rec * num_paths, |
@@ -1750,7 +1738,7 @@ int rdma_set_ib_paths(struct rdma_cm_id *id, | |||
1750 | id->route.num_paths = num_paths; | 1738 | id->route.num_paths = num_paths; |
1751 | return 0; | 1739 | return 0; |
1752 | err: | 1740 | err: |
1753 | cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_ADDR_RESOLVED); | 1741 | cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_ADDR_RESOLVED); |
1754 | return ret; | 1742 | return ret; |
1755 | } | 1743 | } |
1756 | EXPORT_SYMBOL(rdma_set_ib_paths); | 1744 | EXPORT_SYMBOL(rdma_set_ib_paths); |
@@ -1765,8 +1753,8 @@ static int cma_resolve_iw_route(struct rdma_id_private *id_priv, int timeout_ms) | |||
1765 | 1753 | ||
1766 | work->id = id_priv; | 1754 | work->id = id_priv; |
1767 | INIT_WORK(&work->work, cma_work_handler); | 1755 | INIT_WORK(&work->work, cma_work_handler); |
1768 | work->old_state = CMA_ROUTE_QUERY; | 1756 | work->old_state = RDMA_CM_ROUTE_QUERY; |
1769 | work->new_state = CMA_ROUTE_RESOLVED; | 1757 | work->new_state = RDMA_CM_ROUTE_RESOLVED; |
1770 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; | 1758 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; |
1771 | queue_work(cma_wq, &work->work); | 1759 | queue_work(cma_wq, &work->work); |
1772 | return 0; | 1760 | return 0; |
@@ -1830,8 +1818,8 @@ static int cma_resolve_iboe_route(struct rdma_id_private *id_priv) | |||
1830 | goto err2; | 1818 | goto err2; |
1831 | } | 1819 | } |
1832 | 1820 | ||
1833 | work->old_state = CMA_ROUTE_QUERY; | 1821 | work->old_state = RDMA_CM_ROUTE_QUERY; |
1834 | work->new_state = CMA_ROUTE_RESOLVED; | 1822 | work->new_state = RDMA_CM_ROUTE_RESOLVED; |
1835 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; | 1823 | work->event.event = RDMA_CM_EVENT_ROUTE_RESOLVED; |
1836 | work->event.status = 0; | 1824 | work->event.status = 0; |
1837 | 1825 | ||
@@ -1853,7 +1841,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) | |||
1853 | int ret; | 1841 | int ret; |
1854 | 1842 | ||
1855 | id_priv = container_of(id, struct rdma_id_private, id); | 1843 | id_priv = container_of(id, struct rdma_id_private, id); |
1856 | if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ROUTE_QUERY)) | 1844 | if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, RDMA_CM_ROUTE_QUERY)) |
1857 | return -EINVAL; | 1845 | return -EINVAL; |
1858 | 1846 | ||
1859 | atomic_inc(&id_priv->refcount); | 1847 | atomic_inc(&id_priv->refcount); |
@@ -1882,7 +1870,7 @@ int rdma_resolve_route(struct rdma_cm_id *id, int timeout_ms) | |||
1882 | 1870 | ||
1883 | return 0; | 1871 | return 0; |
1884 | err: | 1872 | err: |
1885 | cma_comp_exch(id_priv, CMA_ROUTE_QUERY, CMA_ADDR_RESOLVED); | 1873 | cma_comp_exch(id_priv, RDMA_CM_ROUTE_QUERY, RDMA_CM_ADDR_RESOLVED); |
1886 | cma_deref_id(id_priv); | 1874 | cma_deref_id(id_priv); |
1887 | return ret; | 1875 | return ret; |
1888 | } | 1876 | } |
@@ -1941,14 +1929,16 @@ static void addr_handler(int status, struct sockaddr *src_addr, | |||
1941 | 1929 | ||
1942 | memset(&event, 0, sizeof event); | 1930 | memset(&event, 0, sizeof event); |
1943 | mutex_lock(&id_priv->handler_mutex); | 1931 | mutex_lock(&id_priv->handler_mutex); |
1944 | if (!cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_RESOLVED)) | 1932 | if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, |
1933 | RDMA_CM_ADDR_RESOLVED)) | ||
1945 | goto out; | 1934 | goto out; |
1946 | 1935 | ||
1947 | if (!status && !id_priv->cma_dev) | 1936 | if (!status && !id_priv->cma_dev) |
1948 | status = cma_acquire_dev(id_priv); | 1937 | status = cma_acquire_dev(id_priv); |
1949 | 1938 | ||
1950 | if (status) { | 1939 | if (status) { |
1951 | if (!cma_comp_exch(id_priv, CMA_ADDR_RESOLVED, CMA_ADDR_BOUND)) | 1940 | if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_RESOLVED, |
1941 | RDMA_CM_ADDR_BOUND)) | ||
1952 | goto out; | 1942 | goto out; |
1953 | event.event = RDMA_CM_EVENT_ADDR_ERROR; | 1943 | event.event = RDMA_CM_EVENT_ADDR_ERROR; |
1954 | event.status = status; | 1944 | event.status = status; |
@@ -1959,7 +1949,7 @@ static void addr_handler(int status, struct sockaddr *src_addr, | |||
1959 | } | 1949 | } |
1960 | 1950 | ||
1961 | if (id_priv->id.event_handler(&id_priv->id, &event)) { | 1951 | if (id_priv->id.event_handler(&id_priv->id, &event)) { |
1962 | cma_exch(id_priv, CMA_DESTROYING); | 1952 | cma_exch(id_priv, RDMA_CM_DESTROYING); |
1963 | mutex_unlock(&id_priv->handler_mutex); | 1953 | mutex_unlock(&id_priv->handler_mutex); |
1964 | cma_deref_id(id_priv); | 1954 | cma_deref_id(id_priv); |
1965 | rdma_destroy_id(&id_priv->id); | 1955 | rdma_destroy_id(&id_priv->id); |
@@ -2004,8 +1994,8 @@ static int cma_resolve_loopback(struct rdma_id_private *id_priv) | |||
2004 | 1994 | ||
2005 | work->id = id_priv; | 1995 | work->id = id_priv; |
2006 | INIT_WORK(&work->work, cma_work_handler); | 1996 | INIT_WORK(&work->work, cma_work_handler); |
2007 | work->old_state = CMA_ADDR_QUERY; | 1997 | work->old_state = RDMA_CM_ADDR_QUERY; |
2008 | work->new_state = CMA_ADDR_RESOLVED; | 1998 | work->new_state = RDMA_CM_ADDR_RESOLVED; |
2009 | work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; | 1999 | work->event.event = RDMA_CM_EVENT_ADDR_RESOLVED; |
2010 | queue_work(cma_wq, &work->work); | 2000 | queue_work(cma_wq, &work->work); |
2011 | return 0; | 2001 | return 0; |
@@ -2034,13 +2024,13 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, | |||
2034 | int ret; | 2024 | int ret; |
2035 | 2025 | ||
2036 | id_priv = container_of(id, struct rdma_id_private, id); | 2026 | id_priv = container_of(id, struct rdma_id_private, id); |
2037 | if (id_priv->state == CMA_IDLE) { | 2027 | if (id_priv->state == RDMA_CM_IDLE) { |
2038 | ret = cma_bind_addr(id, src_addr, dst_addr); | 2028 | ret = cma_bind_addr(id, src_addr, dst_addr); |
2039 | if (ret) | 2029 | if (ret) |
2040 | return ret; | 2030 | return ret; |
2041 | } | 2031 | } |
2042 | 2032 | ||
2043 | if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_ADDR_QUERY)) | 2033 | if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_ADDR_QUERY)) |
2044 | return -EINVAL; | 2034 | return -EINVAL; |
2045 | 2035 | ||
2046 | atomic_inc(&id_priv->refcount); | 2036 | atomic_inc(&id_priv->refcount); |
@@ -2056,7 +2046,7 @@ int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr, | |||
2056 | 2046 | ||
2057 | return 0; | 2047 | return 0; |
2058 | err: | 2048 | err: |
2059 | cma_comp_exch(id_priv, CMA_ADDR_QUERY, CMA_ADDR_BOUND); | 2049 | cma_comp_exch(id_priv, RDMA_CM_ADDR_QUERY, RDMA_CM_ADDR_BOUND); |
2060 | cma_deref_id(id_priv); | 2050 | cma_deref_id(id_priv); |
2061 | return ret; | 2051 | return ret; |
2062 | } | 2052 | } |
@@ -2070,7 +2060,7 @@ int rdma_set_reuseaddr(struct rdma_cm_id *id, int reuse) | |||
2070 | 2060 | ||
2071 | id_priv = container_of(id, struct rdma_id_private, id); | 2061 | id_priv = container_of(id, struct rdma_id_private, id); |
2072 | spin_lock_irqsave(&id_priv->lock, flags); | 2062 | spin_lock_irqsave(&id_priv->lock, flags); |
2073 | if (id_priv->state == CMA_IDLE) { | 2063 | if (id_priv->state == RDMA_CM_IDLE) { |
2074 | id_priv->reuseaddr = reuse; | 2064 | id_priv->reuseaddr = reuse; |
2075 | ret = 0; | 2065 | ret = 0; |
2076 | } else { | 2066 | } else { |
@@ -2177,7 +2167,7 @@ static int cma_check_port(struct rdma_bind_list *bind_list, | |||
2177 | if (id_priv == cur_id) | 2167 | if (id_priv == cur_id) |
2178 | continue; | 2168 | continue; |
2179 | 2169 | ||
2180 | if ((cur_id->state == CMA_LISTEN) || | 2170 | if ((cur_id->state == RDMA_CM_LISTEN) || |
2181 | !reuseaddr || !cur_id->reuseaddr) { | 2171 | !reuseaddr || !cur_id->reuseaddr) { |
2182 | cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; | 2172 | cur_addr = (struct sockaddr *) &cur_id->id.route.addr.src_addr; |
2183 | if (cma_any_addr(cur_addr)) | 2173 | if (cma_any_addr(cur_addr)) |
@@ -2280,14 +2270,14 @@ int rdma_listen(struct rdma_cm_id *id, int backlog) | |||
2280 | int ret; | 2270 | int ret; |
2281 | 2271 | ||
2282 | id_priv = container_of(id, struct rdma_id_private, id); | 2272 | id_priv = container_of(id, struct rdma_id_private, id); |
2283 | if (id_priv->state == CMA_IDLE) { | 2273 | if (id_priv->state == RDMA_CM_IDLE) { |
2284 | ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; | 2274 | ((struct sockaddr *) &id->route.addr.src_addr)->sa_family = AF_INET; |
2285 | ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); | 2275 | ret = rdma_bind_addr(id, (struct sockaddr *) &id->route.addr.src_addr); |
2286 | if (ret) | 2276 | if (ret) |
2287 | return ret; | 2277 | return ret; |
2288 | } | 2278 | } |
2289 | 2279 | ||
2290 | if (!cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_LISTEN)) | 2280 | if (!cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_LISTEN)) |
2291 | return -EINVAL; | 2281 | return -EINVAL; |
2292 | 2282 | ||
2293 | if (id_priv->reuseaddr) { | 2283 | if (id_priv->reuseaddr) { |
@@ -2319,7 +2309,7 @@ int rdma_listen(struct rdma_cm_id *id, int backlog) | |||
2319 | return 0; | 2309 | return 0; |
2320 | err: | 2310 | err: |
2321 | id_priv->backlog = 0; | 2311 | id_priv->backlog = 0; |
2322 | cma_comp_exch(id_priv, CMA_LISTEN, CMA_ADDR_BOUND); | 2312 | cma_comp_exch(id_priv, RDMA_CM_LISTEN, RDMA_CM_ADDR_BOUND); |
2323 | return ret; | 2313 | return ret; |
2324 | } | 2314 | } |
2325 | EXPORT_SYMBOL(rdma_listen); | 2315 | EXPORT_SYMBOL(rdma_listen); |
@@ -2333,7 +2323,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr) | |||
2333 | return -EAFNOSUPPORT; | 2323 | return -EAFNOSUPPORT; |
2334 | 2324 | ||
2335 | id_priv = container_of(id, struct rdma_id_private, id); | 2325 | id_priv = container_of(id, struct rdma_id_private, id); |
2336 | if (!cma_comp_exch(id_priv, CMA_IDLE, CMA_ADDR_BOUND)) | 2326 | if (!cma_comp_exch(id_priv, RDMA_CM_IDLE, RDMA_CM_ADDR_BOUND)) |
2337 | return -EINVAL; | 2327 | return -EINVAL; |
2338 | 2328 | ||
2339 | ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); | 2329 | ret = cma_check_linklocal(&id->route.addr.dev_addr, addr); |
@@ -2360,7 +2350,7 @@ err2: | |||
2360 | if (id_priv->cma_dev) | 2350 | if (id_priv->cma_dev) |
2361 | cma_release_dev(id_priv); | 2351 | cma_release_dev(id_priv); |
2362 | err1: | 2352 | err1: |
2363 | cma_comp_exch(id_priv, CMA_ADDR_BOUND, CMA_IDLE); | 2353 | cma_comp_exch(id_priv, RDMA_CM_ADDR_BOUND, RDMA_CM_IDLE); |
2364 | return ret; | 2354 | return ret; |
2365 | } | 2355 | } |
2366 | EXPORT_SYMBOL(rdma_bind_addr); | 2356 | EXPORT_SYMBOL(rdma_bind_addr); |
@@ -2433,7 +2423,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, | |||
2433 | struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; | 2423 | struct ib_cm_sidr_rep_event_param *rep = &ib_event->param.sidr_rep_rcvd; |
2434 | int ret = 0; | 2424 | int ret = 0; |
2435 | 2425 | ||
2436 | if (cma_disable_callback(id_priv, CMA_CONNECT)) | 2426 | if (cma_disable_callback(id_priv, RDMA_CM_CONNECT)) |
2437 | return 0; | 2427 | return 0; |
2438 | 2428 | ||
2439 | memset(&event, 0, sizeof event); | 2429 | memset(&event, 0, sizeof event); |
@@ -2479,7 +2469,7 @@ static int cma_sidr_rep_handler(struct ib_cm_id *cm_id, | |||
2479 | if (ret) { | 2469 | if (ret) { |
2480 | /* Destroy the CM ID by returning a non-zero value. */ | 2470 | /* Destroy the CM ID by returning a non-zero value. */ |
2481 | id_priv->cm_id.ib = NULL; | 2471 | id_priv->cm_id.ib = NULL; |
2482 | cma_exch(id_priv, CMA_DESTROYING); | 2472 | cma_exch(id_priv, RDMA_CM_DESTROYING); |
2483 | mutex_unlock(&id_priv->handler_mutex); | 2473 | mutex_unlock(&id_priv->handler_mutex); |
2484 | rdma_destroy_id(&id_priv->id); | 2474 | rdma_destroy_id(&id_priv->id); |
2485 | return ret; | 2475 | return ret; |
@@ -2645,7 +2635,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) | |||
2645 | int ret; | 2635 | int ret; |
2646 | 2636 | ||
2647 | id_priv = container_of(id, struct rdma_id_private, id); | 2637 | id_priv = container_of(id, struct rdma_id_private, id); |
2648 | if (!cma_comp_exch(id_priv, CMA_ROUTE_RESOLVED, CMA_CONNECT)) | 2638 | if (!cma_comp_exch(id_priv, RDMA_CM_ROUTE_RESOLVED, RDMA_CM_CONNECT)) |
2649 | return -EINVAL; | 2639 | return -EINVAL; |
2650 | 2640 | ||
2651 | if (!id->qp) { | 2641 | if (!id->qp) { |
@@ -2655,7 +2645,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) | |||
2655 | 2645 | ||
2656 | switch (rdma_node_get_transport(id->device->node_type)) { | 2646 | switch (rdma_node_get_transport(id->device->node_type)) { |
2657 | case RDMA_TRANSPORT_IB: | 2647 | case RDMA_TRANSPORT_IB: |
2658 | if (cma_is_ud_ps(id->ps)) | 2648 | if (id->qp_type == IB_QPT_UD) |
2659 | ret = cma_resolve_ib_udp(id_priv, conn_param); | 2649 | ret = cma_resolve_ib_udp(id_priv, conn_param); |
2660 | else | 2650 | else |
2661 | ret = cma_connect_ib(id_priv, conn_param); | 2651 | ret = cma_connect_ib(id_priv, conn_param); |
@@ -2672,7 +2662,7 @@ int rdma_connect(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) | |||
2672 | 2662 | ||
2673 | return 0; | 2663 | return 0; |
2674 | err: | 2664 | err: |
2675 | cma_comp_exch(id_priv, CMA_CONNECT, CMA_ROUTE_RESOLVED); | 2665 | cma_comp_exch(id_priv, RDMA_CM_CONNECT, RDMA_CM_ROUTE_RESOLVED); |
2676 | return ret; | 2666 | return ret; |
2677 | } | 2667 | } |
2678 | EXPORT_SYMBOL(rdma_connect); | 2668 | EXPORT_SYMBOL(rdma_connect); |
@@ -2758,7 +2748,10 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) | |||
2758 | int ret; | 2748 | int ret; |
2759 | 2749 | ||
2760 | id_priv = container_of(id, struct rdma_id_private, id); | 2750 | id_priv = container_of(id, struct rdma_id_private, id); |
2761 | if (!cma_comp(id_priv, CMA_CONNECT)) | 2751 | |
2752 | id_priv->owner = task_pid_nr(current); | ||
2753 | |||
2754 | if (!cma_comp(id_priv, RDMA_CM_CONNECT)) | ||
2762 | return -EINVAL; | 2755 | return -EINVAL; |
2763 | 2756 | ||
2764 | if (!id->qp && conn_param) { | 2757 | if (!id->qp && conn_param) { |
@@ -2768,7 +2761,7 @@ int rdma_accept(struct rdma_cm_id *id, struct rdma_conn_param *conn_param) | |||
2768 | 2761 | ||
2769 | switch (rdma_node_get_transport(id->device->node_type)) { | 2762 | switch (rdma_node_get_transport(id->device->node_type)) { |
2770 | case RDMA_TRANSPORT_IB: | 2763 | case RDMA_TRANSPORT_IB: |
2771 | if (cma_is_ud_ps(id->ps)) | 2764 | if (id->qp_type == IB_QPT_UD) |
2772 | ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, | 2765 | ret = cma_send_sidr_rep(id_priv, IB_SIDR_SUCCESS, |
2773 | conn_param->private_data, | 2766 | conn_param->private_data, |
2774 | conn_param->private_data_len); | 2767 | conn_param->private_data_len); |
@@ -2829,7 +2822,7 @@ int rdma_reject(struct rdma_cm_id *id, const void *private_data, | |||
2829 | 2822 | ||
2830 | switch (rdma_node_get_transport(id->device->node_type)) { | 2823 | switch (rdma_node_get_transport(id->device->node_type)) { |
2831 | case RDMA_TRANSPORT_IB: | 2824 | case RDMA_TRANSPORT_IB: |
2832 | if (cma_is_ud_ps(id->ps)) | 2825 | if (id->qp_type == IB_QPT_UD) |
2833 | ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, | 2826 | ret = cma_send_sidr_rep(id_priv, IB_SIDR_REJECT, |
2834 | private_data, private_data_len); | 2827 | private_data, private_data_len); |
2835 | else | 2828 | else |
@@ -2887,8 +2880,8 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) | |||
2887 | int ret; | 2880 | int ret; |
2888 | 2881 | ||
2889 | id_priv = mc->id_priv; | 2882 | id_priv = mc->id_priv; |
2890 | if (cma_disable_callback(id_priv, CMA_ADDR_BOUND) && | 2883 | if (cma_disable_callback(id_priv, RDMA_CM_ADDR_BOUND) && |
2891 | cma_disable_callback(id_priv, CMA_ADDR_RESOLVED)) | 2884 | cma_disable_callback(id_priv, RDMA_CM_ADDR_RESOLVED)) |
2892 | return 0; | 2885 | return 0; |
2893 | 2886 | ||
2894 | mutex_lock(&id_priv->qp_mutex); | 2887 | mutex_lock(&id_priv->qp_mutex); |
@@ -2912,7 +2905,7 @@ static int cma_ib_mc_handler(int status, struct ib_sa_multicast *multicast) | |||
2912 | 2905 | ||
2913 | ret = id_priv->id.event_handler(&id_priv->id, &event); | 2906 | ret = id_priv->id.event_handler(&id_priv->id, &event); |
2914 | if (ret) { | 2907 | if (ret) { |
2915 | cma_exch(id_priv, CMA_DESTROYING); | 2908 | cma_exch(id_priv, RDMA_CM_DESTROYING); |
2916 | mutex_unlock(&id_priv->handler_mutex); | 2909 | mutex_unlock(&id_priv->handler_mutex); |
2917 | rdma_destroy_id(&id_priv->id); | 2910 | rdma_destroy_id(&id_priv->id); |
2918 | return 0; | 2911 | return 0; |
@@ -3095,8 +3088,8 @@ int rdma_join_multicast(struct rdma_cm_id *id, struct sockaddr *addr, | |||
3095 | int ret; | 3088 | int ret; |
3096 | 3089 | ||
3097 | id_priv = container_of(id, struct rdma_id_private, id); | 3090 | id_priv = container_of(id, struct rdma_id_private, id); |
3098 | if (!cma_comp(id_priv, CMA_ADDR_BOUND) && | 3091 | if (!cma_comp(id_priv, RDMA_CM_ADDR_BOUND) && |
3099 | !cma_comp(id_priv, CMA_ADDR_RESOLVED)) | 3092 | !cma_comp(id_priv, RDMA_CM_ADDR_RESOLVED)) |
3100 | return -EINVAL; | 3093 | return -EINVAL; |
3101 | 3094 | ||
3102 | mc = kmalloc(sizeof *mc, GFP_KERNEL); | 3095 | mc = kmalloc(sizeof *mc, GFP_KERNEL); |
@@ -3261,19 +3254,19 @@ static void cma_add_one(struct ib_device *device) | |||
3261 | static int cma_remove_id_dev(struct rdma_id_private *id_priv) | 3254 | static int cma_remove_id_dev(struct rdma_id_private *id_priv) |
3262 | { | 3255 | { |
3263 | struct rdma_cm_event event; | 3256 | struct rdma_cm_event event; |
3264 | enum cma_state state; | 3257 | enum rdma_cm_state state; |
3265 | int ret = 0; | 3258 | int ret = 0; |
3266 | 3259 | ||
3267 | /* Record that we want to remove the device */ | 3260 | /* Record that we want to remove the device */ |
3268 | state = cma_exch(id_priv, CMA_DEVICE_REMOVAL); | 3261 | state = cma_exch(id_priv, RDMA_CM_DEVICE_REMOVAL); |
3269 | if (state == CMA_DESTROYING) | 3262 | if (state == RDMA_CM_DESTROYING) |
3270 | return 0; | 3263 | return 0; |
3271 | 3264 | ||
3272 | cma_cancel_operation(id_priv, state); | 3265 | cma_cancel_operation(id_priv, state); |
3273 | mutex_lock(&id_priv->handler_mutex); | 3266 | mutex_lock(&id_priv->handler_mutex); |
3274 | 3267 | ||
3275 | /* Check for destruction from another callback. */ | 3268 | /* Check for destruction from another callback. */ |
3276 | if (!cma_comp(id_priv, CMA_DEVICE_REMOVAL)) | 3269 | if (!cma_comp(id_priv, RDMA_CM_DEVICE_REMOVAL)) |
3277 | goto out; | 3270 | goto out; |
3278 | 3271 | ||
3279 | memset(&event, 0, sizeof event); | 3272 | memset(&event, 0, sizeof event); |
@@ -3328,6 +3321,100 @@ static void cma_remove_one(struct ib_device *device) | |||
3328 | kfree(cma_dev); | 3321 | kfree(cma_dev); |
3329 | } | 3322 | } |
3330 | 3323 | ||
3324 | static int cma_get_id_stats(struct sk_buff *skb, struct netlink_callback *cb) | ||
3325 | { | ||
3326 | struct nlmsghdr *nlh; | ||
3327 | struct rdma_cm_id_stats *id_stats; | ||
3328 | struct rdma_id_private *id_priv; | ||
3329 | struct rdma_cm_id *id = NULL; | ||
3330 | struct cma_device *cma_dev; | ||
3331 | int i_dev = 0, i_id = 0; | ||
3332 | |||
3333 | /* | ||
3334 | * We export all of the IDs as a sequence of messages. Each | ||
3335 | * ID gets its own netlink message. | ||
3336 | */ | ||
3337 | mutex_lock(&lock); | ||
3338 | |||
3339 | list_for_each_entry(cma_dev, &dev_list, list) { | ||
3340 | if (i_dev < cb->args[0]) { | ||
3341 | i_dev++; | ||
3342 | continue; | ||
3343 | } | ||
3344 | |||
3345 | i_id = 0; | ||
3346 | list_for_each_entry(id_priv, &cma_dev->id_list, list) { | ||
3347 | if (i_id < cb->args[1]) { | ||
3348 | i_id++; | ||
3349 | continue; | ||
3350 | } | ||
3351 | |||
3352 | id_stats = ibnl_put_msg(skb, &nlh, cb->nlh->nlmsg_seq, | ||
3353 | sizeof *id_stats, RDMA_NL_RDMA_CM, | ||
3354 | RDMA_NL_RDMA_CM_ID_STATS); | ||
3355 | if (!id_stats) | ||
3356 | goto out; | ||
3357 | |||
3358 | memset(id_stats, 0, sizeof *id_stats); | ||
3359 | id = &id_priv->id; | ||
3360 | id_stats->node_type = id->route.addr.dev_addr.dev_type; | ||
3361 | id_stats->port_num = id->port_num; | ||
3362 | id_stats->bound_dev_if = | ||
3363 | id->route.addr.dev_addr.bound_dev_if; | ||
3364 | |||
3365 | if (id->route.addr.src_addr.ss_family == AF_INET) { | ||
3366 | if (ibnl_put_attr(skb, nlh, | ||
3367 | sizeof(struct sockaddr_in), | ||
3368 | &id->route.addr.src_addr, | ||
3369 | RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) { | ||
3370 | goto out; | ||
3371 | } | ||
3372 | if (ibnl_put_attr(skb, nlh, | ||
3373 | sizeof(struct sockaddr_in), | ||
3374 | &id->route.addr.dst_addr, | ||
3375 | RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) { | ||
3376 | goto out; | ||
3377 | } | ||
3378 | } else if (id->route.addr.src_addr.ss_family == AF_INET6) { | ||
3379 | if (ibnl_put_attr(skb, nlh, | ||
3380 | sizeof(struct sockaddr_in6), | ||
3381 | &id->route.addr.src_addr, | ||
3382 | RDMA_NL_RDMA_CM_ATTR_SRC_ADDR)) { | ||
3383 | goto out; | ||
3384 | } | ||
3385 | if (ibnl_put_attr(skb, nlh, | ||
3386 | sizeof(struct sockaddr_in6), | ||
3387 | &id->route.addr.dst_addr, | ||
3388 | RDMA_NL_RDMA_CM_ATTR_DST_ADDR)) { | ||
3389 | goto out; | ||
3390 | } | ||
3391 | } | ||
3392 | |||
3393 | id_stats->pid = id_priv->owner; | ||
3394 | id_stats->port_space = id->ps; | ||
3395 | id_stats->cm_state = id_priv->state; | ||
3396 | id_stats->qp_num = id_priv->qp_num; | ||
3397 | id_stats->qp_type = id->qp_type; | ||
3398 | |||
3399 | i_id++; | ||
3400 | } | ||
3401 | |||
3402 | cb->args[1] = 0; | ||
3403 | i_dev++; | ||
3404 | } | ||
3405 | |||
3406 | out: | ||
3407 | mutex_unlock(&lock); | ||
3408 | cb->args[0] = i_dev; | ||
3409 | cb->args[1] = i_id; | ||
3410 | |||
3411 | return skb->len; | ||
3412 | } | ||
3413 | |||
3414 | static const struct ibnl_client_cbs cma_cb_table[] = { | ||
3415 | [RDMA_NL_RDMA_CM_ID_STATS] = { .dump = cma_get_id_stats }, | ||
3416 | }; | ||
3417 | |||
3331 | static int __init cma_init(void) | 3418 | static int __init cma_init(void) |
3332 | { | 3419 | { |
3333 | int ret; | 3420 | int ret; |
@@ -3343,6 +3430,10 @@ static int __init cma_init(void) | |||
3343 | ret = ib_register_client(&cma_client); | 3430 | ret = ib_register_client(&cma_client); |
3344 | if (ret) | 3431 | if (ret) |
3345 | goto err; | 3432 | goto err; |
3433 | |||
3434 | if (ibnl_add_client(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_NUM_OPS, cma_cb_table)) | ||
3435 | printk(KERN_WARNING "RDMA CMA: failed to add netlink callback\n"); | ||
3436 | |||
3346 | return 0; | 3437 | return 0; |
3347 | 3438 | ||
3348 | err: | 3439 | err: |
@@ -3355,6 +3446,7 @@ err: | |||
3355 | 3446 | ||
3356 | static void __exit cma_cleanup(void) | 3447 | static void __exit cma_cleanup(void) |
3357 | { | 3448 | { |
3449 | ibnl_remove_client(RDMA_NL_RDMA_CM); | ||
3358 | ib_unregister_client(&cma_client); | 3450 | ib_unregister_client(&cma_client); |
3359 | unregister_netdevice_notifier(&cma_nb); | 3451 | unregister_netdevice_notifier(&cma_nb); |
3360 | rdma_addr_unregister_client(&addr_client); | 3452 | rdma_addr_unregister_client(&addr_client); |
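The cma_get_id_stats() dump callback added above walks dev_list and each device's id_list, emits one netlink message per rdma_cm_id via ibnl_put_msg()/ibnl_put_attr(), and saves its position in cb->args[0]/cb->args[1] so the dump can resume once the skb fills up. A minimal sketch of that resume pattern, assuming the standard netlink dump contract (a fresh skb on each invocation, dump ends once nothing more is added); example_nr_items() and example_fill_one() are hypothetical stand-ins for the real iteration:

#include <linux/skbuff.h>
#include <net/netlink.h>

/* hypothetical helpers standing in for the real per-ID iteration */
static int example_nr_items(void);
static int example_fill_one(struct sk_buff *skb, int i);

static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int i = cb->args[0];			/* cursor saved by the previous pass */

	for (; i < example_nr_items(); i++) {
		if (example_fill_one(skb, i))	/* non-zero: skb is full, stop here */
			break;
	}

	cb->args[0] = i;			/* resume from here on the next call */
	return skb->len;			/* 0 once nothing was added, which ends the dump */
}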
diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index f793bf2f5da7..4007f721d25d 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/slab.h> | 38 | #include <linux/slab.h> |
39 | #include <linux/init.h> | 39 | #include <linux/init.h> |
40 | #include <linux/mutex.h> | 40 | #include <linux/mutex.h> |
41 | #include <rdma/rdma_netlink.h> | ||
41 | 42 | ||
42 | #include "core_priv.h" | 43 | #include "core_priv.h" |
43 | 44 | ||
@@ -725,22 +726,40 @@ static int __init ib_core_init(void) | |||
725 | return -ENOMEM; | 726 | return -ENOMEM; |
726 | 727 | ||
727 | ret = ib_sysfs_setup(); | 728 | ret = ib_sysfs_setup(); |
728 | if (ret) | 729 | if (ret) { |
729 | printk(KERN_WARNING "Couldn't create InfiniBand device class\n"); | 730 | printk(KERN_WARNING "Couldn't create InfiniBand device class\n"); |
731 | goto err; | ||
732 | } | ||
733 | |||
734 | ret = ibnl_init(); | ||
735 | if (ret) { | ||
736 | printk(KERN_WARNING "Couldn't init IB netlink interface\n"); | ||
737 | goto err_sysfs; | ||
738 | } | ||
730 | 739 | ||
731 | ret = ib_cache_setup(); | 740 | ret = ib_cache_setup(); |
732 | if (ret) { | 741 | if (ret) { |
733 | printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n"); | 742 | printk(KERN_WARNING "Couldn't set up InfiniBand P_Key/GID cache\n"); |
734 | ib_sysfs_cleanup(); | 743 | goto err_nl; |
735 | destroy_workqueue(ib_wq); | ||
736 | } | 744 | } |
737 | 745 | ||
746 | return 0; | ||
747 | |||
748 | err_nl: | ||
749 | ibnl_cleanup(); | ||
750 | |||
751 | err_sysfs: | ||
752 | ib_sysfs_cleanup(); | ||
753 | |||
754 | err: | ||
755 | destroy_workqueue(ib_wq); | ||
738 | return ret; | 756 | return ret; |
739 | } | 757 | } |
740 | 758 | ||
741 | static void __exit ib_core_cleanup(void) | 759 | static void __exit ib_core_cleanup(void) |
742 | { | 760 | { |
743 | ib_cache_cleanup(); | 761 | ib_cache_cleanup(); |
762 | ibnl_cleanup(); | ||
744 | ib_sysfs_cleanup(); | 763 | ib_sysfs_cleanup(); |
745 | /* Make sure that any pending umem accounting work is done. */ | 764 | /* Make sure that any pending umem accounting work is done. */ |
746 | destroy_workqueue(ib_wq); | 765 | destroy_workqueue(ib_wq); |
diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 822cfdcd9f78..b4d8672a3e4e 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c | |||
@@ -276,6 +276,13 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, | |||
276 | goto error1; | 276 | goto error1; |
277 | } | 277 | } |
278 | 278 | ||
279 | /* Verify the QP requested is supported. For example, Ethernet devices | ||
280 | * will not have QP0 */ | ||
281 | if (!port_priv->qp_info[qpn].qp) { | ||
282 | ret = ERR_PTR(-EPROTONOSUPPORT); | ||
283 | goto error1; | ||
284 | } | ||
285 | |||
279 | /* Allocate structures */ | 286 | /* Allocate structures */ |
280 | mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL); | 287 | mad_agent_priv = kzalloc(sizeof *mad_agent_priv, GFP_KERNEL); |
281 | if (!mad_agent_priv) { | 288 | if (!mad_agent_priv) { |
diff --git a/drivers/infiniband/core/netlink.c b/drivers/infiniband/core/netlink.c new file mode 100644 index 000000000000..4a5abaf0a25c --- /dev/null +++ b/drivers/infiniband/core/netlink.c | |||
@@ -0,0 +1,190 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2010 Voltaire Inc. All rights reserved. | ||
3 | * | ||
4 | * This software is available to you under a choice of one of two | ||
5 | * licenses. You may choose to be licensed under the terms of the GNU | ||
6 | * General Public License (GPL) Version 2, available from the file | ||
7 | * COPYING in the main directory of this source tree, or the | ||
8 | * OpenIB.org BSD license below: | ||
9 | * | ||
10 | * Redistribution and use in source and binary forms, with or | ||
11 | * without modification, are permitted provided that the following | ||
12 | * conditions are met: | ||
13 | * | ||
14 | * - Redistributions of source code must retain the above | ||
15 | * copyright notice, this list of conditions and the following | ||
16 | * disclaimer. | ||
17 | * | ||
18 | * - Redistributions in binary form must reproduce the above | ||
19 | * copyright notice, this list of conditions and the following | ||
20 | * disclaimer in the documentation and/or other materials | ||
21 | * provided with the distribution. | ||
22 | * | ||
23 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, | ||
24 | * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF | ||
25 | * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND | ||
26 | * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS | ||
27 | * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN | ||
28 | * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN | ||
29 | * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE | ||
30 | * SOFTWARE. | ||
31 | */ | ||
32 | |||
33 | #define pr_fmt(fmt) "%s:%s: " fmt, KBUILD_MODNAME, __func__ | ||
34 | |||
35 | #include <net/netlink.h> | ||
36 | #include <net/net_namespace.h> | ||
37 | #include <net/sock.h> | ||
38 | #include <rdma/rdma_netlink.h> | ||
39 | |||
40 | struct ibnl_client { | ||
41 | struct list_head list; | ||
42 | int index; | ||
43 | int nops; | ||
44 | const struct ibnl_client_cbs *cb_table; | ||
45 | }; | ||
46 | |||
47 | static DEFINE_MUTEX(ibnl_mutex); | ||
48 | static struct sock *nls; | ||
49 | static LIST_HEAD(client_list); | ||
50 | |||
51 | int ibnl_add_client(int index, int nops, | ||
52 | const struct ibnl_client_cbs cb_table[]) | ||
53 | { | ||
54 | struct ibnl_client *cur; | ||
55 | struct ibnl_client *nl_client; | ||
56 | |||
57 | nl_client = kmalloc(sizeof *nl_client, GFP_KERNEL); | ||
58 | if (!nl_client) | ||
59 | return -ENOMEM; | ||
60 | |||
61 | nl_client->index = index; | ||
62 | nl_client->nops = nops; | ||
63 | nl_client->cb_table = cb_table; | ||
64 | |||
65 | mutex_lock(&ibnl_mutex); | ||
66 | |||
67 | list_for_each_entry(cur, &client_list, list) { | ||
68 | if (cur->index == index) { | ||
69 | pr_warn("Client for %d already exists\n", index); | ||
70 | mutex_unlock(&ibnl_mutex); | ||
71 | kfree(nl_client); | ||
72 | return -EINVAL; | ||
73 | } | ||
74 | } | ||
75 | |||
76 | list_add_tail(&nl_client->list, &client_list); | ||
77 | |||
78 | mutex_unlock(&ibnl_mutex); | ||
79 | |||
80 | return 0; | ||
81 | } | ||
82 | EXPORT_SYMBOL(ibnl_add_client); | ||
83 | |||
84 | int ibnl_remove_client(int index) | ||
85 | { | ||
86 | struct ibnl_client *cur, *next; | ||
87 | |||
88 | mutex_lock(&ibnl_mutex); | ||
89 | list_for_each_entry_safe(cur, next, &client_list, list) { | ||
90 | if (cur->index == index) { | ||
91 | list_del(&(cur->list)); | ||
92 | mutex_unlock(&ibnl_mutex); | ||
93 | kfree(cur); | ||
94 | return 0; | ||
95 | } | ||
96 | } | ||
97 | pr_warn("Can't remove callback for client idx %d. Not found\n", index); | ||
98 | mutex_unlock(&ibnl_mutex); | ||
99 | |||
100 | return -EINVAL; | ||
101 | } | ||
102 | EXPORT_SYMBOL(ibnl_remove_client); | ||
103 | |||
104 | void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, | ||
105 | int len, int client, int op) | ||
106 | { | ||
107 | unsigned char *prev_tail; | ||
108 | |||
109 | prev_tail = skb_tail_pointer(skb); | ||
110 | *nlh = NLMSG_NEW(skb, 0, seq, RDMA_NL_GET_TYPE(client, op), | ||
111 | len, NLM_F_MULTI); | ||
112 | (*nlh)->nlmsg_len = skb_tail_pointer(skb) - prev_tail; | ||
113 | return NLMSG_DATA(*nlh); | ||
114 | |||
115 | nlmsg_failure: | ||
116 | nlmsg_trim(skb, prev_tail); | ||
117 | return NULL; | ||
118 | } | ||
119 | EXPORT_SYMBOL(ibnl_put_msg); | ||
120 | |||
121 | int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, | ||
122 | int len, void *data, int type) | ||
123 | { | ||
124 | unsigned char *prev_tail; | ||
125 | |||
126 | prev_tail = skb_tail_pointer(skb); | ||
127 | NLA_PUT(skb, type, len, data); | ||
128 | nlh->nlmsg_len += skb_tail_pointer(skb) - prev_tail; | ||
129 | return 0; | ||
130 | |||
131 | nla_put_failure: | ||
132 | nlmsg_trim(skb, prev_tail - nlh->nlmsg_len); | ||
133 | return -EMSGSIZE; | ||
134 | } | ||
135 | EXPORT_SYMBOL(ibnl_put_attr); | ||
136 | |||
137 | static int ibnl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh) | ||
138 | { | ||
139 | struct ibnl_client *client; | ||
140 | int type = nlh->nlmsg_type; | ||
141 | int index = RDMA_NL_GET_CLIENT(type); | ||
142 | int op = RDMA_NL_GET_OP(type); | ||
143 | |||
144 | list_for_each_entry(client, &client_list, list) { | ||
145 | if (client->index == index) { | ||
146 | if (op < 0 || op >= client->nops || | ||
147 | !client->cb_table[RDMA_NL_GET_OP(op)].dump) | ||
148 | return -EINVAL; | ||
149 | return netlink_dump_start(nls, skb, nlh, | ||
150 | client->cb_table[op].dump, | ||
151 | NULL); | ||
152 | } | ||
153 | } | ||
154 | |||
155 | pr_info("Index %d wasn't found in client list\n", index); | ||
156 | return -EINVAL; | ||
157 | } | ||
158 | |||
159 | static void ibnl_rcv(struct sk_buff *skb) | ||
160 | { | ||
161 | mutex_lock(&ibnl_mutex); | ||
162 | netlink_rcv_skb(skb, &ibnl_rcv_msg); | ||
163 | mutex_unlock(&ibnl_mutex); | ||
164 | } | ||
165 | |||
166 | int __init ibnl_init(void) | ||
167 | { | ||
168 | nls = netlink_kernel_create(&init_net, NETLINK_RDMA, 0, ibnl_rcv, | ||
169 | NULL, THIS_MODULE); | ||
170 | if (!nls) { | ||
171 | pr_warn("Failed to create netlink socket\n"); | ||
172 | return -ENOMEM; | ||
173 | } | ||
174 | |||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | void ibnl_cleanup(void) | ||
179 | { | ||
180 | struct ibnl_client *cur, *next; | ||
181 | |||
182 | mutex_lock(&ibnl_mutex); | ||
183 | list_for_each_entry_safe(cur, next, &client_list, list) { | ||
184 | list_del(&(cur->list)); | ||
185 | kfree(cur); | ||
186 | } | ||
187 | mutex_unlock(&ibnl_mutex); | ||
188 | |||
189 | netlink_kernel_release(nls); | ||
190 | } | ||
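For a client of this new interface, registration follows the pattern cma.c uses above: a table of ibnl_client_cbs indexed by op, added once at module init and removed at exit. A minimal sketch under the assumption of a hypothetical client index and op count (real clients take theirs from <rdma/rdma_netlink.h>, as RDMA_NL_RDMA_CM does):

#include <linux/module.h>
#include <net/netlink.h>
#include <rdma/rdma_netlink.h>

/* EXAMPLE_NL_CLIENT and EXAMPLE_NL_NUM_OPS are hypothetical placeholders */
enum { EXAMPLE_NL_CLIENT = 2, EXAMPLE_NL_NUM_OPS = 1 };

static int example_stats_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	/* build messages with ibnl_put_msg()/ibnl_put_attr() here */
	return skb->len;
}

static const struct ibnl_client_cbs example_cb_table[] = {
	[0] = { .dump = example_stats_dump },
};

static int __init example_init(void)
{
	return ibnl_add_client(EXAMPLE_NL_CLIENT, EXAMPLE_NL_NUM_OPS,
			       example_cb_table);
}

static void __exit example_exit(void)
{
	ibnl_remove_client(EXAMPLE_NL_CLIENT);
}

module_init(example_init);
module_exit(example_exit);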
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index b3fa798525b2..71be5eebd683 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c | |||
@@ -367,13 +367,28 @@ done: | |||
367 | return ret; | 367 | return ret; |
368 | } | 368 | } |
369 | 369 | ||
370 | static ssize_t ucma_create_id(struct ucma_file *file, | 370 | static int ucma_get_qp_type(struct rdma_ucm_create_id *cmd, enum ib_qp_type *qp_type) |
371 | const char __user *inbuf, | 371 | { |
372 | int in_len, int out_len) | 372 | switch (cmd->ps) { |
373 | case RDMA_PS_TCP: | ||
374 | *qp_type = IB_QPT_RC; | ||
375 | return 0; | ||
376 | case RDMA_PS_UDP: | ||
377 | case RDMA_PS_IPOIB: | ||
378 | *qp_type = IB_QPT_UD; | ||
379 | return 0; | ||
380 | default: | ||
381 | return -EINVAL; | ||
382 | } | ||
383 | } | ||
384 | |||
385 | static ssize_t ucma_create_id(struct ucma_file *file, const char __user *inbuf, | ||
386 | int in_len, int out_len) | ||
373 | { | 387 | { |
374 | struct rdma_ucm_create_id cmd; | 388 | struct rdma_ucm_create_id cmd; |
375 | struct rdma_ucm_create_id_resp resp; | 389 | struct rdma_ucm_create_id_resp resp; |
376 | struct ucma_context *ctx; | 390 | struct ucma_context *ctx; |
391 | enum ib_qp_type qp_type; | ||
377 | int ret; | 392 | int ret; |
378 | 393 | ||
379 | if (out_len < sizeof(resp)) | 394 | if (out_len < sizeof(resp)) |
@@ -382,6 +397,10 @@ static ssize_t ucma_create_id(struct ucma_file *file, | |||
382 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) | 397 | if (copy_from_user(&cmd, inbuf, sizeof(cmd))) |
383 | return -EFAULT; | 398 | return -EFAULT; |
384 | 399 | ||
400 | ret = ucma_get_qp_type(&cmd, &qp_type); | ||
401 | if (ret) | ||
402 | return ret; | ||
403 | |||
385 | mutex_lock(&file->mut); | 404 | mutex_lock(&file->mut); |
386 | ctx = ucma_alloc_ctx(file); | 405 | ctx = ucma_alloc_ctx(file); |
387 | mutex_unlock(&file->mut); | 406 | mutex_unlock(&file->mut); |
@@ -389,7 +408,7 @@ static ssize_t ucma_create_id(struct ucma_file *file, | |||
389 | return -ENOMEM; | 408 | return -ENOMEM; |
390 | 409 | ||
391 | ctx->uid = cmd.uid; | 410 | ctx->uid = cmd.uid; |
392 | ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps); | 411 | ctx->cm_id = rdma_create_id(ucma_event_handler, ctx, cmd.ps, qp_type); |
393 | if (IS_ERR(ctx->cm_id)) { | 412 | if (IS_ERR(ctx->cm_id)) { |
394 | ret = PTR_ERR(ctx->cm_id); | 413 | ret = PTR_ERR(ctx->cm_id); |
395 | goto err1; | 414 | goto err1; |
@@ -1338,9 +1357,11 @@ static const struct file_operations ucma_fops = { | |||
1338 | }; | 1357 | }; |
1339 | 1358 | ||
1340 | static struct miscdevice ucma_misc = { | 1359 | static struct miscdevice ucma_misc = { |
1341 | .minor = MISC_DYNAMIC_MINOR, | 1360 | .minor = MISC_DYNAMIC_MINOR, |
1342 | .name = "rdma_cm", | 1361 | .name = "rdma_cm", |
1343 | .fops = &ucma_fops, | 1362 | .nodename = "infiniband/rdma_cm", |
1363 | .mode = 0666, | ||
1364 | .fops = &ucma_fops, | ||
1344 | }; | 1365 | }; |
1345 | 1366 | ||
1346 | static ssize_t show_abi_version(struct device *dev, | 1367 | static ssize_t show_abi_version(struct device *dev, |
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c index cd1996d0ad08..8d261b6ea5fe 100644 --- a/drivers/infiniband/core/user_mad.c +++ b/drivers/infiniband/core/user_mad.c | |||
@@ -1176,6 +1176,11 @@ static void ib_umad_remove_one(struct ib_device *device) | |||
1176 | kref_put(&umad_dev->ref, ib_umad_release_dev); | 1176 | kref_put(&umad_dev->ref, ib_umad_release_dev); |
1177 | } | 1177 | } |
1178 | 1178 | ||
1179 | static char *umad_devnode(struct device *dev, mode_t *mode) | ||
1180 | { | ||
1181 | return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); | ||
1182 | } | ||
1183 | |||
1179 | static int __init ib_umad_init(void) | 1184 | static int __init ib_umad_init(void) |
1180 | { | 1185 | { |
1181 | int ret; | 1186 | int ret; |
@@ -1194,6 +1199,8 @@ static int __init ib_umad_init(void) | |||
1194 | goto out_chrdev; | 1199 | goto out_chrdev; |
1195 | } | 1200 | } |
1196 | 1201 | ||
1202 | umad_class->devnode = umad_devnode; | ||
1203 | |||
1197 | ret = class_create_file(umad_class, &class_attr_abi_version.attr); | 1204 | ret = class_create_file(umad_class, &class_attr_abi_version.attr); |
1198 | if (ret) { | 1205 | if (ret) { |
1199 | printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n"); | 1206 | printk(KERN_ERR "user_mad: couldn't create abi_version attribute\n"); |
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c index ec83e9fe387b..e49a85f8a44d 100644 --- a/drivers/infiniband/core/uverbs_main.c +++ b/drivers/infiniband/core/uverbs_main.c | |||
@@ -824,6 +824,12 @@ static void ib_uverbs_remove_one(struct ib_device *device) | |||
824 | kfree(uverbs_dev); | 824 | kfree(uverbs_dev); |
825 | } | 825 | } |
826 | 826 | ||
827 | static char *uverbs_devnode(struct device *dev, mode_t *mode) | ||
828 | { | ||
829 | *mode = 0666; | ||
830 | return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev)); | ||
831 | } | ||
832 | |||
827 | static int __init ib_uverbs_init(void) | 833 | static int __init ib_uverbs_init(void) |
828 | { | 834 | { |
829 | int ret; | 835 | int ret; |
@@ -842,6 +848,8 @@ static int __init ib_uverbs_init(void) | |||
842 | goto out_chrdev; | 848 | goto out_chrdev; |
843 | } | 849 | } |
844 | 850 | ||
851 | uverbs_class->devnode = uverbs_devnode; | ||
852 | |||
845 | ret = class_create_file(uverbs_class, &class_attr_abi_version.attr); | 853 | ret = class_create_file(uverbs_class, &class_attr_abi_version.attr); |
846 | if (ret) { | 854 | if (ret) { |
847 | printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n"); | 855 | printk(KERN_ERR "user_verbs: couldn't create abi_version attribute\n"); |
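Both device-node hunks above use the same hook: a class-level devnode callback that the driver core consults when naming the node, so userspace sees these devices under /dev/infiniband/ (and uverbs additionally widens the mode). A minimal sketch for a hypothetical class, assuming the mode_t-based callback signature of this kernel:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>

static struct class *example_class;

static char *example_devnode(struct device *dev, mode_t *mode)
{
	if (mode)
		*mode = 0666;	/* world-accessible node, as uverbs requests */
	return kasprintf(GFP_KERNEL, "infiniband/%s", dev_name(dev));
}

static int __init example_class_init(void)
{
	example_class = class_create(THIS_MODULE, "example");
	if (IS_ERR(example_class))
		return PTR_ERR(example_class);

	example_class->devnode = example_devnode;	/* as in the hunks above */
	return 0;
}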
diff --git a/drivers/infiniband/hw/cxgb3/iwch_cm.c b/drivers/infiniband/hw/cxgb3/iwch_cm.c index 239184138994..0a5008fbebac 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_cm.c +++ b/drivers/infiniband/hw/cxgb3/iwch_cm.c | |||
@@ -914,7 +914,7 @@ static void process_mpa_reply(struct iwch_ep *ep, struct sk_buff *skb) | |||
914 | goto err; | 914 | goto err; |
915 | 915 | ||
916 | if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) { | 916 | if (peer2peer && iwch_rqes_posted(ep->com.qp) == 0) { |
917 | iwch_post_zb_read(ep->com.qp); | 917 | iwch_post_zb_read(ep); |
918 | } | 918 | } |
919 | 919 | ||
920 | goto out; | 920 | goto out; |
@@ -1078,6 +1078,8 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1078 | struct iwch_ep *ep = ctx; | 1078 | struct iwch_ep *ep = ctx; |
1079 | struct cpl_wr_ack *hdr = cplhdr(skb); | 1079 | struct cpl_wr_ack *hdr = cplhdr(skb); |
1080 | unsigned int credits = ntohs(hdr->credits); | 1080 | unsigned int credits = ntohs(hdr->credits); |
1081 | unsigned long flags; | ||
1082 | int post_zb = 0; | ||
1081 | 1083 | ||
1082 | PDBG("%s ep %p credits %u\n", __func__, ep, credits); | 1084 | PDBG("%s ep %p credits %u\n", __func__, ep, credits); |
1083 | 1085 | ||
@@ -1087,28 +1089,34 @@ static int tx_ack(struct t3cdev *tdev, struct sk_buff *skb, void *ctx) | |||
1087 | return CPL_RET_BUF_DONE; | 1089 | return CPL_RET_BUF_DONE; |
1088 | } | 1090 | } |
1089 | 1091 | ||
1092 | spin_lock_irqsave(&ep->com.lock, flags); | ||
1090 | BUG_ON(credits != 1); | 1093 | BUG_ON(credits != 1); |
1091 | dst_confirm(ep->dst); | 1094 | dst_confirm(ep->dst); |
1092 | if (!ep->mpa_skb) { | 1095 | if (!ep->mpa_skb) { |
1093 | PDBG("%s rdma_init wr_ack ep %p state %u\n", | 1096 | PDBG("%s rdma_init wr_ack ep %p state %u\n", |
1094 | __func__, ep, state_read(&ep->com)); | 1097 | __func__, ep, ep->com.state); |
1095 | if (ep->mpa_attr.initiator) { | 1098 | if (ep->mpa_attr.initiator) { |
1096 | PDBG("%s initiator ep %p state %u\n", | 1099 | PDBG("%s initiator ep %p state %u\n", |
1097 | __func__, ep, state_read(&ep->com)); | 1100 | __func__, ep, ep->com.state); |
1098 | if (peer2peer) | 1101 | if (peer2peer && ep->com.state == FPDU_MODE) |
1099 | iwch_post_zb_read(ep->com.qp); | 1102 | post_zb = 1; |
1100 | } else { | 1103 | } else { |
1101 | PDBG("%s responder ep %p state %u\n", | 1104 | PDBG("%s responder ep %p state %u\n", |
1102 | __func__, ep, state_read(&ep->com)); | 1105 | __func__, ep, ep->com.state); |
1103 | ep->com.rpl_done = 1; | 1106 | if (ep->com.state == MPA_REQ_RCVD) { |
1104 | wake_up(&ep->com.waitq); | 1107 | ep->com.rpl_done = 1; |
1108 | wake_up(&ep->com.waitq); | ||
1109 | } | ||
1105 | } | 1110 | } |
1106 | } else { | 1111 | } else { |
1107 | PDBG("%s lsm ack ep %p state %u freeing skb\n", | 1112 | PDBG("%s lsm ack ep %p state %u freeing skb\n", |
1108 | __func__, ep, state_read(&ep->com)); | 1113 | __func__, ep, ep->com.state); |
1109 | kfree_skb(ep->mpa_skb); | 1114 | kfree_skb(ep->mpa_skb); |
1110 | ep->mpa_skb = NULL; | 1115 | ep->mpa_skb = NULL; |
1111 | } | 1116 | } |
1117 | spin_unlock_irqrestore(&ep->com.lock, flags); | ||
1118 | if (post_zb) | ||
1119 | iwch_post_zb_read(ep); | ||
1112 | return CPL_RET_BUF_DONE; | 1120 | return CPL_RET_BUF_DONE; |
1113 | } | 1121 | } |
1114 | 1122 | ||
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.h b/drivers/infiniband/hw/cxgb3/iwch_provider.h index c5406da3f4cd..9a342c9b220d 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_provider.h +++ b/drivers/infiniband/hw/cxgb3/iwch_provider.h | |||
@@ -332,7 +332,7 @@ int iwch_bind_mw(struct ib_qp *qp, | |||
332 | struct ib_mw_bind *mw_bind); | 332 | struct ib_mw_bind *mw_bind); |
333 | int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); | 333 | int iwch_poll_cq(struct ib_cq *ibcq, int num_entries, struct ib_wc *wc); |
334 | int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg); | 334 | int iwch_post_terminate(struct iwch_qp *qhp, struct respQ_msg_t *rsp_msg); |
335 | int iwch_post_zb_read(struct iwch_qp *qhp); | 335 | int iwch_post_zb_read(struct iwch_ep *ep); |
336 | int iwch_register_device(struct iwch_dev *dev); | 336 | int iwch_register_device(struct iwch_dev *dev); |
337 | void iwch_unregister_device(struct iwch_dev *dev); | 337 | void iwch_unregister_device(struct iwch_dev *dev); |
338 | void stop_read_rep_timer(struct iwch_qp *qhp); | 338 | void stop_read_rep_timer(struct iwch_qp *qhp); |
diff --git a/drivers/infiniband/hw/cxgb3/iwch_qp.c b/drivers/infiniband/hw/cxgb3/iwch_qp.c index 1b4cd09f74dc..ecd313f359a4 100644 --- a/drivers/infiniband/hw/cxgb3/iwch_qp.c +++ b/drivers/infiniband/hw/cxgb3/iwch_qp.c | |||
@@ -738,7 +738,7 @@ static inline void build_term_codes(struct respQ_msg_t *rsp_msg, | |||
738 | } | 738 | } |
739 | } | 739 | } |
740 | 740 | ||
741 | int iwch_post_zb_read(struct iwch_qp *qhp) | 741 | int iwch_post_zb_read(struct iwch_ep *ep) |
742 | { | 742 | { |
743 | union t3_wr *wqe; | 743 | union t3_wr *wqe; |
744 | struct sk_buff *skb; | 744 | struct sk_buff *skb; |
@@ -761,10 +761,10 @@ int iwch_post_zb_read(struct iwch_qp *qhp) | |||
761 | wqe->read.local_len = cpu_to_be32(0); | 761 | wqe->read.local_len = cpu_to_be32(0); |
762 | wqe->read.local_to = cpu_to_be64(1); | 762 | wqe->read.local_to = cpu_to_be64(1); |
763 | wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ)); | 763 | wqe->send.wrh.op_seop_flags = cpu_to_be32(V_FW_RIWR_OP(T3_WR_READ)); |
764 | wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(qhp->ep->hwtid)| | 764 | wqe->send.wrh.gen_tid_len = cpu_to_be32(V_FW_RIWR_TID(ep->hwtid)| |
765 | V_FW_RIWR_LEN(flit_cnt)); | 765 | V_FW_RIWR_LEN(flit_cnt)); |
766 | skb->priority = CPL_PRIORITY_DATA; | 766 | skb->priority = CPL_PRIORITY_DATA; |
767 | return iwch_cxgb3_ofld_send(qhp->rhp->rdev.t3cdev_p, skb); | 767 | return iwch_cxgb3_ofld_send(ep->com.qp->rhp->rdev.t3cdev_p, skb); |
768 | } | 768 | } |
769 | 769 | ||
770 | /* | 770 | /* |
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h index 35d2a5dd9bb4..4f045375c8e2 100644 --- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h +++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h | |||
@@ -35,7 +35,7 @@ | |||
35 | #include <linux/list.h> | 35 | #include <linux/list.h> |
36 | #include <linux/spinlock.h> | 36 | #include <linux/spinlock.h> |
37 | #include <linux/idr.h> | 37 | #include <linux/idr.h> |
38 | #include <linux/workqueue.h> | 38 | #include <linux/completion.h> |
39 | #include <linux/netdevice.h> | 39 | #include <linux/netdevice.h> |
40 | #include <linux/sched.h> | 40 | #include <linux/sched.h> |
41 | #include <linux/pci.h> | 41 | #include <linux/pci.h> |
@@ -131,28 +131,21 @@ static inline int c4iw_num_stags(struct c4iw_rdev *rdev) | |||
131 | 131 | ||
132 | #define C4IW_WR_TO (10*HZ) | 132 | #define C4IW_WR_TO (10*HZ) |
133 | 133 | ||
134 | enum { | ||
135 | REPLY_READY = 0, | ||
136 | }; | ||
137 | |||
138 | struct c4iw_wr_wait { | 134 | struct c4iw_wr_wait { |
139 | wait_queue_head_t wait; | 135 | struct completion completion; |
140 | unsigned long status; | ||
141 | int ret; | 136 | int ret; |
142 | }; | 137 | }; |
143 | 138 | ||
144 | static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) | 139 | static inline void c4iw_init_wr_wait(struct c4iw_wr_wait *wr_waitp) |
145 | { | 140 | { |
146 | wr_waitp->ret = 0; | 141 | wr_waitp->ret = 0; |
147 | wr_waitp->status = 0; | 142 | init_completion(&wr_waitp->completion); |
148 | init_waitqueue_head(&wr_waitp->wait); | ||
149 | } | 143 | } |
150 | 144 | ||
151 | static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) | 145 | static inline void c4iw_wake_up(struct c4iw_wr_wait *wr_waitp, int ret) |
152 | { | 146 | { |
153 | wr_waitp->ret = ret; | 147 | wr_waitp->ret = ret; |
154 | set_bit(REPLY_READY, &wr_waitp->status); | 148 | complete(&wr_waitp->completion); |
155 | wake_up(&wr_waitp->wait); | ||
156 | } | 149 | } |
157 | 150 | ||
158 | static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, | 151 | static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, |
@@ -164,8 +157,7 @@ static inline int c4iw_wait_for_reply(struct c4iw_rdev *rdev, | |||
164 | int ret; | 157 | int ret; |
165 | 158 | ||
166 | do { | 159 | do { |
167 | ret = wait_event_timeout(wr_waitp->wait, | 160 | ret = wait_for_completion_timeout(&wr_waitp->completion, to); |
168 | test_and_clear_bit(REPLY_READY, &wr_waitp->status), to); | ||
169 | if (!ret) { | 161 | if (!ret) { |
170 | printk(KERN_ERR MOD "%s - Device %s not responding - " | 162 | printk(KERN_ERR MOD "%s - Device %s not responding - " |
171 | "tid %u qpid %u\n", func, | 163 | "tid %u qpid %u\n", func, |
diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c index 13de1192927c..2d668c69f6d9 100644 --- a/drivers/infiniband/hw/nes/nes.c +++ b/drivers/infiniband/hw/nes/nes.c | |||
@@ -1138,7 +1138,9 @@ static ssize_t nes_store_wqm_quanta(struct device_driver *ddp, | |||
1138 | u32 i = 0; | 1138 | u32 i = 0; |
1139 | struct nes_device *nesdev; | 1139 | struct nes_device *nesdev; |
1140 | 1140 | ||
1141 | strict_strtoul(buf, 0, &wqm_quanta_value); | 1141 | if (kstrtoul(buf, 0, &wqm_quanta_value) < 0) |
1142 | return -EINVAL; | ||
1143 | |||
1142 | list_for_each_entry(nesdev, &nes_dev_list, list) { | 1144 | list_for_each_entry(nesdev, &nes_dev_list, list) { |
1143 | if (i == ee_flsh_adapter) { | 1145 | if (i == ee_flsh_adapter) { |
1144 | nesdev->nesadapter->wqm_quanta = wqm_quanta_value; | 1146 | nesdev->nesadapter->wqm_quanta = wqm_quanta_value; |
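The nes change swaps strict_strtoul(), whose return value was being ignored, for a checked kstrtoul() call, so a malformed sysfs write now fails instead of silently leaving the value unchanged. A minimal sketch of the idiom in a driver-attribute store callback (the example_* name is illustrative):

#include <linux/device.h>
#include <linux/kernel.h>

static ssize_t example_store_wqm_quanta(struct device_driver *ddp,
					const char *buf, size_t count)
{
	unsigned long val;
	int rc;

	rc = kstrtoul(buf, 0, &val);	/* base 0: accept decimal, octal or hex */
	if (rc < 0)
		return rc;		/* -EINVAL or -ERANGE from the parser */

	/* ... apply val to the device ... */
	return count;
}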
diff --git a/drivers/infiniband/hw/qib/Kconfig b/drivers/infiniband/hw/qib/Kconfig index 7c03a70c55a2..8349f9c5064c 100644 --- a/drivers/infiniband/hw/qib/Kconfig +++ b/drivers/infiniband/hw/qib/Kconfig | |||
@@ -1,6 +1,6 @@ | |||
1 | config INFINIBAND_QIB | 1 | config INFINIBAND_QIB |
2 | tristate "QLogic PCIe HCA support" | 2 | tristate "QLogic PCIe HCA support" |
3 | depends on 64BIT && NET | 3 | depends on 64BIT |
4 | ---help--- | 4 | ---help--- |
5 | This is a low-level driver for QLogic PCIe QLE InfiniBand host | 5 | This is a low-level driver for QLogic PCIe QLE InfiniBand host |
6 | channel adapters. This driver does not support the QLogic | 6 | channel adapters. This driver does not support the QLogic |
diff --git a/drivers/infiniband/ulp/iser/iser_verbs.c b/drivers/infiniband/ulp/iser/iser_verbs.c index 9876865732f7..ede1475bee09 100644 --- a/drivers/infiniband/ulp/iser/iser_verbs.c +++ b/drivers/infiniband/ulp/iser/iser_verbs.c | |||
@@ -548,7 +548,7 @@ int iser_connect(struct iser_conn *ib_conn, | |||
548 | iser_conn_get(ib_conn); /* ref ib conn's cma id */ | 548 | iser_conn_get(ib_conn); /* ref ib conn's cma id */ |
549 | ib_conn->cma_id = rdma_create_id(iser_cma_handler, | 549 | ib_conn->cma_id = rdma_create_id(iser_cma_handler, |
550 | (void *)ib_conn, | 550 | (void *)ib_conn, |
551 | RDMA_PS_TCP); | 551 | RDMA_PS_TCP, IB_QPT_RC); |
552 | if (IS_ERR(ib_conn->cma_id)) { | 552 | if (IS_ERR(ib_conn->cma_id)) { |
553 | err = PTR_ERR(ib_conn->cma_id); | 553 | err = PTR_ERR(ib_conn->cma_id); |
554 | iser_err("rdma_create_id failed: %d\n", err); | 554 | iser_err("rdma_create_id failed: %d\n", err); |
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c index 376d640487d2..ee165fdcb596 100644 --- a/drivers/infiniband/ulp/srp/ib_srp.c +++ b/drivers/infiniband/ulp/srp/ib_srp.c | |||
@@ -1147,7 +1147,7 @@ static void srp_process_aer_req(struct srp_target_port *target, | |||
1147 | static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) | 1147 | static void srp_handle_recv(struct srp_target_port *target, struct ib_wc *wc) |
1148 | { | 1148 | { |
1149 | struct ib_device *dev = target->srp_host->srp_dev->dev; | 1149 | struct ib_device *dev = target->srp_host->srp_dev->dev; |
1150 | struct srp_iu *iu = (struct srp_iu *) wc->wr_id; | 1150 | struct srp_iu *iu = (struct srp_iu *) (uintptr_t) wc->wr_id; |
1151 | int res; | 1151 | int res; |
1152 | u8 opcode; | 1152 | u8 opcode; |
1153 | 1153 | ||
@@ -1231,7 +1231,7 @@ static void srp_send_completion(struct ib_cq *cq, void *target_ptr) | |||
1231 | break; | 1231 | break; |
1232 | } | 1232 | } |
1233 | 1233 | ||
1234 | iu = (struct srp_iu *) wc.wr_id; | 1234 | iu = (struct srp_iu *) (uintptr_t) wc.wr_id; |
1235 | list_add(&iu->list, &target->free_tx); | 1235 | list_add(&iu->list, &target->free_tx); |
1236 | } | 1236 | } |
1237 | } | 1237 | } |
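The srp hunks add an intermediate uintptr_t cast when turning the 64-bit wc->wr_id back into a pointer, which keeps 32-bit builds free of cast-to-pointer-of-different-size warnings. A pair of hypothetical helpers showing both directions of the round trip:

#include <linux/types.h>

struct example_iu;	/* stands in for struct srp_iu */

static inline u64 example_iu_to_wr_id(struct example_iu *iu)
{
	return (uintptr_t) iu;				/* pointer -> u64 */
}

static inline struct example_iu *example_wr_id_to_iu(u64 wr_id)
{
	return (struct example_iu *) (uintptr_t) wr_id;	/* u64 -> pointer */
}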
diff --git a/drivers/input/keyboard/Kconfig b/drivers/input/keyboard/Kconfig index 69badb4e06aa..b4dee9d5a055 100644 --- a/drivers/input/keyboard/Kconfig +++ b/drivers/input/keyboard/Kconfig | |||
@@ -412,6 +412,17 @@ config KEYBOARD_PXA930_ROTARY | |||
412 | To compile this driver as a module, choose M here: the | 412 | To compile this driver as a module, choose M here: the |
413 | module will be called pxa930_rotary. | 413 | module will be called pxa930_rotary. |
414 | 414 | ||
415 | config KEYBOARD_PMIC8XXX | ||
416 | tristate "Qualcomm PMIC8XXX keypad support" | ||
417 | depends on MFD_PM8XXX | ||
418 | help | ||
419 | Say Y here if you want to enable the driver for the PMIC8XXX | ||
420 | keypad provided as a reference design from Qualcomm. This is intended | ||
421 | to support upto 18x8 matrix based keypad design. | ||
422 | |||
423 | To compile this driver as a module, choose M here: the module will | ||
424 | be called pmic8xxx-keypad. | ||
425 | |||
415 | config KEYBOARD_SAMSUNG | 426 | config KEYBOARD_SAMSUNG |
416 | tristate "Samsung keypad support" | 427 | tristate "Samsung keypad support" |
417 | depends on SAMSUNG_DEV_KEYPAD | 428 | depends on SAMSUNG_DEV_KEYPAD |
diff --git a/drivers/input/keyboard/Makefile b/drivers/input/keyboard/Makefile index c49cf8e04cd7..ddde0fd476f7 100644 --- a/drivers/input/keyboard/Makefile +++ b/drivers/input/keyboard/Makefile | |||
@@ -34,6 +34,7 @@ obj-$(CONFIG_KEYBOARD_NOMADIK) += nomadik-ske-keypad.o | |||
34 | obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o | 34 | obj-$(CONFIG_KEYBOARD_OMAP) += omap-keypad.o |
35 | obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o | 35 | obj-$(CONFIG_KEYBOARD_OMAP4) += omap4-keypad.o |
36 | obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o | 36 | obj-$(CONFIG_KEYBOARD_OPENCORES) += opencores-kbd.o |
37 | obj-$(CONFIG_KEYBOARD_PMIC8XXX) += pmic8xxx-keypad.o | ||
37 | obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o | 38 | obj-$(CONFIG_KEYBOARD_PXA27x) += pxa27x_keypad.o |
38 | obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o | 39 | obj-$(CONFIG_KEYBOARD_PXA930_ROTARY) += pxa930_rotary.o |
39 | obj-$(CONFIG_KEYBOARD_QT1070) += qt1070.o | 40 | obj-$(CONFIG_KEYBOARD_QT1070) += qt1070.o |
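The driver added below is configured entirely through platform data; its probe() validates the matrix size (up to 18x8, at least 5 columns), the scan delay (a power of two between 1 and 128 ms), the row hold time (a multiple of 30500 ns, at most 122000 ns) and the debounce time (a multiple of 5 between 5 and 20 ms). A board-file sketch using only fields the driver is shown to consume; every value, the GPIO bases and the keymap are illustrative:

#include <linux/input/matrix_keypad.h>
#include <linux/input/pmic8xxx-keypad.h>

/* hypothetical keymap; a real board fills this with KEY() entries */
static const struct matrix_keymap_data example_keymap_data;

static const struct pm8xxx_keypad_platform_data example_keypad_pdata = {
	.input_name		= "example-keypad",
	.input_phys_device	= "example-keypad/input0",
	.num_rows		= 6,
	.num_cols		= 5,
	.rows_gpio_start	= 9,	/* hypothetical PMIC GPIO bases */
	.cols_gpio_start	= 1,
	.debounce_ms		= 15,	/* multiple of 5, within 5..20 */
	.scan_delay_ms		= 32,	/* power of two, within 1..128 */
	.row_hold_ns		= 91500,	/* multiple of 30500, at most 122000 */
	.rep			= 1,	/* enable key auto-repeat */
	.keymap_data		= &example_keymap_data,
};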
diff --git a/drivers/input/keyboard/pmic8xxx-keypad.c b/drivers/input/keyboard/pmic8xxx-keypad.c new file mode 100644 index 000000000000..40b02ae96f86 --- /dev/null +++ b/drivers/input/keyboard/pmic8xxx-keypad.c | |||
@@ -0,0 +1,799 @@ | |||
1 | /* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/input.h> | ||
19 | #include <linux/bitops.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/mutex.h> | ||
22 | |||
23 | #include <linux/mfd/pm8xxx/core.h> | ||
24 | #include <linux/mfd/pm8xxx/gpio.h> | ||
25 | #include <linux/input/pmic8xxx-keypad.h> | ||
26 | |||
27 | #define PM8XXX_MAX_ROWS 18 | ||
28 | #define PM8XXX_MAX_COLS 8 | ||
29 | #define PM8XXX_ROW_SHIFT 3 | ||
30 | #define PM8XXX_MATRIX_MAX_SIZE (PM8XXX_MAX_ROWS * PM8XXX_MAX_COLS) | ||
31 | |||
32 | #define PM8XXX_MIN_ROWS 5 | ||
33 | #define PM8XXX_MIN_COLS 5 | ||
34 | |||
35 | #define MAX_SCAN_DELAY 128 | ||
36 | #define MIN_SCAN_DELAY 1 | ||
37 | |||
38 | /* in nanoseconds */ | ||
39 | #define MAX_ROW_HOLD_DELAY 122000 | ||
40 | #define MIN_ROW_HOLD_DELAY 30500 | ||
41 | |||
42 | #define MAX_DEBOUNCE_TIME 20 | ||
43 | #define MIN_DEBOUNCE_TIME 5 | ||
44 | |||
45 | #define KEYP_CTRL 0x148 | ||
46 | |||
47 | #define KEYP_CTRL_EVNTS BIT(0) | ||
48 | #define KEYP_CTRL_EVNTS_MASK 0x3 | ||
49 | |||
50 | #define KEYP_CTRL_SCAN_COLS_SHIFT 5 | ||
51 | #define KEYP_CTRL_SCAN_COLS_MIN 5 | ||
52 | #define KEYP_CTRL_SCAN_COLS_BITS 0x3 | ||
53 | |||
54 | #define KEYP_CTRL_SCAN_ROWS_SHIFT 2 | ||
55 | #define KEYP_CTRL_SCAN_ROWS_MIN 5 | ||
56 | #define KEYP_CTRL_SCAN_ROWS_BITS 0x7 | ||
57 | |||
58 | #define KEYP_CTRL_KEYP_EN BIT(7) | ||
59 | |||
60 | #define KEYP_SCAN 0x149 | ||
61 | |||
62 | #define KEYP_SCAN_READ_STATE BIT(0) | ||
63 | #define KEYP_SCAN_DBOUNCE_SHIFT 1 | ||
64 | #define KEYP_SCAN_PAUSE_SHIFT 3 | ||
65 | #define KEYP_SCAN_ROW_HOLD_SHIFT 6 | ||
66 | |||
67 | #define KEYP_TEST 0x14A | ||
68 | |||
69 | #define KEYP_TEST_CLEAR_RECENT_SCAN BIT(6) | ||
70 | #define KEYP_TEST_CLEAR_OLD_SCAN BIT(5) | ||
71 | #define KEYP_TEST_READ_RESET BIT(4) | ||
72 | #define KEYP_TEST_DTEST_EN BIT(3) | ||
73 | #define KEYP_TEST_ABORT_READ BIT(0) | ||
74 | |||
75 | #define KEYP_TEST_DBG_SELECT_SHIFT 1 | ||
76 | |||
77 | /* bits of these registers represent | ||
78 | * '0' for key press | ||
79 | * '1' for key release | ||
80 | */ | ||
81 | #define KEYP_RECENT_DATA 0x14B | ||
82 | #define KEYP_OLD_DATA 0x14C | ||
83 | |||
84 | #define KEYP_CLOCK_FREQ 32768 | ||
85 | |||
86 | /** | ||
87 | * struct pmic8xxx_kp - internal keypad data structure | ||
88 | * @pdata - keypad platform data pointer | ||
89 | * @input - input device pointer for keypad | ||
90 | * @key_sense_irq - key press/release irq number | ||
91 | * @key_stuck_irq - key stuck notification irq number | ||
92 | * @keycodes - array to hold the key codes | ||
93 | * @dev - parent device pointer | ||
94 | * @keystate - present key press/release state | ||
95 | * @stuckstate - present state when key stuck irq | ||
96 | * @ctrl_reg - control register value | ||
97 | */ | ||
98 | struct pmic8xxx_kp { | ||
99 | const struct pm8xxx_keypad_platform_data *pdata; | ||
100 | struct input_dev *input; | ||
101 | int key_sense_irq; | ||
102 | int key_stuck_irq; | ||
103 | |||
104 | unsigned short keycodes[PM8XXX_MATRIX_MAX_SIZE]; | ||
105 | |||
106 | struct device *dev; | ||
107 | u16 keystate[PM8XXX_MAX_ROWS]; | ||
108 | u16 stuckstate[PM8XXX_MAX_ROWS]; | ||
109 | |||
110 | u8 ctrl_reg; | ||
111 | }; | ||
112 | |||
113 | static int pmic8xxx_kp_write_u8(struct pmic8xxx_kp *kp, | ||
114 | u8 data, u16 reg) | ||
115 | { | ||
116 | int rc; | ||
117 | |||
118 | rc = pm8xxx_writeb(kp->dev->parent, reg, data); | ||
119 | return rc; | ||
120 | } | ||
121 | |||
122 | static int pmic8xxx_kp_read(struct pmic8xxx_kp *kp, | ||
123 | u8 *data, u16 reg, unsigned num_bytes) | ||
124 | { | ||
125 | int rc; | ||
126 | |||
127 | rc = pm8xxx_read_buf(kp->dev->parent, reg, data, num_bytes); | ||
128 | return rc; | ||
129 | } | ||
130 | |||
131 | static int pmic8xxx_kp_read_u8(struct pmic8xxx_kp *kp, | ||
132 | u8 *data, u16 reg) | ||
133 | { | ||
134 | int rc; | ||
135 | |||
136 | rc = pmic8xxx_kp_read(kp, data, reg, 1); | ||
137 | return rc; | ||
138 | } | ||
139 | |||
140 | static u8 pmic8xxx_col_state(struct pmic8xxx_kp *kp, u8 col) | ||
141 | { | ||
142 | /* all keys pressed on that particular row? */ | ||
143 | if (col == 0x00) | ||
144 | return 1 << kp->pdata->num_cols; | ||
145 | else | ||
146 | return col & ((1 << kp->pdata->num_cols) - 1); | ||
147 | } | ||
148 | |||
149 | /* | ||
150 | * Synchronous read protocol for RevB0 onwards: | ||
151 | * | ||
152 | * 1. Write '1' to ReadState bit in KEYP_SCAN register | ||
153 | * 2. Wait 2*32KHz clocks, so that HW can successfully enter read mode | ||
154 | * synchronously | ||
155 | * 3. Read rows in old array first if events are more than one | ||
156 | * 4. Read rows in recent array | ||
157 | * 5. Wait 4*32KHz clocks | ||
158 | * 6. Write '0' to ReadState bit of KEYP_SCAN register so that hw can | ||
159 | * synchronously exit read mode. | ||
160 | */ | ||
161 | static int pmic8xxx_chk_sync_read(struct pmic8xxx_kp *kp) | ||
162 | { | ||
163 | int rc; | ||
164 | u8 scan_val; | ||
165 | |||
166 | rc = pmic8xxx_kp_read_u8(kp, &scan_val, KEYP_SCAN); | ||
167 | if (rc < 0) { | ||
168 | dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc); | ||
169 | return rc; | ||
170 | } | ||
171 | |||
172 | scan_val |= 0x1; | ||
173 | |||
174 | rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN); | ||
175 | if (rc < 0) { | ||
176 | dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc); | ||
177 | return rc; | ||
178 | } | ||
179 | |||
180 | /* 2 * 32KHz clocks */ | ||
181 | udelay((2 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1); | ||
182 | |||
183 | return rc; | ||
184 | } | ||
185 | |||
186 | static int pmic8xxx_kp_read_data(struct pmic8xxx_kp *kp, u16 *state, | ||
187 | u16 data_reg, int read_rows) | ||
188 | { | ||
189 | int rc, row; | ||
190 | u8 new_data[PM8XXX_MAX_ROWS]; | ||
191 | |||
192 | rc = pmic8xxx_kp_read(kp, new_data, data_reg, read_rows); | ||
193 | if (rc) | ||
194 | return rc; | ||
195 | |||
196 | for (row = 0; row < kp->pdata->num_rows; row++) { | ||
197 | dev_dbg(kp->dev, "new_data[%d] = %d\n", row, | ||
198 | new_data[row]); | ||
199 | state[row] = pmic8xxx_col_state(kp, new_data[row]); | ||
200 | } | ||
201 | |||
202 | return rc; | ||
203 | } | ||
204 | |||
205 | static int pmic8xxx_kp_read_matrix(struct pmic8xxx_kp *kp, u16 *new_state, | ||
206 | u16 *old_state) | ||
207 | { | ||
208 | int rc, read_rows; | ||
209 | u8 scan_val; | ||
210 | |||
211 | if (kp->pdata->num_rows < PM8XXX_MIN_ROWS) | ||
212 | read_rows = PM8XXX_MIN_ROWS; | ||
213 | else | ||
214 | read_rows = kp->pdata->num_rows; | ||
215 | |||
216 | pmic8xxx_chk_sync_read(kp); | ||
217 | |||
218 | if (old_state) { | ||
219 | rc = pmic8xxx_kp_read_data(kp, old_state, KEYP_OLD_DATA, | ||
220 | read_rows); | ||
221 | if (rc < 0) { | ||
222 | dev_err(kp->dev, | ||
223 | "Error reading KEYP_OLD_DATA, rc=%d\n", rc); | ||
224 | return rc; | ||
225 | } | ||
226 | } | ||
227 | |||
228 | rc = pmic8xxx_kp_read_data(kp, new_state, KEYP_RECENT_DATA, | ||
229 | read_rows); | ||
230 | if (rc < 0) { | ||
231 | dev_err(kp->dev, | ||
232 | "Error reading KEYP_RECENT_DATA, rc=%d\n", rc); | ||
233 | return rc; | ||
234 | } | ||
235 | |||
236 | /* 4 * 32KHz clocks */ | ||
237 | udelay((4 * DIV_ROUND_UP(USEC_PER_SEC, KEYP_CLOCK_FREQ)) + 1); | ||
238 | |||
239 | rc = pmic8xxx_kp_read_u8(kp, &scan_val, KEYP_SCAN); | ||
240 | if (rc < 0) { | ||
241 | dev_err(kp->dev, "Error reading KEYP_SCAN reg, rc=%d\n", rc); | ||
242 | return rc; | ||
243 | } | ||
244 | |||
245 | scan_val &= 0xFE; | ||
246 | rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN); | ||
247 | if (rc < 0) | ||
248 | dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc); | ||
249 | |||
250 | return rc; | ||
251 | } | ||
252 | |||
253 | static void __pmic8xxx_kp_scan_matrix(struct pmic8xxx_kp *kp, u16 *new_state, | ||
254 | u16 *old_state) | ||
255 | { | ||
256 | int row, col, code; | ||
257 | |||
258 | for (row = 0; row < kp->pdata->num_rows; row++) { | ||
259 | int bits_changed = new_state[row] ^ old_state[row]; | ||
260 | |||
261 | if (!bits_changed) | ||
262 | continue; | ||
263 | |||
264 | for (col = 0; col < kp->pdata->num_cols; col++) { | ||
265 | if (!(bits_changed & (1 << col))) | ||
266 | continue; | ||
267 | |||
268 | dev_dbg(kp->dev, "key [%d:%d] %s\n", row, col, | ||
269 | !(new_state[row] & (1 << col)) ? | ||
270 | "pressed" : "released"); | ||
271 | |||
272 | code = MATRIX_SCAN_CODE(row, col, PM8XXX_ROW_SHIFT); | ||
273 | |||
274 | input_event(kp->input, EV_MSC, MSC_SCAN, code); | ||
275 | input_report_key(kp->input, | ||
276 | kp->keycodes[code], | ||
277 | !(new_state[row] & (1 << col))); | ||
278 | |||
279 | input_sync(kp->input); | ||
280 | } | ||
281 | } | ||
282 | } | ||
283 | |||
284 | static bool pmic8xxx_detect_ghost_keys(struct pmic8xxx_kp *kp, u16 *new_state) | ||
285 | { | ||
286 | int row, found_first = -1; | ||
287 | u16 check, row_state; | ||
288 | |||
289 | check = 0; | ||
290 | for (row = 0; row < kp->pdata->num_rows; row++) { | ||
291 | row_state = (~new_state[row]) & | ||
292 | ((1 << kp->pdata->num_cols) - 1); | ||
293 | |||
294 | if (hweight16(row_state) > 1) { | ||
295 | if (found_first == -1) | ||
296 | found_first = row; | ||
297 | if (check & row_state) { | ||
298 | dev_dbg(kp->dev, "detected ghost key on row[%d]" | ||
299 | " and row[%d]\n", found_first, row); | ||
300 | return true; | ||
301 | } | ||
302 | } | ||
303 | check |= row_state; | ||
304 | } | ||
305 | return false; | ||
306 | } | ||
307 | |||
308 | static int pmic8xxx_kp_scan_matrix(struct pmic8xxx_kp *kp, unsigned int events) | ||
309 | { | ||
310 | u16 new_state[PM8XXX_MAX_ROWS]; | ||
311 | u16 old_state[PM8XXX_MAX_ROWS]; | ||
312 | int rc; | ||
313 | |||
314 | switch (events) { | ||
315 | case 0x1: | ||
316 | rc = pmic8xxx_kp_read_matrix(kp, new_state, NULL); | ||
317 | if (rc < 0) | ||
318 | return rc; | ||
319 | |||
320 | /* detecting ghost key is not an error */ | ||
321 | if (pmic8xxx_detect_ghost_keys(kp, new_state)) | ||
322 | return 0; | ||
323 | __pmic8xxx_kp_scan_matrix(kp, new_state, kp->keystate); | ||
324 | memcpy(kp->keystate, new_state, sizeof(new_state)); | ||
325 | break; | ||
326 | case 0x3: /* two events - eventcounter is gray-coded */ | ||
327 | rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state); | ||
328 | if (rc < 0) | ||
329 | return rc; | ||
330 | |||
331 | __pmic8xxx_kp_scan_matrix(kp, old_state, kp->keystate); | ||
332 | __pmic8xxx_kp_scan_matrix(kp, new_state, old_state); | ||
333 | memcpy(kp->keystate, new_state, sizeof(new_state)); | ||
334 | break; | ||
335 | case 0x2: | ||
336 | dev_dbg(kp->dev, "Some key events were lost\n"); | ||
337 | rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state); | ||
338 | if (rc < 0) | ||
339 | return rc; | ||
340 | __pmic8xxx_kp_scan_matrix(kp, old_state, kp->keystate); | ||
341 | __pmic8xxx_kp_scan_matrix(kp, new_state, old_state); | ||
342 | memcpy(kp->keystate, new_state, sizeof(new_state)); | ||
343 | break; | ||
344 | default: | ||
345 | rc = -EINVAL; | ||
346 | } | ||
347 | return rc; | ||
348 | } | ||
349 | |||
350 | /* | ||
351 | * NOTE: We read the recent and old data registers blindly whenever | ||
352 | * the key-stuck interrupt fires, because the event counter is not | ||
353 | * updated for this interrupt; a stuck key is not treated as a | ||
354 | * key-state change. | ||
355 | * | ||
356 | * We do not use the old data register contents after reading them, | ||
357 | * because they could misreport a key that was pressed before the key | ||
358 | * got stuck as the stuck key, since its pressed status is still stored | ||
359 | * in the old data register. | ||
360 | */ | ||
361 | static irqreturn_t pmic8xxx_kp_stuck_irq(int irq, void *data) | ||
362 | { | ||
363 | u16 new_state[PM8XXX_MAX_ROWS]; | ||
364 | u16 old_state[PM8XXX_MAX_ROWS]; | ||
365 | int rc; | ||
366 | struct pmic8xxx_kp *kp = data; | ||
367 | |||
368 | rc = pmic8xxx_kp_read_matrix(kp, new_state, old_state); | ||
369 | if (rc < 0) { | ||
370 | dev_err(kp->dev, "failed to read keypad matrix\n"); | ||
371 | return IRQ_HANDLED; | ||
372 | } | ||
373 | |||
374 | __pmic8xxx_kp_scan_matrix(kp, new_state, kp->stuckstate); | ||
375 | |||
376 | return IRQ_HANDLED; | ||
377 | } | ||
378 | |||
379 | static irqreturn_t pmic8xxx_kp_irq(int irq, void *data) | ||
380 | { | ||
381 | struct pmic8xxx_kp *kp = data; | ||
382 | u8 ctrl_val, events; | ||
383 | int rc; | ||
384 | |||
385 | rc = pmic8xxx_kp_read(kp, &ctrl_val, KEYP_CTRL, 1); | ||
386 | if (rc < 0) { | ||
387 | dev_err(kp->dev, "failed to read keyp_ctrl register\n"); | ||
388 | return IRQ_HANDLED; | ||
389 | } | ||
390 | |||
391 | events = ctrl_val & KEYP_CTRL_EVNTS_MASK; | ||
392 | |||
393 | rc = pmic8xxx_kp_scan_matrix(kp, events); | ||
394 | if (rc < 0) | ||
395 | dev_err(kp->dev, "failed to scan matrix\n"); | ||
396 | |||
397 | return IRQ_HANDLED; | ||
398 | } | ||
399 | |||
400 | static int __devinit pmic8xxx_kpd_init(struct pmic8xxx_kp *kp) | ||
401 | { | ||
402 | int bits, rc, cycles; | ||
403 | u8 scan_val = 0, ctrl_val = 0; | ||
404 | static const u8 row_bits[] = { | ||
405 | 0, 1, 2, 3, 4, 4, 5, 5, 6, 6, 6, 7, 7, 7, | ||
406 | }; | ||
407 | |||
408 | /* Find column bits */ | ||
409 | if (kp->pdata->num_cols < KEYP_CTRL_SCAN_COLS_MIN) | ||
410 | bits = 0; | ||
411 | else | ||
412 | bits = kp->pdata->num_cols - KEYP_CTRL_SCAN_COLS_MIN; | ||
413 | ctrl_val = (bits & KEYP_CTRL_SCAN_COLS_BITS) << | ||
414 | KEYP_CTRL_SCAN_COLS_SHIFT; | ||
415 | |||
416 | /* Find row bits */ | ||
417 | if (kp->pdata->num_rows < KEYP_CTRL_SCAN_ROWS_MIN) | ||
418 | bits = 0; | ||
419 | else | ||
420 | bits = row_bits[kp->pdata->num_rows - KEYP_CTRL_SCAN_ROWS_MIN]; | ||
421 | |||
422 | ctrl_val |= (bits << KEYP_CTRL_SCAN_ROWS_SHIFT); | ||
423 | |||
424 | rc = pmic8xxx_kp_write_u8(kp, ctrl_val, KEYP_CTRL); | ||
425 | if (rc < 0) { | ||
426 | dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc); | ||
427 | return rc; | ||
428 | } | ||
429 | |||
430 | bits = (kp->pdata->debounce_ms / 5) - 1; | ||
431 | |||
432 | scan_val |= (bits << KEYP_SCAN_DBOUNCE_SHIFT); | ||
433 | |||
434 | bits = fls(kp->pdata->scan_delay_ms) - 1; | ||
435 | scan_val |= (bits << KEYP_SCAN_PAUSE_SHIFT); | ||
436 | |||
437 | /* Row hold time is a multiple of 32KHz cycles. */ | ||
438 | cycles = (kp->pdata->row_hold_ns * KEYP_CLOCK_FREQ) / NSEC_PER_SEC; | ||
439 | |||
440 | scan_val |= (cycles << KEYP_SCAN_ROW_HOLD_SHIFT); | ||
441 | |||
442 | rc = pmic8xxx_kp_write_u8(kp, scan_val, KEYP_SCAN); | ||
443 | if (rc) | ||
444 | dev_err(kp->dev, "Error writing KEYP_SCAN reg, rc=%d\n", rc); | ||
445 | |||
446 | return rc; | ||
447 | |||
448 | } | ||
449 | |||
450 | static int __devinit pmic8xxx_kp_config_gpio(int gpio_start, int num_gpios, | ||
451 | struct pmic8xxx_kp *kp, struct pm_gpio *gpio_config) | ||
452 | { | ||
453 | int rc, i; | ||
454 | |||
455 | if (gpio_start < 0 || num_gpios < 0) | ||
456 | return -EINVAL; | ||
457 | |||
458 | for (i = 0; i < num_gpios; i++) { | ||
459 | rc = pm8xxx_gpio_config(gpio_start + i, gpio_config); | ||
460 | if (rc) { | ||
461 | dev_err(kp->dev, "%s: FAIL pm8xxx_gpio_config():" | ||
462 | "for PM GPIO [%d] rc=%d.\n", | ||
463 | __func__, gpio_start + i, rc); | ||
464 | return rc; | ||
465 | } | ||
466 | } | ||
467 | |||
468 | return 0; | ||
469 | } | ||
470 | |||
471 | static int pmic8xxx_kp_enable(struct pmic8xxx_kp *kp) | ||
472 | { | ||
473 | int rc; | ||
474 | |||
475 | kp->ctrl_reg |= KEYP_CTRL_KEYP_EN; | ||
476 | |||
477 | rc = pmic8xxx_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL); | ||
478 | if (rc < 0) | ||
479 | dev_err(kp->dev, "Error writing KEYP_CTRL reg, rc=%d\n", rc); | ||
480 | |||
481 | return rc; | ||
482 | } | ||
483 | |||
484 | static int pmic8xxx_kp_disable(struct pmic8xxx_kp *kp) | ||
485 | { | ||
486 | int rc; | ||
487 | |||
488 | kp->ctrl_reg &= ~KEYP_CTRL_KEYP_EN; | ||
489 | |||
490 | rc = pmic8xxx_kp_write_u8(kp, kp->ctrl_reg, KEYP_CTRL); | ||
491 | if (rc < 0) | ||
492 | return rc; | ||
493 | |||
494 | return rc; | ||
495 | } | ||
496 | |||
497 | static int pmic8xxx_kp_open(struct input_dev *dev) | ||
498 | { | ||
499 | struct pmic8xxx_kp *kp = input_get_drvdata(dev); | ||
500 | |||
501 | return pmic8xxx_kp_enable(kp); | ||
502 | } | ||
503 | |||
504 | static void pmic8xxx_kp_close(struct input_dev *dev) | ||
505 | { | ||
506 | struct pmic8xxx_kp *kp = input_get_drvdata(dev); | ||
507 | |||
508 | pmic8xxx_kp_disable(kp); | ||
509 | } | ||
510 | |||
511 | /* | ||
512 | * The keypad controller must be initialized in the following sequence | ||
513 | * only; otherwise it can get stuck in its FSM: | ||
514 | * | ||
515 | * - Initialize the keypad control parameters (number of rows and | ||
516 | * columns, timing values, etc.). | ||
517 | * - Configure the row and column GPIO pull up/down. | ||
518 | * - Set the IRQ edge type. | ||
519 | * - Enable the keypad controller. | ||
520 | */ | ||
521 | static int __devinit pmic8xxx_kp_probe(struct platform_device *pdev) | ||
522 | { | ||
523 | const struct pm8xxx_keypad_platform_data *pdata = mfd_get_data(pdev); | ||
524 | const struct matrix_keymap_data *keymap_data; | ||
525 | struct pmic8xxx_kp *kp; | ||
526 | int rc; | ||
527 | u8 ctrl_val; | ||
528 | |||
529 | struct pm_gpio kypd_drv = { | ||
530 | .direction = PM_GPIO_DIR_OUT, | ||
531 | .output_buffer = PM_GPIO_OUT_BUF_OPEN_DRAIN, | ||
532 | .output_value = 0, | ||
533 | .pull = PM_GPIO_PULL_NO, | ||
534 | .vin_sel = PM_GPIO_VIN_S3, | ||
535 | .out_strength = PM_GPIO_STRENGTH_LOW, | ||
536 | .function = PM_GPIO_FUNC_1, | ||
537 | .inv_int_pol = 1, | ||
538 | }; | ||
539 | |||
540 | struct pm_gpio kypd_sns = { | ||
541 | .direction = PM_GPIO_DIR_IN, | ||
542 | .pull = PM_GPIO_PULL_UP_31P5, | ||
543 | .vin_sel = PM_GPIO_VIN_S3, | ||
544 | .out_strength = PM_GPIO_STRENGTH_NO, | ||
545 | .function = PM_GPIO_FUNC_NORMAL, | ||
546 | .inv_int_pol = 1, | ||
547 | }; | ||
548 | |||
549 | |||
550 | if (!pdata || !pdata->num_cols || !pdata->num_rows || | ||
551 | pdata->num_cols > PM8XXX_MAX_COLS || | ||
552 | pdata->num_rows > PM8XXX_MAX_ROWS || | ||
553 | pdata->num_cols < PM8XXX_MIN_COLS) { | ||
554 | dev_err(&pdev->dev, "invalid platform data\n"); | ||
555 | return -EINVAL; | ||
556 | } | ||
557 | |||
558 | if (!pdata->scan_delay_ms || | ||
559 | pdata->scan_delay_ms > MAX_SCAN_DELAY || | ||
560 | pdata->scan_delay_ms < MIN_SCAN_DELAY || | ||
561 | !is_power_of_2(pdata->scan_delay_ms)) { | ||
562 | dev_err(&pdev->dev, "invalid keypad scan time supplied\n"); | ||
563 | return -EINVAL; | ||
564 | } | ||
565 | |||
566 | if (!pdata->row_hold_ns || | ||
567 | pdata->row_hold_ns > MAX_ROW_HOLD_DELAY || | ||
568 | pdata->row_hold_ns < MIN_ROW_HOLD_DELAY || | ||
569 | ((pdata->row_hold_ns % MIN_ROW_HOLD_DELAY) != 0)) { | ||
570 | dev_err(&pdev->dev, "invalid keypad row hold time supplied\n"); | ||
571 | return -EINVAL; | ||
572 | } | ||
573 | |||
574 | if (!pdata->debounce_ms || | ||
575 | ((pdata->debounce_ms % 5) != 0) || | ||
576 | pdata->debounce_ms > MAX_DEBOUNCE_TIME || | ||
577 | pdata->debounce_ms < MIN_DEBOUNCE_TIME) { | ||
578 | dev_err(&pdev->dev, "invalid debounce time supplied\n"); | ||
579 | return -EINVAL; | ||
580 | } | ||
581 | |||
582 | keymap_data = pdata->keymap_data; | ||
583 | if (!keymap_data) { | ||
584 | dev_err(&pdev->dev, "no keymap data supplied\n"); | ||
585 | return -EINVAL; | ||
586 | } | ||
587 | |||
588 | kp = kzalloc(sizeof(*kp), GFP_KERNEL); | ||
589 | if (!kp) | ||
590 | return -ENOMEM; | ||
591 | |||
592 | platform_set_drvdata(pdev, kp); | ||
593 | |||
594 | kp->pdata = pdata; | ||
595 | kp->dev = &pdev->dev; | ||
596 | |||
597 | kp->input = input_allocate_device(); | ||
598 | if (!kp->input) { | ||
599 | dev_err(&pdev->dev, "unable to allocate input device\n"); | ||
600 | rc = -ENOMEM; | ||
601 | goto err_alloc_device; | ||
602 | } | ||
603 | |||
604 | kp->key_sense_irq = platform_get_irq(pdev, 0); | ||
605 | if (kp->key_sense_irq < 0) { | ||
606 | dev_err(&pdev->dev, "unable to get keypad sense irq\n"); | ||
607 | rc = -ENXIO; | ||
608 | goto err_get_irq; | ||
609 | } | ||
610 | |||
611 | kp->key_stuck_irq = platform_get_irq(pdev, 1); | ||
612 | if (kp->key_stuck_irq < 0) { | ||
613 | dev_err(&pdev->dev, "unable to get keypad stuck irq\n"); | ||
614 | rc = -ENXIO; | ||
615 | goto err_get_irq; | ||
616 | } | ||
617 | |||
618 | kp->input->name = pdata->input_name ? : "PMIC8XXX keypad"; | ||
619 | kp->input->phys = pdata->input_phys_device ? : "pmic8xxx_keypad/input0"; | ||
620 | |||
621 | kp->input->dev.parent = &pdev->dev; | ||
622 | |||
623 | kp->input->id.bustype = BUS_I2C; | ||
624 | kp->input->id.version = 0x0001; | ||
625 | kp->input->id.product = 0x0001; | ||
626 | kp->input->id.vendor = 0x0001; | ||
627 | |||
628 | kp->input->evbit[0] = BIT_MASK(EV_KEY); | ||
629 | |||
630 | if (pdata->rep) | ||
631 | __set_bit(EV_REP, kp->input->evbit); | ||
632 | |||
633 | kp->input->keycode = kp->keycodes; | ||
634 | kp->input->keycodemax = PM8XXX_MATRIX_MAX_SIZE; | ||
635 | kp->input->keycodesize = sizeof(kp->keycodes[0]); | ||
636 | kp->input->open = pmic8xxx_kp_open; | ||
637 | kp->input->close = pmic8xxx_kp_close; | ||
638 | |||
639 | matrix_keypad_build_keymap(keymap_data, PM8XXX_ROW_SHIFT, | ||
640 | kp->input->keycode, kp->input->keybit); | ||
641 | |||
642 | input_set_capability(kp->input, EV_MSC, MSC_SCAN); | ||
643 | input_set_drvdata(kp->input, kp); | ||
644 | |||
645 | /* initialize keypad state */ | ||
646 | memset(kp->keystate, 0xff, sizeof(kp->keystate)); | ||
647 | memset(kp->stuckstate, 0xff, sizeof(kp->stuckstate)); | ||
648 | |||
649 | rc = pmic8xxx_kpd_init(kp); | ||
650 | if (rc < 0) { | ||
651 | dev_err(&pdev->dev, "unable to initialize keypad controller\n"); | ||
652 | goto err_get_irq; | ||
653 | } | ||
654 | |||
655 | rc = pmic8xxx_kp_config_gpio(pdata->cols_gpio_start, | ||
656 | pdata->num_cols, kp, &kypd_sns); | ||
657 | if (rc < 0) { | ||
658 | dev_err(&pdev->dev, "unable to configure keypad sense lines\n"); | ||
659 | goto err_gpio_config; | ||
660 | } | ||
661 | |||
662 | rc = pmic8xxx_kp_config_gpio(pdata->rows_gpio_start, | ||
663 | pdata->num_rows, kp, &kypd_drv); | ||
664 | if (rc < 0) { | ||
665 | dev_err(&pdev->dev, "unable to configure keypad drive lines\n"); | ||
666 | goto err_gpio_config; | ||
667 | } | ||
668 | |||
669 | rc = request_any_context_irq(kp->key_sense_irq, pmic8xxx_kp_irq, | ||
670 | IRQF_TRIGGER_RISING, "pmic-keypad", kp); | ||
671 | if (rc < 0) { | ||
672 | dev_err(&pdev->dev, "failed to request keypad sense irq\n"); | ||
673 | goto err_get_irq; | ||
674 | } | ||
675 | |||
676 | rc = request_any_context_irq(kp->key_stuck_irq, pmic8xxx_kp_stuck_irq, | ||
677 | IRQF_TRIGGER_RISING, "pmic-keypad-stuck", kp); | ||
678 | if (rc < 0) { | ||
679 | dev_err(&pdev->dev, "failed to request keypad stuck irq\n"); | ||
680 | goto err_req_stuck_irq; | ||
681 | } | ||
682 | |||
683 | rc = pmic8xxx_kp_read_u8(kp, &ctrl_val, KEYP_CTRL); | ||
684 | if (rc < 0) { | ||
685 | dev_err(&pdev->dev, "failed to read KEYP_CTRL register\n"); | ||
686 | goto err_pmic_reg_read; | ||
687 | } | ||
688 | |||
689 | kp->ctrl_reg = ctrl_val; | ||
690 | |||
691 | rc = input_register_device(kp->input); | ||
692 | if (rc < 0) { | ||
693 | dev_err(&pdev->dev, "unable to register keypad input device\n"); | ||
694 | goto err_pmic_reg_read; | ||
695 | } | ||
696 | |||
697 | device_init_wakeup(&pdev->dev, pdata->wakeup); | ||
698 | |||
699 | return 0; | ||
700 | |||
701 | err_pmic_reg_read: | ||
702 | free_irq(kp->key_stuck_irq, kp); | ||
703 | err_req_stuck_irq: | ||
704 | free_irq(kp->key_sense_irq, kp); | ||
705 | err_gpio_config: | ||
706 | err_get_irq: | ||
707 | input_free_device(kp->input); | ||
708 | err_alloc_device: | ||
709 | platform_set_drvdata(pdev, NULL); | ||
710 | kfree(kp); | ||
711 | return rc; | ||
712 | } | ||
713 | |||
714 | static int __devexit pmic8xxx_kp_remove(struct platform_device *pdev) | ||
715 | { | ||
716 | struct pmic8xxx_kp *kp = platform_get_drvdata(pdev); | ||
717 | |||
718 | device_init_wakeup(&pdev->dev, 0); | ||
719 | free_irq(kp->key_stuck_irq, kp); | ||
720 | free_irq(kp->key_sense_irq, kp); | ||
721 | input_unregister_device(kp->input); | ||
722 | kfree(kp); | ||
723 | |||
724 | platform_set_drvdata(pdev, NULL); | ||
725 | return 0; | ||
726 | } | ||
727 | |||
728 | #ifdef CONFIG_PM_SLEEP | ||
729 | static int pmic8xxx_kp_suspend(struct device *dev) | ||
730 | { | ||
731 | struct platform_device *pdev = to_platform_device(dev); | ||
732 | struct pmic8xxx_kp *kp = platform_get_drvdata(pdev); | ||
733 | struct input_dev *input_dev = kp->input; | ||
734 | |||
735 | if (device_may_wakeup(dev)) { | ||
736 | enable_irq_wake(kp->key_sense_irq); | ||
737 | } else { | ||
738 | mutex_lock(&input_dev->mutex); | ||
739 | |||
740 | if (input_dev->users) | ||
741 | pmic8xxx_kp_disable(kp); | ||
742 | |||
743 | mutex_unlock(&input_dev->mutex); | ||
744 | } | ||
745 | |||
746 | return 0; | ||
747 | } | ||
748 | |||
749 | static int pmic8xxx_kp_resume(struct device *dev) | ||
750 | { | ||
751 | struct platform_device *pdev = to_platform_device(dev); | ||
752 | struct pmic8xxx_kp *kp = platform_get_drvdata(pdev); | ||
753 | struct input_dev *input_dev = kp->input; | ||
754 | |||
755 | if (device_may_wakeup(dev)) { | ||
756 | disable_irq_wake(kp->key_sense_irq); | ||
757 | } else { | ||
758 | mutex_lock(&input_dev->mutex); | ||
759 | |||
760 | if (input_dev->users) | ||
761 | pmic8xxx_kp_enable(kp); | ||
762 | |||
763 | mutex_unlock(&input_dev->mutex); | ||
764 | } | ||
765 | |||
766 | return 0; | ||
767 | } | ||
768 | #endif | ||
769 | |||
770 | static SIMPLE_DEV_PM_OPS(pm8xxx_kp_pm_ops, | ||
771 | pmic8xxx_kp_suspend, pmic8xxx_kp_resume); | ||
772 | |||
773 | static struct platform_driver pmic8xxx_kp_driver = { | ||
774 | .probe = pmic8xxx_kp_probe, | ||
775 | .remove = __devexit_p(pmic8xxx_kp_remove), | ||
776 | .driver = { | ||
777 | .name = PM8XXX_KEYPAD_DEV_NAME, | ||
778 | .owner = THIS_MODULE, | ||
779 | .pm = &pm8xxx_kp_pm_ops, | ||
780 | }, | ||
781 | }; | ||
782 | |||
783 | static int __init pmic8xxx_kp_init(void) | ||
784 | { | ||
785 | return platform_driver_register(&pmic8xxx_kp_driver); | ||
786 | } | ||
787 | module_init(pmic8xxx_kp_init); | ||
788 | |||
789 | static void __exit pmic8xxx_kp_exit(void) | ||
790 | { | ||
791 | platform_driver_unregister(&pmic8xxx_kp_driver); | ||
792 | } | ||
793 | module_exit(pmic8xxx_kp_exit); | ||
794 | |||
795 | MODULE_LICENSE("GPL v2"); | ||
796 | MODULE_DESCRIPTION("PMIC8XXX keypad driver"); | ||
797 | MODULE_VERSION("1.0"); | ||
798 | MODULE_ALIAS("platform:pmic8xxx_keypad"); | ||
799 | MODULE_AUTHOR("Trilok Soni <tsoni@codeaurora.org>"); | ||
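
pmic8xxx_kp_probe() fetches its configuration through mfd_get_data(), so in practice the structure below would be attached to the PM8xxx MFD cell by board code rather than set as plain platform_data. What follows is a minimal sketch only, not part of the patch: every field name is taken from the dereferences in the probe above, but all numeric values are hypothetical and were chosen solely to satisfy the validation checks (power-of-two scan delay, row hold time a multiple of MIN_ROW_HOLD_DELAY, debounce a multiple of 5 ms).

    /* Not part of the patch: hypothetical board data for the pmic8xxx keypad. */
    static const unsigned int board_keymap[] = {
    	KEY(0, 0, KEY_VOLUMEUP),	/* KEY(row, col, keycode) */
    	KEY(0, 1, KEY_VOLUMEDOWN),
    };

    static struct matrix_keymap_data board_keymap_data = {
    	.keymap		= board_keymap,
    	.keymap_size	= ARRAY_SIZE(board_keymap),
    };

    static struct pm8xxx_keypad_platform_data board_keypad_data = {
    	.input_name	 = "board-keypad",	/* optional, see the "? :" fallbacks above */
    	.num_rows	 = 5,
    	.num_cols	 = 5,
    	.rows_gpio_start = 9,			/* hypothetical PMIC GPIO numbers */
    	.cols_gpio_start = 1,
    	.scan_delay_ms	 = 32,			/* must be a power of two within the allowed range */
    	.row_hold_ns	 = 91500,		/* hypothetical; must be a multiple of MIN_ROW_HOLD_DELAY */
    	.debounce_ms	 = 15,			/* must be a multiple of 5 within the allowed range */
    	.rep		 = true,
    	.wakeup		 = true,
    	.keymap_data	 = &board_keymap_data,
    };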
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig index f9cf0881b0e3..45dc6aa62ba4 100644 --- a/drivers/input/misc/Kconfig +++ b/drivers/input/misc/Kconfig | |||
@@ -330,6 +330,17 @@ config INPUT_PWM_BEEPER | |||
330 | To compile this driver as a module, choose M here: the module will be | 330 | To compile this driver as a module, choose M here: the module will be |
331 | called pwm-beeper. | 331 | called pwm-beeper. |
332 | 332 | ||
333 | config INPUT_PMIC8XXX_PWRKEY | ||
334 | tristate "PMIC8XXX power key support" | ||
335 | depends on MFD_PM8XXX | ||
336 | help | ||
337 | Say Y here if you want support for the PMIC8XXX power key. | ||
338 | |||
339 | If unsure, say N. | ||
340 | |||
341 | To compile this driver as a module, choose M here: the | ||
342 | module will be called pmic8xxx-pwrkey. | ||
343 | |||
333 | config INPUT_GPIO_ROTARY_ENCODER | 344 | config INPUT_GPIO_ROTARY_ENCODER |
334 | tristate "Rotary encoders connected to GPIO pins" | 345 | tristate "Rotary encoders connected to GPIO pins" |
335 | depends on GPIOLIB && GENERIC_GPIO | 346 | depends on GPIOLIB && GENERIC_GPIO |
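
The new entry follows the usual pattern: it needs only the PM8xxx MFD core it depends on, and building it modular produces pmic8xxx-pwrkey.ko as the help text says. A hypothetical .config fragment:

    CONFIG_MFD_PM8XXX=y
    CONFIG_INPUT_PMIC8XXX_PWRKEY=m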
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile index e3f7984e6274..38efb2cb182b 100644 --- a/drivers/input/misc/Makefile +++ b/drivers/input/misc/Makefile | |||
@@ -33,6 +33,7 @@ obj-$(CONFIG_INPUT_PCF8574) += pcf8574_keypad.o | |||
33 | obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o | 33 | obj-$(CONFIG_INPUT_PCSPKR) += pcspkr.o |
34 | obj-$(CONFIG_INPUT_POWERMATE) += powermate.o | 34 | obj-$(CONFIG_INPUT_POWERMATE) += powermate.o |
35 | obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o | 35 | obj-$(CONFIG_INPUT_PWM_BEEPER) += pwm-beeper.o |
36 | obj-$(CONFIG_INPUT_PMIC8XXX_PWRKEY) += pmic8xxx-pwrkey.o | ||
36 | obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o | 37 | obj-$(CONFIG_INPUT_RB532_BUTTON) += rb532_button.o |
37 | obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o | 38 | obj-$(CONFIG_INPUT_GPIO_ROTARY_ENCODER) += rotary_encoder.o |
38 | obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o | 39 | obj-$(CONFIG_INPUT_SGI_BTNS) += sgi_btns.o |
diff --git a/drivers/input/misc/pmic8xxx-pwrkey.c b/drivers/input/misc/pmic8xxx-pwrkey.c new file mode 100644 index 000000000000..97e07e786e41 --- /dev/null +++ b/drivers/input/misc/pmic8xxx-pwrkey.c | |||
@@ -0,0 +1,231 @@ | |||
1 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | */ | ||
12 | |||
13 | #include <linux/module.h> | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/errno.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/input.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/log2.h> | ||
22 | |||
23 | #include <linux/mfd/pm8xxx/core.h> | ||
24 | #include <linux/input/pmic8xxx-pwrkey.h> | ||
25 | |||
26 | #define PON_CNTL_1 0x1C | ||
27 | #define PON_CNTL_PULL_UP BIT(7) | ||
28 | #define PON_CNTL_TRIG_DELAY_MASK (0x7) | ||
29 | |||
30 | /** | ||
31 | * struct pmic8xxx_pwrkey - pmic8xxx pwrkey information | ||
32 | * @key_press_irq: key press irq number | ||
33 | */ | ||
34 | struct pmic8xxx_pwrkey { | ||
35 | struct input_dev *pwr; | ||
36 | int key_press_irq; | ||
37 | }; | ||
38 | |||
39 | static irqreturn_t pwrkey_press_irq(int irq, void *_pwrkey) | ||
40 | { | ||
41 | struct pmic8xxx_pwrkey *pwrkey = _pwrkey; | ||
42 | |||
43 | input_report_key(pwrkey->pwr, KEY_POWER, 1); | ||
44 | input_sync(pwrkey->pwr); | ||
45 | |||
46 | return IRQ_HANDLED; | ||
47 | } | ||
48 | |||
49 | static irqreturn_t pwrkey_release_irq(int irq, void *_pwrkey) | ||
50 | { | ||
51 | struct pmic8xxx_pwrkey *pwrkey = _pwrkey; | ||
52 | |||
53 | input_report_key(pwrkey->pwr, KEY_POWER, 0); | ||
54 | input_sync(pwrkey->pwr); | ||
55 | |||
56 | return IRQ_HANDLED; | ||
57 | } | ||
58 | |||
59 | #ifdef CONFIG_PM_SLEEP | ||
60 | static int pmic8xxx_pwrkey_suspend(struct device *dev) | ||
61 | { | ||
62 | struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev); | ||
63 | |||
64 | if (device_may_wakeup(dev)) | ||
65 | enable_irq_wake(pwrkey->key_press_irq); | ||
66 | |||
67 | return 0; | ||
68 | } | ||
69 | |||
70 | static int pmic8xxx_pwrkey_resume(struct device *dev) | ||
71 | { | ||
72 | struct pmic8xxx_pwrkey *pwrkey = dev_get_drvdata(dev); | ||
73 | |||
74 | if (device_may_wakeup(dev)) | ||
75 | disable_irq_wake(pwrkey->key_press_irq); | ||
76 | |||
77 | return 0; | ||
78 | } | ||
79 | #endif | ||
80 | |||
81 | static SIMPLE_DEV_PM_OPS(pm8xxx_pwr_key_pm_ops, | ||
82 | pmic8xxx_pwrkey_suspend, pmic8xxx_pwrkey_resume); | ||
83 | |||
84 | static int __devinit pmic8xxx_pwrkey_probe(struct platform_device *pdev) | ||
85 | { | ||
86 | struct input_dev *pwr; | ||
87 | int key_release_irq = platform_get_irq(pdev, 0); | ||
88 | int key_press_irq = platform_get_irq(pdev, 1); | ||
89 | int err; | ||
90 | unsigned int delay; | ||
91 | u8 pon_cntl; | ||
92 | struct pmic8xxx_pwrkey *pwrkey; | ||
93 | const struct pm8xxx_pwrkey_platform_data *pdata = mfd_get_data(pdev); | ||
94 | |||
95 | if (!pdata) { | ||
96 | dev_err(&pdev->dev, "power key platform data not supplied\n"); | ||
97 | return -EINVAL; | ||
98 | } | ||
99 | |||
100 | if (pdata->kpd_trigger_delay_us > 62500) { | ||
101 | dev_err(&pdev->dev, "invalid power key trigger delay\n"); | ||
102 | return -EINVAL; | ||
103 | } | ||
104 | |||
105 | pwrkey = kzalloc(sizeof(*pwrkey), GFP_KERNEL); | ||
106 | if (!pwrkey) | ||
107 | return -ENOMEM; | ||
108 | |||
109 | pwr = input_allocate_device(); | ||
110 | if (!pwr) { | ||
111 | dev_dbg(&pdev->dev, "Can't allocate power button\n"); | ||
112 | err = -ENOMEM; | ||
113 | goto free_pwrkey; | ||
114 | } | ||
115 | |||
116 | input_set_capability(pwr, EV_KEY, KEY_POWER); | ||
117 | |||
118 | pwr->name = "pmic8xxx_pwrkey"; | ||
119 | pwr->phys = "pmic8xxx_pwrkey/input0"; | ||
120 | pwr->dev.parent = &pdev->dev; | ||
121 | |||
122 | delay = (pdata->kpd_trigger_delay_us << 10) / USEC_PER_SEC; | ||
123 | delay = 1 + ilog2(delay); | ||
124 | |||
125 | err = pm8xxx_readb(pdev->dev.parent, PON_CNTL_1, &pon_cntl); | ||
126 | if (err < 0) { | ||
127 | dev_err(&pdev->dev, "failed reading PON_CNTL_1 err=%d\n", err); | ||
128 | goto free_input_dev; | ||
129 | } | ||
130 | |||
131 | pon_cntl &= ~PON_CNTL_TRIG_DELAY_MASK; | ||
132 | pon_cntl |= (delay & PON_CNTL_TRIG_DELAY_MASK); | ||
133 | if (pdata->pull_up) | ||
134 | pon_cntl |= PON_CNTL_PULL_UP; | ||
135 | else | ||
136 | pon_cntl &= ~PON_CNTL_PULL_UP; | ||
137 | |||
138 | err = pm8xxx_writeb(pdev->dev.parent, PON_CNTL_1, pon_cntl); | ||
139 | if (err < 0) { | ||
140 | dev_err(&pdev->dev, "failed writing PON_CNTL_1 err=%d\n", err); | ||
141 | goto free_input_dev; | ||
142 | } | ||
143 | |||
144 | err = input_register_device(pwr); | ||
145 | if (err) { | ||
146 | dev_dbg(&pdev->dev, "Can't register power key: %d\n", err); | ||
147 | goto free_input_dev; | ||
148 | } | ||
149 | |||
150 | pwrkey->key_press_irq = key_press_irq; | ||
151 | pwrkey->pwr = pwr; | ||
152 | |||
153 | platform_set_drvdata(pdev, pwrkey); | ||
154 | |||
155 | err = request_irq(key_press_irq, pwrkey_press_irq, | ||
156 | IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_press", pwrkey); | ||
157 | if (err < 0) { | ||
158 | dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n", | ||
159 | key_press_irq, err); | ||
160 | goto unreg_input_dev; | ||
161 | } | ||
162 | |||
163 | err = request_irq(key_release_irq, pwrkey_release_irq, | ||
164 | IRQF_TRIGGER_RISING, "pmic8xxx_pwrkey_release", pwrkey); | ||
165 | if (err < 0) { | ||
166 | dev_dbg(&pdev->dev, "Can't get %d IRQ for pwrkey: %d\n", | ||
167 | key_release_irq, err); | ||
168 | |||
169 | goto free_press_irq; | ||
170 | } | ||
171 | |||
172 | device_init_wakeup(&pdev->dev, pdata->wakeup); | ||
173 | |||
174 | return 0; | ||
175 | |||
176 | free_press_irq: | ||
177 | free_irq(key_press_irq, pwrkey); | ||
178 | unreg_input_dev: | ||
179 | platform_set_drvdata(pdev, NULL); | ||
180 | input_unregister_device(pwr); | ||
181 | pwr = NULL; | ||
182 | free_input_dev: | ||
183 | input_free_device(pwr); | ||
184 | free_pwrkey: | ||
185 | kfree(pwrkey); | ||
186 | return err; | ||
187 | } | ||
188 | |||
189 | static int __devexit pmic8xxx_pwrkey_remove(struct platform_device *pdev) | ||
190 | { | ||
191 | struct pmic8xxx_pwrkey *pwrkey = platform_get_drvdata(pdev); | ||
192 | int key_release_irq = platform_get_irq(pdev, 0); | ||
193 | int key_press_irq = platform_get_irq(pdev, 1); | ||
194 | |||
195 | device_init_wakeup(&pdev->dev, 0); | ||
196 | |||
197 | free_irq(key_press_irq, pwrkey); | ||
198 | free_irq(key_release_irq, pwrkey); | ||
199 | input_unregister_device(pwrkey->pwr); | ||
200 | platform_set_drvdata(pdev, NULL); | ||
201 | kfree(pwrkey); | ||
202 | |||
203 | return 0; | ||
204 | } | ||
205 | |||
206 | static struct platform_driver pmic8xxx_pwrkey_driver = { | ||
207 | .probe = pmic8xxx_pwrkey_probe, | ||
208 | .remove = __devexit_p(pmic8xxx_pwrkey_remove), | ||
209 | .driver = { | ||
210 | .name = PM8XXX_PWRKEY_DEV_NAME, | ||
211 | .owner = THIS_MODULE, | ||
212 | .pm = &pm8xxx_pwr_key_pm_ops, | ||
213 | }, | ||
214 | }; | ||
215 | |||
216 | static int __init pmic8xxx_pwrkey_init(void) | ||
217 | { | ||
218 | return platform_driver_register(&pmic8xxx_pwrkey_driver); | ||
219 | } | ||
220 | module_init(pmic8xxx_pwrkey_init); | ||
221 | |||
222 | static void __exit pmic8xxx_pwrkey_exit(void) | ||
223 | { | ||
224 | platform_driver_unregister(&pmic8xxx_pwrkey_driver); | ||
225 | } | ||
226 | module_exit(pmic8xxx_pwrkey_exit); | ||
227 | |||
228 | MODULE_ALIAS("platform:pmic8xxx_pwrkey"); | ||
229 | MODULE_DESCRIPTION("PMIC8XXX Power Key driver"); | ||
230 | MODULE_LICENSE("GPL v2"); | ||
231 | MODULE_AUTHOR("Trilok Soni <tsoni@codeaurora.org>"); | ||
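
The trigger-delay conversion in pmic8xxx_pwrkey_probe() packs kpd_trigger_delay_us into the 3-bit PON_CNTL_TRIG_DELAY_MASK field, which is also why the probe rejects anything above 62500 us. A short trace, illustrative only, with a hypothetical platform value:

    /* Not part of the patch: trace of the delay encoding above. */
    unsigned int us = 15625;				/* hypothetical kpd_trigger_delay_us */
    unsigned int delay = (us << 10) / USEC_PER_SEC;	/* 16000000 / 1000000 = 16 */
    delay = 1 + ilog2(delay);				/* 1 + 4 = 5, fits the 3-bit field */

    /* At the 62500 us limit: (62500 << 10) / 1000000 = 64 and 1 + ilog2(64) = 7,
     * exactly PON_CNTL_TRIG_DELAY_MASK, so larger values cannot be encoded. */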
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c index 6a11694e3fc7..014dd4ad0d4f 100644 --- a/drivers/input/misc/twl4030-vibra.c +++ b/drivers/input/misc/twl4030-vibra.c | |||
@@ -29,7 +29,6 @@ | |||
29 | #include <linux/workqueue.h> | 29 | #include <linux/workqueue.h> |
30 | #include <linux/i2c/twl.h> | 30 | #include <linux/i2c/twl.h> |
31 | #include <linux/mfd/twl4030-codec.h> | 31 | #include <linux/mfd/twl4030-codec.h> |
32 | #include <linux/mfd/core.h> | ||
33 | #include <linux/input.h> | 32 | #include <linux/input.h> |
34 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
35 | 34 | ||
@@ -197,7 +196,7 @@ static SIMPLE_DEV_PM_OPS(twl4030_vibra_pm_ops, | |||
197 | 196 | ||
198 | static int __devinit twl4030_vibra_probe(struct platform_device *pdev) | 197 | static int __devinit twl4030_vibra_probe(struct platform_device *pdev) |
199 | { | 198 | { |
200 | struct twl4030_codec_vibra_data *pdata = mfd_get_data(pdev); | 199 | struct twl4030_codec_vibra_data *pdata = pdev->dev.platform_data; |
201 | struct vibra_info *info; | 200 | struct vibra_info *info; |
202 | int ret; | 201 | int ret; |
203 | 202 | ||
diff --git a/drivers/isdn/hardware/eicon/divasfunc.c b/drivers/isdn/hardware/eicon/divasfunc.c index d36a4c09e25d..0bbee7824d78 100644 --- a/drivers/isdn/hardware/eicon/divasfunc.c +++ b/drivers/isdn/hardware/eicon/divasfunc.c | |||
@@ -113,9 +113,8 @@ void diva_xdi_didd_remove_adapter(int card) | |||
113 | static void start_dbg(void) | 113 | static void start_dbg(void) |
114 | { | 114 | { |
115 | DbgRegister("DIVAS", DRIVERRELEASE_DIVAS, (debugmask) ? debugmask : DBG_DEFAULT); | 115 | DbgRegister("DIVAS", DRIVERRELEASE_DIVAS, (debugmask) ? debugmask : DBG_DEFAULT); |
116 | DBG_LOG(("DIVA ISDNXDI BUILD (%s[%s]-%s-%s)", | 116 | DBG_LOG(("DIVA ISDNXDI BUILD (%s[%s])", |
117 | DIVA_BUILD, diva_xdi_common_code_build, __DATE__, | 117 | DIVA_BUILD, diva_xdi_common_code_build)) |
118 | __TIME__)) | ||
119 | } | 118 | } |
120 | 119 | ||
121 | /* | 120 | /* |
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 1d027b475b22..23f0d5e99f35 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig | |||
@@ -389,6 +389,16 @@ config LEDS_NETXBIG | |||
389 | and 5Big Network v2 boards. The LEDs are wired to a CPLD and are | 389 | and 5Big Network v2 boards. The LEDs are wired to a CPLD and are |
390 | controlled through a GPIO extension bus. | 390 | controlled through a GPIO extension bus. |
391 | 391 | ||
392 | config LEDS_ASIC3 | ||
393 | bool "LED support for the HTC ASIC3" | ||
394 | depends on MFD_ASIC3 | ||
395 | default y | ||
396 | help | ||
397 | This option enables support for the LEDs on the HTC ASIC3. The HTC | ||
398 | ASIC3 LED GPIOs are inputs, not outputs, thus the leds-gpio driver | ||
399 | cannot be used. This driver supports hardware blinking with an on+off | ||
400 | period from 62ms to 125s. Say Y to enable LEDs on the HP iPAQ hx4700. | ||
401 | |||
392 | config LEDS_TRIGGERS | 402 | config LEDS_TRIGGERS |
393 | bool "LED Trigger support" | 403 | bool "LED Trigger support" |
394 | depends on LEDS_CLASS | 404 | depends on LEDS_CLASS |
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index bccb96c9bb45..bbfd2e367dc0 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile | |||
@@ -42,6 +42,7 @@ obj-$(CONFIG_LEDS_DELL_NETBOOKS) += dell-led.o | |||
42 | obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o | 42 | obj-$(CONFIG_LEDS_MC13783) += leds-mc13783.o |
43 | obj-$(CONFIG_LEDS_NS2) += leds-ns2.o | 43 | obj-$(CONFIG_LEDS_NS2) += leds-ns2.o |
44 | obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o | 44 | obj-$(CONFIG_LEDS_NETXBIG) += leds-netxbig.o |
45 | obj-$(CONFIG_LEDS_ASIC3) += leds-asic3.o | ||
45 | 46 | ||
46 | # LED SPI Drivers | 47 | # LED SPI Drivers |
47 | obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o | 48 | obj-$(CONFIG_LEDS_DAC124S085) += leds-dac124s085.o |
diff --git a/drivers/leds/leds-88pm860x.c b/drivers/leds/leds-88pm860x.c index 416def84d045..0d4c16678ace 100644 --- a/drivers/leds/leds-88pm860x.c +++ b/drivers/leds/leds-88pm860x.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/leds.h> | 17 | #include <linux/leds.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/workqueue.h> | 19 | #include <linux/workqueue.h> |
20 | #include <linux/mfd/core.h> | ||
21 | #include <linux/mfd/88pm860x.h> | 20 | #include <linux/mfd/88pm860x.h> |
22 | 21 | ||
23 | #define LED_PWM_SHIFT (3) | 22 | #define LED_PWM_SHIFT (3) |
@@ -171,7 +170,6 @@ static int pm860x_led_probe(struct platform_device *pdev) | |||
171 | struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); | 170 | struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); |
172 | struct pm860x_led_pdata *pdata; | 171 | struct pm860x_led_pdata *pdata; |
173 | struct pm860x_led *data; | 172 | struct pm860x_led *data; |
174 | struct mfd_cell *cell; | ||
175 | struct resource *res; | 173 | struct resource *res; |
176 | int ret; | 174 | int ret; |
177 | 175 | ||
@@ -181,10 +179,7 @@ static int pm860x_led_probe(struct platform_device *pdev) | |||
181 | return -EINVAL; | 179 | return -EINVAL; |
182 | } | 180 | } |
183 | 181 | ||
184 | cell = pdev->dev.platform_data; | 182 | pdata = pdev->dev.platform_data; |
185 | if (cell == NULL) | ||
186 | return -ENODEV; | ||
187 | pdata = cell->mfd_data; | ||
188 | if (pdata == NULL) { | 183 | if (pdata == NULL) { |
189 | dev_err(&pdev->dev, "No platform data!\n"); | 184 | dev_err(&pdev->dev, "No platform data!\n"); |
190 | return -EINVAL; | 185 | return -EINVAL; |
diff --git a/drivers/leds/leds-asic3.c b/drivers/leds/leds-asic3.c new file mode 100644 index 000000000000..22f847c890c9 --- /dev/null +++ b/drivers/leds/leds-asic3.c | |||
@@ -0,0 +1,165 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Paul Parsons <lost.distance@yahoo.com> | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/platform_device.h> | ||
12 | #include <linux/leds.h> | ||
13 | #include <linux/slab.h> | ||
14 | |||
15 | #include <linux/mfd/asic3.h> | ||
16 | #include <linux/mfd/core.h> | ||
17 | |||
18 | /* | ||
19 | * The HTC ASIC3 LED GPIOs are inputs, not outputs. | ||
20 | * Hence we turn the LEDs on/off via the TimeBase register. | ||
21 | */ | ||
22 | |||
23 | /* | ||
24 | * When TimeBase is 4 the clock resolution is about 32Hz. | ||
25 | * This driver supports hardware blinking with an on+off | ||
26 | * period from 62ms (2 clocks) to 125s (4000 clocks). | ||
27 | */ | ||
28 | #define MS_TO_CLK(ms) DIV_ROUND_CLOSEST(((ms)*1024), 32000) | ||
29 | #define CLK_TO_MS(clk) (((clk)*32000)/1024) | ||
30 | #define MAX_CLK 4000 /* Fits into 12-bit Time registers */ | ||
31 | #define MAX_MS CLK_TO_MS(MAX_CLK) | ||
32 | |||
33 | static const unsigned int led_n_base[ASIC3_NUM_LEDS] = { | ||
34 | [0] = ASIC3_LED_0_Base, | ||
35 | [1] = ASIC3_LED_1_Base, | ||
36 | [2] = ASIC3_LED_2_Base, | ||
37 | }; | ||
38 | |||
39 | static void brightness_set(struct led_classdev *cdev, | ||
40 | enum led_brightness value) | ||
41 | { | ||
42 | struct platform_device *pdev = to_platform_device(cdev->dev->parent); | ||
43 | const struct mfd_cell *cell = mfd_get_cell(pdev); | ||
44 | struct asic3 *asic = dev_get_drvdata(pdev->dev.parent); | ||
45 | u32 timebase; | ||
46 | unsigned int base; | ||
47 | |||
48 | timebase = (value == LED_OFF) ? 0 : (LED_EN|0x4); | ||
49 | |||
50 | base = led_n_base[cell->id]; | ||
51 | asic3_write_register(asic, (base + ASIC3_LED_PeriodTime), 32); | ||
52 | asic3_write_register(asic, (base + ASIC3_LED_DutyTime), 32); | ||
53 | asic3_write_register(asic, (base + ASIC3_LED_AutoStopCount), 0); | ||
54 | asic3_write_register(asic, (base + ASIC3_LED_TimeBase), timebase); | ||
55 | } | ||
56 | |||
57 | static int blink_set(struct led_classdev *cdev, | ||
58 | unsigned long *delay_on, | ||
59 | unsigned long *delay_off) | ||
60 | { | ||
61 | struct platform_device *pdev = to_platform_device(cdev->dev->parent); | ||
62 | const struct mfd_cell *cell = mfd_get_cell(pdev); | ||
63 | struct asic3 *asic = dev_get_drvdata(pdev->dev.parent); | ||
64 | u32 on; | ||
65 | u32 off; | ||
66 | unsigned int base; | ||
67 | |||
68 | if (*delay_on > MAX_MS || *delay_off > MAX_MS) | ||
69 | return -EINVAL; | ||
70 | |||
71 | if (*delay_on == 0 && *delay_off == 0) { | ||
72 | /* If both are zero then a sensible default should be chosen */ | ||
73 | on = MS_TO_CLK(500); | ||
74 | off = MS_TO_CLK(500); | ||
75 | } else { | ||
76 | on = MS_TO_CLK(*delay_on); | ||
77 | off = MS_TO_CLK(*delay_off); | ||
78 | if ((on + off) > MAX_CLK) | ||
79 | return -EINVAL; | ||
80 | } | ||
81 | |||
82 | base = led_n_base[cell->id]; | ||
83 | asic3_write_register(asic, (base + ASIC3_LED_PeriodTime), (on + off)); | ||
84 | asic3_write_register(asic, (base + ASIC3_LED_DutyTime), on); | ||
85 | asic3_write_register(asic, (base + ASIC3_LED_AutoStopCount), 0); | ||
86 | asic3_write_register(asic, (base + ASIC3_LED_TimeBase), (LED_EN|0x4)); | ||
87 | |||
88 | *delay_on = CLK_TO_MS(on); | ||
89 | *delay_off = CLK_TO_MS(off); | ||
90 | |||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | static int __devinit asic3_led_probe(struct platform_device *pdev) | ||
95 | { | ||
96 | struct asic3_led *led = pdev->dev.platform_data; | ||
97 | int ret; | ||
98 | |||
99 | ret = mfd_cell_enable(pdev); | ||
100 | if (ret < 0) | ||
101 | goto ret0; | ||
102 | |||
103 | led->cdev = kzalloc(sizeof(struct led_classdev), GFP_KERNEL); | ||
104 | if (!led->cdev) { | ||
105 | ret = -ENOMEM; | ||
106 | goto ret1; | ||
107 | } | ||
108 | |||
109 | led->cdev->name = led->name; | ||
110 | led->cdev->default_trigger = led->default_trigger; | ||
111 | led->cdev->brightness_set = brightness_set; | ||
112 | led->cdev->blink_set = blink_set; | ||
113 | |||
114 | ret = led_classdev_register(&pdev->dev, led->cdev); | ||
115 | if (ret < 0) | ||
116 | goto ret2; | ||
117 | |||
118 | return 0; | ||
119 | |||
120 | ret2: | ||
121 | kfree(led->cdev); | ||
122 | ret1: | ||
123 | (void) mfd_cell_disable(pdev); | ||
124 | ret0: | ||
125 | return ret; | ||
126 | } | ||
127 | |||
128 | static int __devexit asic3_led_remove(struct platform_device *pdev) | ||
129 | { | ||
130 | struct asic3_led *led = pdev->dev.platform_data; | ||
131 | |||
132 | led_classdev_unregister(led->cdev); | ||
133 | |||
134 | kfree(led->cdev); | ||
135 | |||
136 | return mfd_cell_disable(pdev); | ||
137 | } | ||
138 | |||
139 | static struct platform_driver asic3_led_driver = { | ||
140 | .probe = asic3_led_probe, | ||
141 | .remove = __devexit_p(asic3_led_remove), | ||
142 | .driver = { | ||
143 | .name = "leds-asic3", | ||
144 | .owner = THIS_MODULE, | ||
145 | }, | ||
146 | }; | ||
147 | |||
148 | MODULE_ALIAS("platform:leds-asic3"); | ||
149 | |||
150 | static int __init asic3_led_init(void) | ||
151 | { | ||
152 | return platform_driver_register(&asic3_led_driver); | ||
153 | } | ||
154 | |||
155 | static void __exit asic3_led_exit(void) | ||
156 | { | ||
157 | platform_driver_unregister(&asic3_led_driver); | ||
158 | } | ||
159 | |||
160 | module_init(asic3_led_init); | ||
161 | module_exit(asic3_led_exit); | ||
162 | |||
163 | MODULE_AUTHOR("Paul Parsons <lost.distance@yahoo.com>"); | ||
164 | MODULE_DESCRIPTION("HTC ASIC3 LED driver"); | ||
165 | MODULE_LICENSE("GPL"); | ||
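
The Kconfig help and the macro comment both quote an on+off blink period of 62 ms to 125 s; with TimeBase 4 the clock runs at roughly 32 Hz, and that range falls straight out of MS_TO_CLK()/CLK_TO_MS(). A short trace, illustrative only:

    /* Not part of the patch: the 62 ms .. 125 s range traced through the macros above. */
    unsigned int min_clk = MS_TO_CLK(62);	/* DIV_ROUND_CLOSEST(62 * 1024, 32000)  = 2 clocks  */
    unsigned int min_ms  = CLK_TO_MS(min_clk);	/* (2 * 32000) / 1024                   = 62 ms     */
    unsigned int dfl_clk = MS_TO_CLK(500);	/* the 500/500 ms default in blink_set() = 16 clocks */
    unsigned int max_ms  = CLK_TO_MS(MAX_CLK);	/* (4000 * 32000) / 1024 = 125000 ms    = 125 s     */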
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c index 126ca7955f6e..f369e56d6547 100644 --- a/drivers/leds/leds-mc13783.c +++ b/drivers/leds/leds-mc13783.c | |||
@@ -22,7 +22,6 @@ | |||
22 | #include <linux/leds.h> | 22 | #include <linux/leds.h> |
23 | #include <linux/workqueue.h> | 23 | #include <linux/workqueue.h> |
24 | #include <linux/mfd/mc13783.h> | 24 | #include <linux/mfd/mc13783.h> |
25 | #include <linux/mfd/core.h> | ||
26 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
27 | 26 | ||
28 | struct mc13783_led { | 27 | struct mc13783_led { |
@@ -184,7 +183,7 @@ static int __devinit mc13783_led_setup(struct mc13783_led *led, int max_current) | |||
184 | 183 | ||
185 | static int __devinit mc13783_leds_prepare(struct platform_device *pdev) | 184 | static int __devinit mc13783_leds_prepare(struct platform_device *pdev) |
186 | { | 185 | { |
187 | struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev); | 186 | struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev); |
188 | struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent); | 187 | struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent); |
189 | int ret = 0; | 188 | int ret = 0; |
190 | int reg = 0; | 189 | int reg = 0; |
@@ -265,7 +264,7 @@ out: | |||
265 | 264 | ||
266 | static int __devinit mc13783_led_probe(struct platform_device *pdev) | 265 | static int __devinit mc13783_led_probe(struct platform_device *pdev) |
267 | { | 266 | { |
268 | struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev); | 267 | struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev); |
269 | struct mc13783_led_platform_data *led_cur; | 268 | struct mc13783_led_platform_data *led_cur; |
270 | struct mc13783_led *led, *led_dat; | 269 | struct mc13783_led *led, *led_dat; |
271 | int ret, i; | 270 | int ret, i; |
@@ -352,7 +351,7 @@ err_free: | |||
352 | 351 | ||
353 | static int __devexit mc13783_led_remove(struct platform_device *pdev) | 352 | static int __devexit mc13783_led_remove(struct platform_device *pdev) |
354 | { | 353 | { |
355 | struct mc13783_leds_platform_data *pdata = mfd_get_data(pdev); | 354 | struct mc13783_leds_platform_data *pdata = dev_get_platdata(&pdev->dev); |
356 | struct mc13783_led *led = platform_get_drvdata(pdev); | 355 | struct mc13783_led *led = platform_get_drvdata(pdev); |
357 | struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent); | 356 | struct mc13783 *dev = dev_get_drvdata(pdev->dev.parent); |
358 | int i; | 357 | int i; |
diff --git a/drivers/media/dvb/dm1105/dm1105.c b/drivers/media/dvb/dm1105/dm1105.c index 2d8b4044be36..b2b0c45f32a9 100644 --- a/drivers/media/dvb/dm1105/dm1105.c +++ b/drivers/media/dvb/dm1105/dm1105.c | |||
@@ -20,6 +20,7 @@ | |||
20 | */ | 20 | */ |
21 | 21 | ||
22 | #include <linux/i2c.h> | 22 | #include <linux/i2c.h> |
23 | #include <linux/i2c-algo-bit.h> | ||
23 | #include <linux/init.h> | 24 | #include <linux/init.h> |
24 | #include <linux/kernel.h> | 25 | #include <linux/kernel.h> |
25 | #include <linux/module.h> | 26 | #include <linux/module.h> |
@@ -49,11 +50,12 @@ | |||
49 | 50 | ||
50 | #define UNSET (-1U) | 51 | #define UNSET (-1U) |
51 | 52 | ||
52 | #define DM1105_BOARD_NOAUTO UNSET | 53 | #define DM1105_BOARD_NOAUTO UNSET |
53 | #define DM1105_BOARD_UNKNOWN 0 | 54 | #define DM1105_BOARD_UNKNOWN 0 |
54 | #define DM1105_BOARD_DVBWORLD_2002 1 | 55 | #define DM1105_BOARD_DVBWORLD_2002 1 |
55 | #define DM1105_BOARD_DVBWORLD_2004 2 | 56 | #define DM1105_BOARD_DVBWORLD_2004 2 |
56 | #define DM1105_BOARD_AXESS_DM05 3 | 57 | #define DM1105_BOARD_AXESS_DM05 3 |
58 | #define DM1105_BOARD_UNBRANDED_I2C_ON_GPIO 4 | ||
57 | 59 | ||
58 | /* ----------------------------------------------- */ | 60 | /* ----------------------------------------------- */ |
59 | /* | 61 | /* |
@@ -157,22 +159,38 @@ | |||
157 | #define DM1105_MAX 0x04 | 159 | #define DM1105_MAX 0x04 |
158 | 160 | ||
159 | #define DRIVER_NAME "dm1105" | 161 | #define DRIVER_NAME "dm1105" |
162 | #define DM1105_I2C_GPIO_NAME "dm1105-gpio" | ||
160 | 163 | ||
161 | #define DM1105_DMA_PACKETS 47 | 164 | #define DM1105_DMA_PACKETS 47 |
162 | #define DM1105_DMA_PACKET_LENGTH (128*4) | 165 | #define DM1105_DMA_PACKET_LENGTH (128*4) |
163 | #define DM1105_DMA_BYTES (128 * 4 * DM1105_DMA_PACKETS) | 166 | #define DM1105_DMA_BYTES (128 * 4 * DM1105_DMA_PACKETS) |
164 | 167 | ||
168 | /* GPIO bit definitions */ | ||
169 | #define GPIO08 (1 << 8) | ||
170 | #define GPIO13 (1 << 13) | ||
171 | #define GPIO14 (1 << 14) | ||
172 | #define GPIO15 (1 << 15) | ||
173 | #define GPIO16 (1 << 16) | ||
174 | #define GPIO17 (1 << 17) | ||
175 | #define GPIO_ALL 0x03ffff | ||
176 | |||
165 | /* GPIO's for LNB power control */ | 177 | /* GPIO's for LNB power control */ |
166 | #define DM1105_LNB_MASK 0x00000000 | 178 | #define DM1105_LNB_MASK (GPIO_ALL & ~(GPIO14 | GPIO13)) |
167 | #define DM1105_LNB_OFF 0x00020000 | 179 | #define DM1105_LNB_OFF GPIO17 |
168 | #define DM1105_LNB_13V 0x00010100 | 180 | #define DM1105_LNB_13V (GPIO16 | GPIO08) |
169 | #define DM1105_LNB_18V 0x00000100 | 181 | #define DM1105_LNB_18V GPIO08 |
170 | 182 | ||
171 | /* GPIO's for LNB power control for Axess DM05 */ | 183 | /* GPIO's for LNB power control for Axess DM05 */ |
172 | #define DM05_LNB_MASK 0x00000000 | 184 | #define DM05_LNB_MASK (GPIO_ALL & ~(GPIO14 | GPIO13)) |
173 | #define DM05_LNB_OFF 0x00020000/* actually 13v */ | 185 | #define DM05_LNB_OFF GPIO17/* actually 13v */ |
174 | #define DM05_LNB_13V 0x00020000 | 186 | #define DM05_LNB_13V GPIO17 |
175 | #define DM05_LNB_18V 0x00030000 | 187 | #define DM05_LNB_18V (GPIO17 | GPIO16) |
188 | |||
189 | /* GPIO's for LNB power control for unbranded with I2C on GPIO */ | ||
190 | #define UNBR_LNB_MASK (GPIO17 | GPIO16) | ||
191 | #define UNBR_LNB_OFF 0 | ||
192 | #define UNBR_LNB_13V GPIO17 | ||
193 | #define UNBR_LNB_18V (GPIO17 | GPIO16) | ||
176 | 194 | ||
177 | static unsigned int card[] = {[0 ... 3] = UNSET }; | 195 | static unsigned int card[] = {[0 ... 3] = UNSET }; |
178 | module_param_array(card, int, NULL, 0444); | 196 | module_param_array(card, int, NULL, 0444); |
@@ -187,7 +205,11 @@ static unsigned int dm1105_devcount; | |||
187 | DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); | 205 | DVB_DEFINE_MOD_OPT_ADAPTER_NR(adapter_nr); |
188 | 206 | ||
189 | struct dm1105_board { | 207 | struct dm1105_board { |
190 | char *name; | 208 | char *name; |
209 | struct { | ||
210 | u32 mask, off, v13, v18; | ||
211 | } lnb; | ||
212 | u32 gpio_scl, gpio_sda; | ||
191 | }; | 213 | }; |
192 | 214 | ||
193 | struct dm1105_subid { | 215 | struct dm1105_subid { |
@@ -199,15 +221,50 @@ struct dm1105_subid { | |||
199 | static const struct dm1105_board dm1105_boards[] = { | 221 | static const struct dm1105_board dm1105_boards[] = { |
200 | [DM1105_BOARD_UNKNOWN] = { | 222 | [DM1105_BOARD_UNKNOWN] = { |
201 | .name = "UNKNOWN/GENERIC", | 223 | .name = "UNKNOWN/GENERIC", |
224 | .lnb = { | ||
225 | .mask = DM1105_LNB_MASK, | ||
226 | .off = DM1105_LNB_OFF, | ||
227 | .v13 = DM1105_LNB_13V, | ||
228 | .v18 = DM1105_LNB_18V, | ||
229 | }, | ||
202 | }, | 230 | }, |
203 | [DM1105_BOARD_DVBWORLD_2002] = { | 231 | [DM1105_BOARD_DVBWORLD_2002] = { |
204 | .name = "DVBWorld PCI 2002", | 232 | .name = "DVBWorld PCI 2002", |
233 | .lnb = { | ||
234 | .mask = DM1105_LNB_MASK, | ||
235 | .off = DM1105_LNB_OFF, | ||
236 | .v13 = DM1105_LNB_13V, | ||
237 | .v18 = DM1105_LNB_18V, | ||
238 | }, | ||
205 | }, | 239 | }, |
206 | [DM1105_BOARD_DVBWORLD_2004] = { | 240 | [DM1105_BOARD_DVBWORLD_2004] = { |
207 | .name = "DVBWorld PCI 2004", | 241 | .name = "DVBWorld PCI 2004", |
242 | .lnb = { | ||
243 | .mask = DM1105_LNB_MASK, | ||
244 | .off = DM1105_LNB_OFF, | ||
245 | .v13 = DM1105_LNB_13V, | ||
246 | .v18 = DM1105_LNB_18V, | ||
247 | }, | ||
208 | }, | 248 | }, |
209 | [DM1105_BOARD_AXESS_DM05] = { | 249 | [DM1105_BOARD_AXESS_DM05] = { |
210 | .name = "Axess/EasyTv DM05", | 250 | .name = "Axess/EasyTv DM05", |
251 | .lnb = { | ||
252 | .mask = DM05_LNB_MASK, | ||
253 | .off = DM05_LNB_OFF, | ||
254 | .v13 = DM05_LNB_13V, | ||
255 | .v18 = DM05_LNB_18V, | ||
256 | }, | ||
257 | }, | ||
258 | [DM1105_BOARD_UNBRANDED_I2C_ON_GPIO] = { | ||
259 | .name = "Unbranded DM1105 with i2c on GPIOs", | ||
260 | .lnb = { | ||
261 | .mask = UNBR_LNB_MASK, | ||
262 | .off = UNBR_LNB_OFF, | ||
263 | .v13 = UNBR_LNB_13V, | ||
264 | .v18 = UNBR_LNB_18V, | ||
265 | }, | ||
266 | .gpio_scl = GPIO14, | ||
267 | .gpio_sda = GPIO13, | ||
211 | }, | 268 | }, |
212 | }; | 269 | }; |
213 | 270 | ||
@@ -293,6 +350,8 @@ struct dm1105_dev { | |||
293 | 350 | ||
294 | /* i2c */ | 351 | /* i2c */ |
295 | struct i2c_adapter i2c_adap; | 352 | struct i2c_adapter i2c_adap; |
353 | struct i2c_adapter i2c_bb_adap; | ||
354 | struct i2c_algo_bit_data i2c_bit; | ||
296 | 355 | ||
297 | /* irq */ | 356 | /* irq */ |
298 | struct work_struct work; | 357 | struct work_struct work; |
@@ -328,6 +387,103 @@ struct dm1105_dev { | |||
328 | #define dm_setl(reg, bit) dm_andorl((reg), (bit), (bit)) | 387 | #define dm_setl(reg, bit) dm_andorl((reg), (bit), (bit)) |
329 | #define dm_clearl(reg, bit) dm_andorl((reg), (bit), 0) | 388 | #define dm_clearl(reg, bit) dm_andorl((reg), (bit), 0) |
330 | 389 | ||
390 | /* The chip has 18 GPIOs. In HOST mode the GPIOs are used as 15-bit address lines, | ||
391 | so only the 3 GPIOs from GPIO15 to GPIO17 are usable. | ||
392 | Whether HOST mode is enabled is not checked here, as it is not implemented yet. | ||
393 | */ | ||
394 | static void dm1105_gpio_set(struct dm1105_dev *dev, u32 mask) | ||
395 | { | ||
396 | if (mask & 0xfffc0000) | ||
397 | printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__); | ||
398 | |||
399 | if (mask & 0x0003ffff) | ||
400 | dm_setl(DM1105_GPIOVAL, mask & 0x0003ffff); | ||
401 | |||
402 | } | ||
403 | |||
404 | static void dm1105_gpio_clear(struct dm1105_dev *dev, u32 mask) | ||
405 | { | ||
406 | if (mask & 0xfffc0000) | ||
407 | printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__); | ||
408 | |||
409 | if (mask & 0x0003ffff) | ||
410 | dm_clearl(DM1105_GPIOVAL, mask & 0x0003ffff); | ||
411 | |||
412 | } | ||
413 | |||
414 | static void dm1105_gpio_andor(struct dm1105_dev *dev, u32 mask, u32 val) | ||
415 | { | ||
416 | if (mask & 0xfffc0000) | ||
417 | printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__); | ||
418 | |||
419 | if (mask & 0x0003ffff) | ||
420 | dm_andorl(DM1105_GPIOVAL, mask & 0x0003ffff, val); | ||
421 | |||
422 | } | ||
423 | |||
424 | static u32 dm1105_gpio_get(struct dm1105_dev *dev, u32 mask) | ||
425 | { | ||
426 | if (mask & 0xfffc0000) | ||
427 | printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__); | ||
428 | |||
429 | if (mask & 0x0003ffff) | ||
430 | return dm_readl(DM1105_GPIOVAL) & mask & 0x0003ffff; | ||
431 | |||
432 | return 0; | ||
433 | } | ||
434 | |||
435 | static void dm1105_gpio_enable(struct dm1105_dev *dev, u32 mask, int asoutput) | ||
436 | { | ||
437 | if (mask & 0xfffc0000) | ||
438 | printk(KERN_ERR "%s: Only 18 GPIOs are allowed\n", __func__); | ||
439 | |||
440 | if ((mask & 0x0003ffff) && asoutput) | ||
441 | dm_clearl(DM1105_GPIOCTR, mask & 0x0003ffff); | ||
442 | else if ((mask & 0x0003ffff) && !asoutput) | ||
443 | dm_setl(DM1105_GPIOCTR, mask & 0x0003ffff); | ||
444 | |||
445 | } | ||
446 | |||
447 | static void dm1105_setline(struct dm1105_dev *dev, u32 line, int state) | ||
448 | { | ||
449 | if (state) | ||
450 | dm1105_gpio_enable(dev, line, 0); | ||
451 | else { | ||
452 | dm1105_gpio_enable(dev, line, 1); | ||
453 | dm1105_gpio_clear(dev, line); | ||
454 | } | ||
455 | } | ||
456 | |||
457 | static void dm1105_setsda(void *data, int state) | ||
458 | { | ||
459 | struct dm1105_dev *dev = data; | ||
460 | |||
461 | dm1105_setline(dev, dm1105_boards[dev->boardnr].gpio_sda, state); | ||
462 | } | ||
463 | |||
464 | static void dm1105_setscl(void *data, int state) | ||
465 | { | ||
466 | struct dm1105_dev *dev = data; | ||
467 | |||
468 | dm1105_setline(dev, dm1105_boards[dev->boardnr].gpio_scl, state); | ||
469 | } | ||
470 | |||
471 | static int dm1105_getsda(void *data) | ||
472 | { | ||
473 | struct dm1105_dev *dev = data; | ||
474 | |||
475 | return dm1105_gpio_get(dev, dm1105_boards[dev->boardnr].gpio_sda) | ||
476 | ? 1 : 0; | ||
477 | } | ||
478 | |||
479 | static int dm1105_getscl(void *data) | ||
480 | { | ||
481 | struct dm1105_dev *dev = data; | ||
482 | |||
483 | return dm1105_gpio_get(dev, dm1105_boards[dev->boardnr].gpio_scl) | ||
484 | ? 1 : 0; | ||
485 | } | ||
486 | |||
331 | static int dm1105_i2c_xfer(struct i2c_adapter *i2c_adap, | 487 | static int dm1105_i2c_xfer(struct i2c_adapter *i2c_adap, |
332 | struct i2c_msg *msgs, int num) | 488 | struct i2c_msg *msgs, int num) |
333 | { | 489 | { |
@@ -436,31 +592,20 @@ static inline struct dm1105_dev *frontend_to_dm1105_dev(struct dvb_frontend *fe) | |||
436 | static int dm1105_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) | 592 | static int dm1105_set_voltage(struct dvb_frontend *fe, fe_sec_voltage_t voltage) |
437 | { | 593 | { |
438 | struct dm1105_dev *dev = frontend_to_dm1105_dev(fe); | 594 | struct dm1105_dev *dev = frontend_to_dm1105_dev(fe); |
439 | u32 lnb_mask, lnb_13v, lnb_18v, lnb_off; | ||
440 | 595 | ||
441 | switch (dev->boardnr) { | 596 | dm1105_gpio_enable(dev, dm1105_boards[dev->boardnr].lnb.mask, 1); |
442 | case DM1105_BOARD_AXESS_DM05: | ||
443 | lnb_mask = DM05_LNB_MASK; | ||
444 | lnb_off = DM05_LNB_OFF; | ||
445 | lnb_13v = DM05_LNB_13V; | ||
446 | lnb_18v = DM05_LNB_18V; | ||
447 | break; | ||
448 | case DM1105_BOARD_DVBWORLD_2002: | ||
449 | case DM1105_BOARD_DVBWORLD_2004: | ||
450 | default: | ||
451 | lnb_mask = DM1105_LNB_MASK; | ||
452 | lnb_off = DM1105_LNB_OFF; | ||
453 | lnb_13v = DM1105_LNB_13V; | ||
454 | lnb_18v = DM1105_LNB_18V; | ||
455 | } | ||
456 | |||
457 | dm_writel(DM1105_GPIOCTR, lnb_mask); | ||
458 | if (voltage == SEC_VOLTAGE_18) | 597 | if (voltage == SEC_VOLTAGE_18) |
459 | dm_writel(DM1105_GPIOVAL, lnb_18v); | 598 | dm1105_gpio_andor(dev, |
599 | dm1105_boards[dev->boardnr].lnb.mask, | ||
600 | dm1105_boards[dev->boardnr].lnb.v18); | ||
460 | else if (voltage == SEC_VOLTAGE_13) | 601 | else if (voltage == SEC_VOLTAGE_13) |
461 | dm_writel(DM1105_GPIOVAL, lnb_13v); | 602 | dm1105_gpio_andor(dev, |
603 | dm1105_boards[dev->boardnr].lnb.mask, | ||
604 | dm1105_boards[dev->boardnr].lnb.v13); | ||
462 | else | 605 | else |
463 | dm_writel(DM1105_GPIOVAL, lnb_off); | 606 | dm1105_gpio_andor(dev, |
607 | dm1105_boards[dev->boardnr].lnb.mask, | ||
608 | dm1105_boards[dev->boardnr].lnb.off); | ||
464 | 609 | ||
465 | return 0; | 610 | return 0; |
466 | } | 611 | } |
@@ -708,6 +853,38 @@ static int __devinit frontend_init(struct dm1105_dev *dev) | |||
708 | int ret; | 853 | int ret; |
709 | 854 | ||
710 | switch (dev->boardnr) { | 855 | switch (dev->boardnr) { |
856 | case DM1105_BOARD_UNBRANDED_I2C_ON_GPIO: | ||
857 | dm1105_gpio_enable(dev, GPIO15, 1); | ||
858 | dm1105_gpio_clear(dev, GPIO15); | ||
859 | msleep(100); | ||
860 | dm1105_gpio_set(dev, GPIO15); | ||
861 | msleep(200); | ||
862 | dev->fe = dvb_attach( | ||
863 | stv0299_attach, &sharp_z0194a_config, | ||
864 | &dev->i2c_bb_adap); | ||
865 | if (dev->fe) { | ||
866 | dev->fe->ops.set_voltage = dm1105_set_voltage; | ||
867 | dvb_attach(dvb_pll_attach, dev->fe, 0x60, | ||
868 | &dev->i2c_bb_adap, DVB_PLL_OPERA1); | ||
869 | break; | ||
870 | } | ||
871 | |||
872 | dev->fe = dvb_attach( | ||
873 | stv0288_attach, &earda_config, | ||
874 | &dev->i2c_bb_adap); | ||
875 | if (dev->fe) { | ||
876 | dev->fe->ops.set_voltage = dm1105_set_voltage; | ||
877 | dvb_attach(stb6000_attach, dev->fe, 0x61, | ||
878 | &dev->i2c_bb_adap); | ||
879 | break; | ||
880 | } | ||
881 | |||
882 | dev->fe = dvb_attach( | ||
883 | si21xx_attach, &serit_config, | ||
884 | &dev->i2c_bb_adap); | ||
885 | if (dev->fe) | ||
886 | dev->fe->ops.set_voltage = dm1105_set_voltage; | ||
887 | break; | ||
711 | case DM1105_BOARD_DVBWORLD_2004: | 888 | case DM1105_BOARD_DVBWORLD_2004: |
712 | dev->fe = dvb_attach( | 889 | dev->fe = dvb_attach( |
713 | cx24116_attach, &serit_sp2633_config, | 890 | cx24116_attach, &serit_sp2633_config, |
@@ -870,11 +1047,32 @@ static int __devinit dm1105_probe(struct pci_dev *pdev, | |||
870 | if (ret < 0) | 1047 | if (ret < 0) |
871 | goto err_dm1105_hw_exit; | 1048 | goto err_dm1105_hw_exit; |
872 | 1049 | ||
1050 | i2c_set_adapdata(&dev->i2c_bb_adap, dev); | ||
1051 | strcpy(dev->i2c_bb_adap.name, DM1105_I2C_GPIO_NAME); | ||
1052 | dev->i2c_bb_adap.owner = THIS_MODULE; | ||
1053 | dev->i2c_bb_adap.dev.parent = &pdev->dev; | ||
1054 | dev->i2c_bb_adap.algo_data = &dev->i2c_bit; | ||
1055 | dev->i2c_bit.data = dev; | ||
1056 | dev->i2c_bit.setsda = dm1105_setsda; | ||
1057 | dev->i2c_bit.setscl = dm1105_setscl; | ||
1058 | dev->i2c_bit.getsda = dm1105_getsda; | ||
1059 | dev->i2c_bit.getscl = dm1105_getscl; | ||
1060 | dev->i2c_bit.udelay = 10; | ||
1061 | dev->i2c_bit.timeout = 10; | ||
1062 | |||
1063 | /* Raise SCL and SDA */ | ||
1064 | dm1105_setsda(dev, 1); | ||
1065 | dm1105_setscl(dev, 1); | ||
1066 | |||
1067 | ret = i2c_bit_add_bus(&dev->i2c_bb_adap); | ||
1068 | if (ret < 0) | ||
1069 | goto err_i2c_del_adapter; | ||
1070 | |||
873 | /* dvb */ | 1071 | /* dvb */ |
874 | ret = dvb_register_adapter(&dev->dvb_adapter, DRIVER_NAME, | 1072 | ret = dvb_register_adapter(&dev->dvb_adapter, DRIVER_NAME, |
875 | THIS_MODULE, &pdev->dev, adapter_nr); | 1073 | THIS_MODULE, &pdev->dev, adapter_nr); |
876 | if (ret < 0) | 1074 | if (ret < 0) |
877 | goto err_i2c_del_adapter; | 1075 | goto err_i2c_del_adapters; |
878 | 1076 | ||
879 | dvb_adapter = &dev->dvb_adapter; | 1077 | dvb_adapter = &dev->dvb_adapter; |
880 | 1078 | ||
@@ -952,6 +1150,8 @@ err_dvb_dmx_release: | |||
952 | dvb_dmx_release(dvbdemux); | 1150 | dvb_dmx_release(dvbdemux); |
953 | err_dvb_unregister_adapter: | 1151 | err_dvb_unregister_adapter: |
954 | dvb_unregister_adapter(dvb_adapter); | 1152 | dvb_unregister_adapter(dvb_adapter); |
1153 | err_i2c_del_adapters: | ||
1154 | i2c_del_adapter(&dev->i2c_bb_adap); | ||
955 | err_i2c_del_adapter: | 1155 | err_i2c_del_adapter: |
956 | i2c_del_adapter(&dev->i2c_adap); | 1156 | i2c_del_adapter(&dev->i2c_adap); |
957 | err_dm1105_hw_exit: | 1157 | err_dm1105_hw_exit: |
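
Two details of the dm1105 rework are easy to miss. The LNB switch now goes through dm1105_gpio_andor(), so only the bits in the per-board lnb.mask are rewritten and the bit-banged I2C lines on GPIO13/GPIO14 stay untouched; and dm1105_setline() emulates an open-drain output by flipping the pin between input (released, pulled high externally) and output driven low. A sketch, not part of the patch, assuming dm_andorl() has the usual read-modify-write semantics (clear the mask bits, then OR in the value):

    /* Not part of the patch: LNB selection on the unbranded board. */
    dm1105_gpio_enable(dev, UNBR_LNB_MASK, 1);           /* GPIO16/GPIO17 become outputs  */
    dm1105_gpio_andor(dev, UNBR_LNB_MASK, UNBR_LNB_13V); /* GPIO17 high, GPIO16 low: 13 V */
    dm1105_gpio_andor(dev, UNBR_LNB_MASK, UNBR_LNB_18V); /* GPIO17 and GPIO16 high: 18 V  */
    dm1105_gpio_andor(dev, UNBR_LNB_MASK, UNBR_LNB_OFF); /* both low: LNB power off       */

    /* Open-drain emulation behind the i2c-algo-bit callbacks: */
    dm1105_setline(dev, GPIO14, 1); /* SCL released: pin is an input, pull-up takes it high */
    dm1105_setline(dev, GPIO14, 0); /* SCL driven:   pin is an output driven to 0           */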
diff --git a/drivers/media/dvb/dvb-usb/lmedm04.c b/drivers/media/dvb/dvb-usb/lmedm04.c index f36f471deae2..37b146961ae2 100644 --- a/drivers/media/dvb/dvb-usb/lmedm04.c +++ b/drivers/media/dvb/dvb-usb/lmedm04.c | |||
@@ -207,17 +207,6 @@ static int lme2510_stream_restart(struct dvb_usb_device *d) | |||
207 | rbuff, sizeof(rbuff)); | 207 | rbuff, sizeof(rbuff)); |
208 | return ret; | 208 | return ret; |
209 | } | 209 | } |
210 | static int lme2510_remote_keypress(struct dvb_usb_adapter *adap, u32 keypress) | ||
211 | { | ||
212 | struct dvb_usb_device *d = adap->dev; | ||
213 | |||
214 | deb_info(1, "INT Key Keypress =%04x", keypress); | ||
215 | |||
216 | if (keypress > 0) | ||
217 | rc_keydown(d->rc_dev, keypress, 0); | ||
218 | |||
219 | return 0; | ||
220 | } | ||
221 | 210 | ||
222 | static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out) | 211 | static int lme2510_enable_pid(struct dvb_usb_device *d, u8 index, u16 pid_out) |
223 | { | 212 | { |
@@ -256,6 +245,7 @@ static void lme2510_int_response(struct urb *lme_urb) | |||
256 | struct lme2510_state *st = adap->dev->priv; | 245 | struct lme2510_state *st = adap->dev->priv; |
257 | static u8 *ibuf, *rbuf; | 246 | static u8 *ibuf, *rbuf; |
258 | int i = 0, offset; | 247 | int i = 0, offset; |
248 | u32 key; | ||
259 | 249 | ||
260 | switch (lme_urb->status) { | 250 | switch (lme_urb->status) { |
261 | case 0: | 251 | case 0: |
@@ -282,10 +272,16 @@ static void lme2510_int_response(struct urb *lme_urb) | |||
282 | 272 | ||
283 | switch (ibuf[0]) { | 273 | switch (ibuf[0]) { |
284 | case 0xaa: | 274 | case 0xaa: |
285 | debug_data_snipet(1, "INT Remote data snipet in", ibuf); | 275 | debug_data_snipet(1, "INT Remote data snipet", ibuf); |
286 | lme2510_remote_keypress(adap, | 276 | if ((ibuf[4] + ibuf[5]) == 0xff) { |
287 | (u32)(ibuf[2] << 24) + (ibuf[3] << 16) + | 277 | key = ibuf[5]; |
288 | (ibuf[4] << 8) + ibuf[5]); | 278 | key += (ibuf[3] > 0) |
279 | ? (ibuf[3] ^ 0xff) << 8 : 0; | ||
280 | key += (ibuf[2] ^ 0xff) << 16; | ||
281 | deb_info(1, "INT Key =%08x", key); | ||
282 | if (adap->dev->rc_dev != NULL) | ||
283 | rc_keydown(adap->dev->rc_dev, key, 0); | ||
284 | } | ||
289 | break; | 285 | break; |
290 | case 0xbb: | 286 | case 0xbb: |
291 | switch (st->tuner_config) { | 287 | switch (st->tuner_config) { |
@@ -691,45 +687,6 @@ static int lme2510_streaming_ctrl(struct dvb_usb_adapter *adap, int onoff) | |||
691 | return (ret < 0) ? -ENODEV : 0; | 687 | return (ret < 0) ? -ENODEV : 0; |
692 | } | 688 | } |
693 | 689 | ||
694 | static int lme2510_int_service(struct dvb_usb_adapter *adap) | ||
695 | { | ||
696 | struct dvb_usb_device *d = adap->dev; | ||
697 | struct rc_dev *rc; | ||
698 | int ret; | ||
699 | |||
700 | info("STA Configuring Remote"); | ||
701 | |||
702 | rc = rc_allocate_device(); | ||
703 | if (!rc) | ||
704 | return -ENOMEM; | ||
705 | |||
706 | usb_make_path(d->udev, d->rc_phys, sizeof(d->rc_phys)); | ||
707 | strlcat(d->rc_phys, "/ir0", sizeof(d->rc_phys)); | ||
708 | |||
709 | rc->input_name = "LME2510 Remote Control"; | ||
710 | rc->input_phys = d->rc_phys; | ||
711 | rc->map_name = RC_MAP_LME2510; | ||
712 | rc->driver_name = "LME 2510"; | ||
713 | usb_to_input_id(d->udev, &rc->input_id); | ||
714 | |||
715 | ret = rc_register_device(rc); | ||
716 | if (ret) { | ||
717 | rc_free_device(rc); | ||
718 | return ret; | ||
719 | } | ||
720 | d->rc_dev = rc; | ||
721 | |||
722 | /* Start the Interrupt */ | ||
723 | ret = lme2510_int_read(adap); | ||
724 | if (ret < 0) { | ||
725 | rc_unregister_device(rc); | ||
726 | info("INT Unable to start Interrupt Service"); | ||
727 | return -ENODEV; | ||
728 | } | ||
729 | |||
730 | return 0; | ||
731 | } | ||
732 | |||
733 | static u8 check_sum(u8 *p, u8 len) | 690 | static u8 check_sum(u8 *p, u8 len) |
734 | { | 691 | { |
735 | u8 sum = 0; | 692 | u8 sum = 0; |
@@ -831,7 +788,7 @@ static int lme_firmware_switch(struct usb_device *udev, int cold) | |||
831 | 788 | ||
832 | cold_fw = !cold; | 789 | cold_fw = !cold; |
833 | 790 | ||
834 | if (udev->descriptor.idProduct == 0x1122) { | 791 | if (le16_to_cpu(udev->descriptor.idProduct) == 0x1122) { |
835 | switch (dvb_usb_lme2510_firmware) { | 792 | switch (dvb_usb_lme2510_firmware) { |
836 | default: | 793 | default: |
837 | dvb_usb_lme2510_firmware = TUNER_S0194; | 794 | dvb_usb_lme2510_firmware = TUNER_S0194; |
@@ -1053,8 +1010,11 @@ static int dm04_lme2510_frontend_attach(struct dvb_usb_adapter *adap) | |||
1053 | 1010 | ||
1054 | 1011 | ||
1055 | end: if (ret) { | 1012 | end: if (ret) { |
1056 | kfree(adap->fe); | 1013 | if (adap->fe) { |
1057 | adap->fe = NULL; | 1014 | dvb_frontend_detach(adap->fe); |
1015 | adap->fe = NULL; | ||
1016 | } | ||
1017 | adap->dev->props.rc.core.rc_codes = NULL; | ||
1058 | return -ENODEV; | 1018 | return -ENODEV; |
1059 | } | 1019 | } |
1060 | 1020 | ||
@@ -1097,8 +1057,12 @@ static int dm04_lme2510_tuner(struct dvb_usb_adapter *adap) | |||
1097 | return -ENODEV; | 1057 | return -ENODEV; |
1098 | } | 1058 | } |
1099 | 1059 | ||
1100 | /* Start the Interrupt & Remote*/ | 1060 | /* Start the Interrupt*/ |
1101 | ret = lme2510_int_service(adap); | 1061 | ret = lme2510_int_read(adap); |
1062 | if (ret < 0) { | ||
1063 | info("INT Unable to start Interrupt Service"); | ||
1064 | return -ENODEV; | ||
1065 | } | ||
1102 | 1066 | ||
1103 | return ret; | 1067 | return ret; |
1104 | } | 1068 | } |
@@ -1204,6 +1168,12 @@ static struct dvb_usb_device_properties lme2510_properties = { | |||
1204 | } | 1168 | } |
1205 | } | 1169 | } |
1206 | }, | 1170 | }, |
1171 | .rc.core = { | ||
1172 | .protocol = RC_TYPE_NEC, | ||
1173 | .module_name = "LME2510 Remote Control", | ||
1174 | .allowed_protos = RC_TYPE_NEC, | ||
1175 | .rc_codes = RC_MAP_LME2510, | ||
1176 | }, | ||
1207 | .power_ctrl = lme2510_powerup, | 1177 | .power_ctrl = lme2510_powerup, |
1208 | .identify_state = lme2510_identify_state, | 1178 | .identify_state = lme2510_identify_state, |
1209 | .i2c_algo = &lme2510_i2c_algo, | 1179 | .i2c_algo = &lme2510_i2c_algo, |
@@ -1246,6 +1216,12 @@ static struct dvb_usb_device_properties lme2510c_properties = { | |||
1246 | } | 1216 | } |
1247 | } | 1217 | } |
1248 | }, | 1218 | }, |
1219 | .rc.core = { | ||
1220 | .protocol = RC_TYPE_NEC, | ||
1221 | .module_name = "LME2510 Remote Control", | ||
1222 | .allowed_protos = RC_TYPE_NEC, | ||
1223 | .rc_codes = RC_MAP_LME2510, | ||
1224 | }, | ||
1249 | .power_ctrl = lme2510_powerup, | 1225 | .power_ctrl = lme2510_powerup, |
1250 | .identify_state = lme2510_identify_state, | 1226 | .identify_state = lme2510_identify_state, |
1251 | .i2c_algo = &lme2510_i2c_algo, | 1227 | .i2c_algo = &lme2510_i2c_algo, |
@@ -1269,19 +1245,21 @@ static void *lme2510_exit_int(struct dvb_usb_device *d) | |||
1269 | adap->feedcount = 0; | 1245 | adap->feedcount = 0; |
1270 | } | 1246 | } |
1271 | 1247 | ||
1272 | if (st->lme_urb != NULL) { | 1248 | if (st->usb_buffer != NULL) { |
1273 | st->i2c_talk_onoff = 1; | 1249 | st->i2c_talk_onoff = 1; |
1274 | st->signal_lock = 0; | 1250 | st->signal_lock = 0; |
1275 | st->signal_level = 0; | 1251 | st->signal_level = 0; |
1276 | st->signal_sn = 0; | 1252 | st->signal_sn = 0; |
1277 | buffer = st->usb_buffer; | 1253 | buffer = st->usb_buffer; |
1254 | } | ||
1255 | |||
1256 | if (st->lme_urb != NULL) { | ||
1278 | usb_kill_urb(st->lme_urb); | 1257 | usb_kill_urb(st->lme_urb); |
1279 | usb_free_coherent(d->udev, 5000, st->buffer, | 1258 | usb_free_coherent(d->udev, 5000, st->buffer, |
1280 | st->lme_urb->transfer_dma); | 1259 | st->lme_urb->transfer_dma); |
1281 | info("Interrupt Service Stopped"); | 1260 | info("Interrupt Service Stopped"); |
1282 | rc_unregister_device(d->rc_dev); | ||
1283 | info("Remote Stopped"); | ||
1284 | } | 1261 | } |
1262 | |||
1285 | return buffer; | 1263 | return buffer; |
1286 | } | 1264 | } |
1287 | 1265 | ||
@@ -1293,7 +1271,8 @@ static void lme2510_exit(struct usb_interface *intf) | |||
1293 | if (d != NULL) { | 1271 | if (d != NULL) { |
1294 | usb_buffer = lme2510_exit_int(d); | 1272 | usb_buffer = lme2510_exit_int(d); |
1295 | dvb_usb_device_exit(intf); | 1273 | dvb_usb_device_exit(intf); |
1296 | kfree(usb_buffer); | 1274 | if (usb_buffer != NULL) |
1275 | kfree(usb_buffer); | ||
1297 | } | 1276 | } |
1298 | } | 1277 | } |
1299 | 1278 | ||
@@ -1327,5 +1306,5 @@ module_exit(lme2510_module_exit); | |||
1327 | 1306 | ||
1328 | MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); | 1307 | MODULE_AUTHOR("Malcolm Priestley <tvboxspy@gmail.com>"); |
1329 | MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); | 1308 | MODULE_DESCRIPTION("LME2510(C) DVB-S USB2.0"); |
1330 | MODULE_VERSION("1.86"); | 1309 | MODULE_VERSION("1.88"); |
1331 | MODULE_LICENSE("GPL"); | 1310 | MODULE_LICENSE("GPL"); |
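
The inline decode that replaces lme2510_remote_keypress() only reports a key when the last two bytes of the interrupt message are complementary (command plus inverted command summing to 0xff, matching the NEC framing declared in the new rc.core properties), and it rebuilds the scancode by inverting the address bytes. A trace with hypothetical buffer contents, not part of the patch:

    /* Not part of the patch: hypothetical ibuf[] from a 0xaa interrupt message. */
    u8 ibuf[6] = { 0xaa, 0x00, 0xff, 0x00, 0xfe, 0x01 };
    u32 key;

    if ((ibuf[4] + ibuf[5]) == 0xff) {                    /* 0xfe + 0x01 == 0xff: valid pair  */
    	key  = ibuf[5];                                   /* command byte          -> 0x01    */
    	key += (ibuf[3] > 0) ? (ibuf[3] ^ 0xff) << 8 : 0; /* ibuf[3] == 0, nothing added      */
    	key += (ibuf[2] ^ 0xff) << 16;                    /* 0xff ^ 0xff == 0, key stays 0x01 */
    	/* rc_keydown(rc_dev, key, 0) would then report scancode 0x000001 */
    }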
diff --git a/drivers/media/dvb/frontends/stb0899_algo.c b/drivers/media/dvb/frontends/stb0899_algo.c index 2da55ec20392..d70eee00f33a 100644 --- a/drivers/media/dvb/frontends/stb0899_algo.c +++ b/drivers/media/dvb/frontends/stb0899_algo.c | |||
@@ -23,7 +23,7 @@ | |||
23 | #include "stb0899_priv.h" | 23 | #include "stb0899_priv.h" |
24 | #include "stb0899_reg.h" | 24 | #include "stb0899_reg.h" |
25 | 25 | ||
26 | inline u32 stb0899_do_div(u64 n, u32 d) | 26 | static inline u32 stb0899_do_div(u64 n, u32 d) |
27 | { | 27 | { |
28 | /* wrap do_div() for ease of use */ | 28 | /* wrap do_div() for ease of use */ |
29 | 29 | ||
diff --git a/drivers/media/dvb/frontends/tda8261.c b/drivers/media/dvb/frontends/tda8261.c index 1742056a34e8..53c7d8f1df28 100644 --- a/drivers/media/dvb/frontends/tda8261.c +++ b/drivers/media/dvb/frontends/tda8261.c | |||
@@ -224,7 +224,6 @@ exit: | |||
224 | } | 224 | } |
225 | 225 | ||
226 | EXPORT_SYMBOL(tda8261_attach); | 226 | EXPORT_SYMBOL(tda8261_attach); |
227 | MODULE_PARM_DESC(verbose, "Set verbosity level"); | ||
228 | 227 | ||
229 | MODULE_AUTHOR("Manu Abraham"); | 228 | MODULE_AUTHOR("Manu Abraham"); |
230 | MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner"); | 229 | MODULE_DESCRIPTION("TDA8261 8PSK/QPSK Tuner"); |
diff --git a/drivers/media/radio/radio-maxiradio.c b/drivers/media/radio/radio-maxiradio.c index 5c2a9058c09f..e83e84003025 100644 --- a/drivers/media/radio/radio-maxiradio.c +++ b/drivers/media/radio/radio-maxiradio.c | |||
@@ -412,8 +412,7 @@ static int __devinit maxiradio_init_one(struct pci_dev *pdev, const struct pci_d | |||
412 | goto err_out_free_region; | 412 | goto err_out_free_region; |
413 | } | 413 | } |
414 | 414 | ||
415 | v4l2_info(v4l2_dev, "version " DRIVER_VERSION | 415 | v4l2_info(v4l2_dev, "version " DRIVER_VERSION "\n"); |
416 | " time " __TIME__ " " __DATE__ "\n"); | ||
417 | 416 | ||
418 | v4l2_info(v4l2_dev, "found Guillemot MAXI Radio device (io = 0x%x)\n", | 417 | v4l2_info(v4l2_dev, "found Guillemot MAXI Radio device (io = 0x%x)\n", |
419 | dev->io); | 418 | dev->io); |
diff --git a/drivers/media/radio/radio-timb.c b/drivers/media/radio/radio-timb.c index 1e3a8dd820a4..a185610b376b 100644 --- a/drivers/media/radio/radio-timb.c +++ b/drivers/media/radio/radio-timb.c | |||
@@ -21,7 +21,6 @@ | |||
21 | #include <media/v4l2-ioctl.h> | 21 | #include <media/v4l2-ioctl.h> |
22 | #include <media/v4l2-device.h> | 22 | #include <media/v4l2-device.h> |
23 | #include <linux/platform_device.h> | 23 | #include <linux/platform_device.h> |
24 | #include <linux/mfd/core.h> | ||
25 | #include <linux/interrupt.h> | 24 | #include <linux/interrupt.h> |
26 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
27 | #include <linux/i2c.h> | 26 | #include <linux/i2c.h> |
@@ -149,7 +148,7 @@ static const struct v4l2_file_operations timbradio_fops = { | |||
149 | 148 | ||
150 | static int __devinit timbradio_probe(struct platform_device *pdev) | 149 | static int __devinit timbradio_probe(struct platform_device *pdev) |
151 | { | 150 | { |
152 | struct timb_radio_platform_data *pdata = mfd_get_data(pdev); | 151 | struct timb_radio_platform_data *pdata = pdev->dev.platform_data; |
153 | struct timbradio *tr; | 152 | struct timbradio *tr; |
154 | int err; | 153 | int err; |
155 | 154 | ||
diff --git a/drivers/media/radio/radio-wl1273.c b/drivers/media/radio/radio-wl1273.c index e2550dc2944f..459f7272d326 100644 --- a/drivers/media/radio/radio-wl1273.c +++ b/drivers/media/radio/radio-wl1273.c | |||
@@ -1382,7 +1382,7 @@ static int wl1273_fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl) | |||
1382 | 1382 | ||
1383 | switch (ctrl->id) { | 1383 | switch (ctrl->id) { |
1384 | case V4L2_CID_TUNE_ANTENNA_CAPACITOR: | 1384 | case V4L2_CID_TUNE_ANTENNA_CAPACITOR: |
1385 | ctrl->val = wl1273_fm_get_tx_ctune(radio); | 1385 | ctrl->cur.val = wl1273_fm_get_tx_ctune(radio); |
1386 | break; | 1386 | break; |
1387 | 1387 | ||
1388 | default: | 1388 | default: |
@@ -1990,7 +1990,7 @@ static int wl1273_fm_radio_remove(struct platform_device *pdev) | |||
1990 | 1990 | ||
1991 | static int __devinit wl1273_fm_radio_probe(struct platform_device *pdev) | 1991 | static int __devinit wl1273_fm_radio_probe(struct platform_device *pdev) |
1992 | { | 1992 | { |
1993 | struct wl1273_core **core = mfd_get_data(pdev); | 1993 | struct wl1273_core **core = pdev->dev.platform_data; |
1994 | struct wl1273_device *radio; | 1994 | struct wl1273_device *radio; |
1995 | struct v4l2_ctrl *ctrl; | 1995 | struct v4l2_ctrl *ctrl; |
1996 | int r = 0; | 1996 | int r = 0; |
diff --git a/drivers/media/radio/wl128x/fmdrv_v4l2.c b/drivers/media/radio/wl128x/fmdrv_v4l2.c index d50e5ac75ab6..87010724f914 100644 --- a/drivers/media/radio/wl128x/fmdrv_v4l2.c +++ b/drivers/media/radio/wl128x/fmdrv_v4l2.c | |||
@@ -191,7 +191,7 @@ static int fm_g_volatile_ctrl(struct v4l2_ctrl *ctrl) | |||
191 | 191 | ||
192 | switch (ctrl->id) { | 192 | switch (ctrl->id) { |
193 | case V4L2_CID_TUNE_ANTENNA_CAPACITOR: | 193 | case V4L2_CID_TUNE_ANTENNA_CAPACITOR: |
194 | ctrl->val = fm_tx_get_tune_cap_val(fmdev); | 194 | ctrl->cur.val = fm_tx_get_tune_cap_val(fmdev); |
195 | break; | 195 | break; |
196 | default: | 196 | default: |
197 | fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id); | 197 | fmwarn("%s: Unknown IOCTL: %d\n", __func__, ctrl->id); |
diff --git a/drivers/media/rc/Kconfig b/drivers/media/rc/Kconfig index 154c337f00fd..7d4bbc226d06 100644 --- a/drivers/media/rc/Kconfig +++ b/drivers/media/rc/Kconfig | |||
@@ -148,6 +148,18 @@ config IR_ITE_CIR | |||
148 | To compile this driver as a module, choose M here: the | 148 | To compile this driver as a module, choose M here: the |
149 | module will be called ite-cir. | 149 | module will be called ite-cir. |
150 | 150 | ||
151 | config IR_FINTEK | ||
152 | tristate "Fintek Consumer Infrared Transceiver" | ||
153 | depends on PNP | ||
154 | depends on RC_CORE | ||
155 | ---help--- | ||
156 | Say Y here to enable support for integrated infrared receiver | ||
157 | /transciever made by Fintek. This chip is found on assorted | ||
158 | Jetway motherboards (and of course, possibly others). | ||
159 | |||
160 | To compile this driver as a module, choose M here: the | ||
161 | module will be called fintek-cir. | ||
162 | |||
151 | config IR_NUVOTON | 163 | config IR_NUVOTON |
152 | tristate "Nuvoton w836x7hg Consumer Infrared Transceiver" | 164 | tristate "Nuvoton w836x7hg Consumer Infrared Transceiver" |
153 | depends on PNP | 165 | depends on PNP |
diff --git a/drivers/media/rc/Makefile b/drivers/media/rc/Makefile index 1f90a219a162..52830e5f4eaa 100644 --- a/drivers/media/rc/Makefile +++ b/drivers/media/rc/Makefile | |||
@@ -16,6 +16,7 @@ obj-$(CONFIG_IR_LIRC_CODEC) += ir-lirc-codec.o | |||
16 | obj-$(CONFIG_IR_IMON) += imon.o | 16 | obj-$(CONFIG_IR_IMON) += imon.o |
17 | obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o | 17 | obj-$(CONFIG_IR_ITE_CIR) += ite-cir.o |
18 | obj-$(CONFIG_IR_MCEUSB) += mceusb.o | 18 | obj-$(CONFIG_IR_MCEUSB) += mceusb.o |
19 | obj-$(CONFIG_IR_FINTEK) += fintek-cir.o | ||
19 | obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o | 20 | obj-$(CONFIG_IR_NUVOTON) += nuvoton-cir.o |
20 | obj-$(CONFIG_IR_ENE) += ene_ir.o | 21 | obj-$(CONFIG_IR_ENE) += ene_ir.o |
21 | obj-$(CONFIG_IR_REDRAT3) += redrat3.o | 22 | obj-$(CONFIG_IR_REDRAT3) += redrat3.o |
diff --git a/drivers/media/rc/fintek-cir.c b/drivers/media/rc/fintek-cir.c new file mode 100644 index 000000000000..8fa539dde1b4 --- /dev/null +++ b/drivers/media/rc/fintek-cir.c | |||
@@ -0,0 +1,684 @@ | |||
1 | /* | ||
2 | * Driver for Feature Integration Technology Inc. (aka Fintek) LPC CIR | ||
3 | * | ||
4 | * Copyright (C) 2011 Jarod Wilson <jarod@redhat.com> | ||
5 | * | ||
6 | * Special thanks to Fintek for providing hardware and spec sheets. | ||
7 | * This driver is based upon the nuvoton, ite and ene drivers for | ||
8 | * similar hardware. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License as | ||
12 | * published by the Free Software Foundation; either version 2 of the | ||
13 | * License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
23 | * USA | ||
24 | */ | ||
25 | |||
26 | #include <linux/kernel.h> | ||
27 | #include <linux/module.h> | ||
28 | #include <linux/pnp.h> | ||
29 | #include <linux/io.h> | ||
30 | #include <linux/interrupt.h> | ||
31 | #include <linux/sched.h> | ||
32 | #include <linux/slab.h> | ||
33 | #include <media/rc-core.h> | ||
34 | #include <linux/pci_ids.h> | ||
35 | |||
36 | #include "fintek-cir.h" | ||
37 | |||
38 | /* write val to config reg */ | ||
39 | static inline void fintek_cr_write(struct fintek_dev *fintek, u8 val, u8 reg) | ||
40 | { | ||
41 | fit_dbg("%s: reg 0x%02x, val 0x%02x (ip/dp: %02x/%02x)", | ||
42 | __func__, reg, val, fintek->cr_ip, fintek->cr_dp); | ||
43 | outb(reg, fintek->cr_ip); | ||
44 | outb(val, fintek->cr_dp); | ||
45 | } | ||
46 | |||
47 | /* read val from config reg */ | ||
48 | static inline u8 fintek_cr_read(struct fintek_dev *fintek, u8 reg) | ||
49 | { | ||
50 | u8 val; | ||
51 | |||
52 | outb(reg, fintek->cr_ip); | ||
53 | val = inb(fintek->cr_dp); | ||
54 | |||
55 | fit_dbg("%s: reg 0x%02x, val 0x%02x (ip/dp: %02x/%02x)", | ||
56 | __func__, reg, val, fintek->cr_ip, fintek->cr_dp); | ||
57 | return val; | ||
58 | } | ||
59 | |||
60 | /* update config register bit without changing other bits */ | ||
61 | static inline void fintek_set_reg_bit(struct fintek_dev *fintek, u8 val, u8 reg) | ||
62 | { | ||
63 | u8 tmp = fintek_cr_read(fintek, reg) | val; | ||
64 | fintek_cr_write(fintek, tmp, reg); | ||
65 | } | ||
66 | |||
67 | /* clear config register bit without changing other bits */ | ||
68 | static inline void fintek_clear_reg_bit(struct fintek_dev *fintek, u8 val, u8 reg) | ||
69 | { | ||
70 | u8 tmp = fintek_cr_read(fintek, reg) & ~val; | ||
71 | fintek_cr_write(fintek, tmp, reg); | ||
72 | } | ||
73 | |||
74 | /* enter config mode */ | ||
75 | static inline void fintek_config_mode_enable(struct fintek_dev *fintek) | ||
76 | { | ||
77 | /* Enabling Config Mode explicitly requires writing 2x */ | ||
78 | outb(CONFIG_REG_ENABLE, fintek->cr_ip); | ||
79 | outb(CONFIG_REG_ENABLE, fintek->cr_ip); | ||
80 | } | ||
81 | |||
82 | /* exit config mode */ | ||
83 | static inline void fintek_config_mode_disable(struct fintek_dev *fintek) | ||
84 | { | ||
85 | outb(CONFIG_REG_DISABLE, fintek->cr_ip); | ||
86 | } | ||
87 | |||
88 | /* | ||
89 | * When you want to address a specific logical device, write its logical | ||
90 | * device number to GCR_LOGICAL_DEV_NO | ||
91 | */ | ||
92 | static inline void fintek_select_logical_dev(struct fintek_dev *fintek, u8 ldev) | ||
93 | { | ||
94 | fintek_cr_write(fintek, ldev, GCR_LOGICAL_DEV_NO); | ||
95 | } | ||
96 | |||
97 | /* write val to cir config register */ | ||
98 | static inline void fintek_cir_reg_write(struct fintek_dev *fintek, u8 val, u8 offset) | ||
99 | { | ||
100 | outb(val, fintek->cir_addr + offset); | ||
101 | } | ||
102 | |||
103 | /* read val from cir config register */ | ||
104 | static u8 fintek_cir_reg_read(struct fintek_dev *fintek, u8 offset) | ||
105 | { | ||
106 | u8 val; | ||
107 | |||
108 | val = inb(fintek->cir_addr + offset); | ||
109 | |||
110 | return val; | ||
111 | } | ||
112 | |||
113 | #define pr_reg(text, ...) \ | ||
114 | printk(KERN_INFO KBUILD_MODNAME ": " text, ## __VA_ARGS__) | ||
115 | |||
116 | /* dump current cir register contents */ | ||
117 | static void cir_dump_regs(struct fintek_dev *fintek) | ||
118 | { | ||
119 | fintek_config_mode_enable(fintek); | ||
120 | fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR); | ||
121 | |||
122 | pr_reg("%s: Dump CIR logical device registers:\n", FINTEK_DRIVER_NAME); | ||
123 | pr_reg(" * CR CIR BASE ADDR: 0x%x\n", | ||
124 | (fintek_cr_read(fintek, CIR_CR_BASE_ADDR_HI) << 8) | | ||
125 | fintek_cr_read(fintek, CIR_CR_BASE_ADDR_LO)); | ||
126 | pr_reg(" * CR CIR IRQ NUM: 0x%x\n", | ||
127 | fintek_cr_read(fintek, CIR_CR_IRQ_SEL)); | ||
128 | |||
129 | fintek_config_mode_disable(fintek); | ||
130 | |||
131 | pr_reg("%s: Dump CIR registers:\n", FINTEK_DRIVER_NAME); | ||
132 | pr_reg(" * STATUS: 0x%x\n", fintek_cir_reg_read(fintek, CIR_STATUS)); | ||
133 | pr_reg(" * CONTROL: 0x%x\n", fintek_cir_reg_read(fintek, CIR_CONTROL)); | ||
134 | pr_reg(" * RX_DATA: 0x%x\n", fintek_cir_reg_read(fintek, CIR_RX_DATA)); | ||
135 | pr_reg(" * TX_CONTROL: 0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_CONTROL)); | ||
136 | pr_reg(" * TX_DATA: 0x%x\n", fintek_cir_reg_read(fintek, CIR_TX_DATA)); | ||
137 | } | ||
138 | |||
139 | /* detect hardware features */ | ||
140 | static int fintek_hw_detect(struct fintek_dev *fintek) | ||
141 | { | ||
142 | unsigned long flags; | ||
143 | u8 chip_major, chip_minor; | ||
144 | u8 vendor_major, vendor_minor; | ||
145 | u8 portsel, ir_class; | ||
146 | u16 vendor; | ||
147 | int ret = 0; | ||
148 | |||
149 | fintek_config_mode_enable(fintek); | ||
150 | |||
151 | /* Check if we're using config port 0x4e or 0x2e */ | ||
152 | portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL); | ||
153 | if (portsel == 0xff) { | ||
154 | fit_pr(KERN_INFO, "first portsel read was bunk, trying alt"); | ||
155 | fintek_config_mode_disable(fintek); | ||
156 | fintek->cr_ip = CR_INDEX_PORT2; | ||
157 | fintek->cr_dp = CR_DATA_PORT2; | ||
158 | fintek_config_mode_enable(fintek); | ||
159 | portsel = fintek_cr_read(fintek, GCR_CONFIG_PORT_SEL); | ||
160 | } | ||
161 | fit_dbg("portsel reg: 0x%02x", portsel); | ||
162 | |||
163 | ir_class = fintek_cir_reg_read(fintek, CIR_CR_CLASS); | ||
164 | fit_dbg("ir_class reg: 0x%02x", ir_class); | ||
165 | |||
166 | switch (ir_class) { | ||
167 | case CLASS_RX_2TX: | ||
168 | case CLASS_RX_1TX: | ||
169 | fintek->hw_tx_capable = true; | ||
170 | break; | ||
171 | case CLASS_RX_ONLY: | ||
172 | default: | ||
173 | fintek->hw_tx_capable = false; | ||
174 | break; | ||
175 | } | ||
176 | |||
177 | chip_major = fintek_cr_read(fintek, GCR_CHIP_ID_HI); | ||
178 | chip_minor = fintek_cr_read(fintek, GCR_CHIP_ID_LO); | ||
179 | |||
180 | vendor_major = fintek_cr_read(fintek, GCR_VENDOR_ID_HI); | ||
181 | vendor_minor = fintek_cr_read(fintek, GCR_VENDOR_ID_LO); | ||
182 | vendor = vendor_major << 8 | vendor_minor; | ||
183 | |||
184 | if (vendor != VENDOR_ID_FINTEK) | ||
185 | fit_pr(KERN_WARNING, "Unknown vendor ID: 0x%04x", vendor); | ||
186 | else | ||
187 | fit_dbg("Read Fintek vendor ID from chip"); | ||
188 | |||
189 | fintek_config_mode_disable(fintek); | ||
190 | |||
191 | spin_lock_irqsave(&fintek->fintek_lock, flags); | ||
192 | fintek->chip_major = chip_major; | ||
193 | fintek->chip_minor = chip_minor; | ||
194 | fintek->chip_vendor = vendor; | ||
195 | spin_unlock_irqrestore(&fintek->fintek_lock, flags); | ||
196 | |||
197 | return ret; | ||
198 | } | ||
199 | |||
200 | static void fintek_cir_ldev_init(struct fintek_dev *fintek) | ||
201 | { | ||
202 | /* Select CIR logical device and enable */ | ||
203 | fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR); | ||
204 | fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN); | ||
205 | |||
206 | /* Write allocated CIR address and IRQ information to hardware */ | ||
207 | fintek_cr_write(fintek, fintek->cir_addr >> 8, CIR_CR_BASE_ADDR_HI); | ||
208 | fintek_cr_write(fintek, fintek->cir_addr & 0xff, CIR_CR_BASE_ADDR_LO); | ||
209 | |||
210 | fintek_cr_write(fintek, fintek->cir_irq, CIR_CR_IRQ_SEL); | ||
211 | |||
212 | fit_dbg("CIR initialized, base io address: 0x%lx, irq: %d (len: %d)", | ||
213 | fintek->cir_addr, fintek->cir_irq, fintek->cir_port_len); | ||
214 | } | ||
215 | |||
216 | /* enable CIR interrupts */ | ||
217 | static void fintek_enable_cir_irq(struct fintek_dev *fintek) | ||
218 | { | ||
219 | fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS); | ||
220 | } | ||
221 | |||
222 | static void fintek_cir_regs_init(struct fintek_dev *fintek) | ||
223 | { | ||
224 | /* clear any and all stray interrupts */ | ||
225 | fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS); | ||
226 | |||
227 | /* and finally, enable interrupts */ | ||
228 | fintek_enable_cir_irq(fintek); | ||
229 | } | ||
230 | |||
231 | static void fintek_enable_wake(struct fintek_dev *fintek) | ||
232 | { | ||
233 | fintek_config_mode_enable(fintek); | ||
234 | fintek_select_logical_dev(fintek, LOGICAL_DEV_ACPI); | ||
235 | |||
236 | /* Allow CIR PME's to wake system */ | ||
237 | fintek_set_reg_bit(fintek, ACPI_WAKE_EN_CIR_BIT, LDEV_ACPI_WAKE_EN_REG); | ||
238 | /* Enable CIR PME's */ | ||
239 | fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_EN_REG); | ||
240 | /* Clear CIR PME status register */ | ||
241 | fintek_set_reg_bit(fintek, ACPI_PME_CIR_BIT, LDEV_ACPI_PME_CLR_REG); | ||
242 | /* Save state */ | ||
243 | fintek_set_reg_bit(fintek, ACPI_STATE_CIR_BIT, LDEV_ACPI_STATE_REG); | ||
244 | |||
245 | fintek_config_mode_disable(fintek); | ||
246 | } | ||
247 | |||
248 | static int fintek_cmdsize(u8 cmd, u8 subcmd) | ||
249 | { | ||
250 | int datasize = 0; | ||
251 | |||
252 | switch (cmd) { | ||
253 | case BUF_COMMAND_NULL: | ||
254 | if (subcmd == BUF_HW_CMD_HEADER) | ||
255 | datasize = 1; | ||
256 | break; | ||
257 | case BUF_HW_CMD_HEADER: | ||
258 | if (subcmd == BUF_CMD_G_REVISION) | ||
259 | datasize = 2; | ||
260 | break; | ||
261 | case BUF_COMMAND_HEADER: | ||
262 | switch (subcmd) { | ||
263 | case BUF_CMD_S_CARRIER: | ||
264 | case BUF_CMD_S_TIMEOUT: | ||
265 | case BUF_RSP_PULSE_COUNT: | ||
266 | datasize = 2; | ||
267 | break; | ||
268 | case BUF_CMD_SIG_END: | ||
269 | case BUF_CMD_S_TXMASK: | ||
270 | case BUF_CMD_S_RXSENSOR: | ||
271 | datasize = 1; | ||
272 | break; | ||
273 | } | ||
274 | } | ||
275 | |||
276 | return datasize; | ||
277 | } | ||
278 | |||
279 | /* process ir data stored in driver buffer */ | ||
280 | static void fintek_process_rx_ir_data(struct fintek_dev *fintek) | ||
281 | { | ||
282 | DEFINE_IR_RAW_EVENT(rawir); | ||
283 | u8 sample; | ||
284 | int i; | ||
285 | |||
286 | for (i = 0; i < fintek->pkts; i++) { | ||
287 | sample = fintek->buf[i]; | ||
288 | switch (fintek->parser_state) { | ||
289 | case CMD_HEADER: | ||
290 | fintek->cmd = sample; | ||
291 | if ((fintek->cmd == BUF_COMMAND_HEADER) || | ||
292 | ((fintek->cmd & BUF_COMMAND_MASK) != | ||
293 | BUF_PULSE_BIT)) { | ||
294 | fintek->parser_state = SUBCMD; | ||
295 | continue; | ||
296 | } | ||
297 | fintek->rem = (fintek->cmd & BUF_LEN_MASK); | ||
298 | fit_dbg("%s: rem: 0x%02x", __func__, fintek->rem); | ||
299 | if (fintek->rem) | ||
300 | fintek->parser_state = PARSE_IRDATA; | ||
301 | else | ||
302 | ir_raw_event_reset(fintek->rdev); | ||
303 | break; | ||
304 | case SUBCMD: | ||
305 | fintek->rem = fintek_cmdsize(fintek->cmd, sample); | ||
306 | fintek->parser_state = CMD_DATA; | ||
307 | break; | ||
308 | case CMD_DATA: | ||
309 | fintek->rem--; | ||
310 | break; | ||
311 | case PARSE_IRDATA: | ||
312 | fintek->rem--; | ||
313 | init_ir_raw_event(&rawir); | ||
314 | rawir.pulse = ((sample & BUF_PULSE_BIT) != 0); | ||
315 | rawir.duration = US_TO_NS((sample & BUF_SAMPLE_MASK) | ||
316 | * CIR_SAMPLE_PERIOD); | ||
317 | |||
318 | fit_dbg("Storing %s with duration %d", | ||
319 | rawir.pulse ? "pulse" : "space", | ||
320 | rawir.duration); | ||
321 | ir_raw_event_store_with_filter(fintek->rdev, &rawir); | ||
322 | break; | ||
323 | } | ||
324 | |||
325 | if ((fintek->parser_state != CMD_HEADER) && !fintek->rem) | ||
326 | fintek->parser_state = CMD_HEADER; | ||
327 | } | ||
328 | |||
329 | fintek->pkts = 0; | ||
330 | |||
331 | fit_dbg("Calling ir_raw_event_handle"); | ||
332 | ir_raw_event_handle(fintek->rdev); | ||
333 | } | ||
334 | |||
335 | /* copy data from hardware rx register into driver buffer */ | ||
336 | static void fintek_get_rx_ir_data(struct fintek_dev *fintek, u8 rx_irqs) | ||
337 | { | ||
338 | unsigned long flags; | ||
339 | u8 sample, status; | ||
340 | |||
341 | spin_lock_irqsave(&fintek->fintek_lock, flags); | ||
342 | |||
343 | /* | ||
344 | * We must read data from CIR_RX_DATA until the hardware IR buffer | ||
345 | * is empty and clears the RX_TIMEOUT and/or RX_RECEIVE flags in | ||
346 | * the CIR_STATUS register | ||
347 | */ | ||
348 | do { | ||
349 | sample = fintek_cir_reg_read(fintek, CIR_RX_DATA); | ||
350 | fit_dbg("%s: sample: 0x%02x", __func__, sample); | ||
351 | |||
352 | fintek->buf[fintek->pkts] = sample; | ||
353 | fintek->pkts++; | ||
354 | |||
355 | status = fintek_cir_reg_read(fintek, CIR_STATUS); | ||
356 | if (!(status & CIR_STATUS_IRQ_EN)) | ||
357 | break; | ||
358 | } while (status & rx_irqs); | ||
359 | |||
360 | fintek_process_rx_ir_data(fintek); | ||
361 | |||
362 | spin_unlock_irqrestore(&fintek->fintek_lock, flags); | ||
363 | } | ||
364 | |||
365 | static void fintek_cir_log_irqs(u8 status) | ||
366 | { | ||
367 | fit_pr(KERN_INFO, "IRQ 0x%02x:%s%s%s%s%s", status, | ||
368 | status & CIR_STATUS_IRQ_EN ? " IRQEN" : "", | ||
369 | status & CIR_STATUS_TX_FINISH ? " TXF" : "", | ||
370 | status & CIR_STATUS_TX_UNDERRUN ? " TXU" : "", | ||
371 | status & CIR_STATUS_RX_TIMEOUT ? " RXTO" : "", | ||
372 | status & CIR_STATUS_RX_RECEIVE ? " RXOK" : ""); | ||
373 | } | ||
374 | |||
375 | /* interrupt service routine for incoming and outgoing CIR data */ | ||
376 | static irqreturn_t fintek_cir_isr(int irq, void *data) | ||
377 | { | ||
378 | struct fintek_dev *fintek = data; | ||
379 | u8 status, rx_irqs; | ||
380 | |||
381 | fit_dbg_verbose("%s firing", __func__); | ||
382 | |||
383 | fintek_config_mode_enable(fintek); | ||
384 | fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR); | ||
385 | fintek_config_mode_disable(fintek); | ||
386 | |||
387 | /* | ||
388 | * Get IR Status register contents. Write 1 to ack/clear | ||
389 | * | ||
390 | * bit: reg name - description | ||
391 | * 3: TX_FINISH - TX is finished | ||
392 | * 2: TX_UNDERRUN - TX underrun | ||
393 | * 1: RX_TIMEOUT - RX data timeout | ||
394 | * 0: RX_RECEIVE - RX data received | ||
395 | */ | ||
396 | status = fintek_cir_reg_read(fintek, CIR_STATUS); | ||
397 | if (!(status & CIR_STATUS_IRQ_MASK) || status == 0xff) { | ||
398 | fit_dbg_verbose("%s exiting, IRSTS 0x%02x", __func__, status); | ||
399 | fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS); | ||
400 | return IRQ_RETVAL(IRQ_NONE); | ||
401 | } | ||
402 | |||
403 | if (debug) | ||
404 | fintek_cir_log_irqs(status); | ||
405 | |||
406 | rx_irqs = status & (CIR_STATUS_RX_RECEIVE | CIR_STATUS_RX_TIMEOUT); | ||
407 | if (rx_irqs) | ||
408 | fintek_get_rx_ir_data(fintek, rx_irqs); | ||
409 | |||
410 | /* ack/clear all irq flags we've got */ | ||
411 | fintek_cir_reg_write(fintek, status, CIR_STATUS); | ||
412 | |||
413 | fit_dbg_verbose("%s done", __func__); | ||
414 | return IRQ_RETVAL(IRQ_HANDLED); | ||
415 | } | ||
416 | |||
417 | static void fintek_enable_cir(struct fintek_dev *fintek) | ||
418 | { | ||
419 | /* set IRQ enabled */ | ||
420 | fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_EN, CIR_STATUS); | ||
421 | |||
422 | fintek_config_mode_enable(fintek); | ||
423 | |||
424 | /* enable the CIR logical device */ | ||
425 | fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR); | ||
426 | fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN); | ||
427 | |||
428 | fintek_config_mode_disable(fintek); | ||
429 | |||
430 | /* clear all pending interrupts */ | ||
431 | fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS); | ||
432 | |||
433 | /* enable interrupts */ | ||
434 | fintek_enable_cir_irq(fintek); | ||
435 | } | ||
436 | |||
437 | static void fintek_disable_cir(struct fintek_dev *fintek) | ||
438 | { | ||
439 | fintek_config_mode_enable(fintek); | ||
440 | |||
441 | /* disable the CIR logical device */ | ||
442 | fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR); | ||
443 | fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN); | ||
444 | |||
445 | fintek_config_mode_disable(fintek); | ||
446 | } | ||
447 | |||
448 | static int fintek_open(struct rc_dev *dev) | ||
449 | { | ||
450 | struct fintek_dev *fintek = dev->priv; | ||
451 | unsigned long flags; | ||
452 | |||
453 | spin_lock_irqsave(&fintek->fintek_lock, flags); | ||
454 | fintek_enable_cir(fintek); | ||
455 | spin_unlock_irqrestore(&fintek->fintek_lock, flags); | ||
456 | |||
457 | return 0; | ||
458 | } | ||
459 | |||
460 | static void fintek_close(struct rc_dev *dev) | ||
461 | { | ||
462 | struct fintek_dev *fintek = dev->priv; | ||
463 | unsigned long flags; | ||
464 | |||
465 | spin_lock_irqsave(&fintek->fintek_lock, flags); | ||
466 | fintek_disable_cir(fintek); | ||
467 | spin_unlock_irqrestore(&fintek->fintek_lock, flags); | ||
468 | } | ||
469 | |||
470 | /* Allocate memory, probe hardware, and initialize everything */ | ||
471 | static int fintek_probe(struct pnp_dev *pdev, const struct pnp_device_id *dev_id) | ||
472 | { | ||
473 | struct fintek_dev *fintek; | ||
474 | struct rc_dev *rdev; | ||
475 | int ret = -ENOMEM; | ||
476 | |||
477 | fintek = kzalloc(sizeof(struct fintek_dev), GFP_KERNEL); | ||
478 | if (!fintek) | ||
479 | return ret; | ||
480 | |||
481 | /* input device for IR remote (and tx) */ | ||
482 | rdev = rc_allocate_device(); | ||
483 | if (!rdev) | ||
484 | goto failure; | ||
485 | |||
486 | ret = -ENODEV; | ||
487 | /* validate pnp resources */ | ||
488 | if (!pnp_port_valid(pdev, 0)) { | ||
489 | dev_err(&pdev->dev, "IR PNP Port not valid!\n"); | ||
490 | goto failure; | ||
491 | } | ||
492 | |||
493 | if (!pnp_irq_valid(pdev, 0)) { | ||
494 | dev_err(&pdev->dev, "IR PNP IRQ not valid!\n"); | ||
495 | goto failure; | ||
496 | } | ||
497 | |||
498 | fintek->cir_addr = pnp_port_start(pdev, 0); | ||
499 | fintek->cir_irq = pnp_irq(pdev, 0); | ||
500 | fintek->cir_port_len = pnp_port_len(pdev, 0); | ||
501 | |||
502 | fintek->cr_ip = CR_INDEX_PORT; | ||
503 | fintek->cr_dp = CR_DATA_PORT; | ||
504 | |||
505 | spin_lock_init(&fintek->fintek_lock); | ||
506 | |||
507 | ret = -EBUSY; | ||
508 | /* now claim resources */ | ||
509 | if (!request_region(fintek->cir_addr, | ||
510 | fintek->cir_port_len, FINTEK_DRIVER_NAME)) | ||
511 | goto failure; | ||
512 | |||
513 | if (request_irq(fintek->cir_irq, fintek_cir_isr, IRQF_SHARED, | ||
514 | FINTEK_DRIVER_NAME, (void *)fintek)) | ||
515 | goto failure; | ||
516 | |||
517 | pnp_set_drvdata(pdev, fintek); | ||
518 | fintek->pdev = pdev; | ||
519 | |||
520 | ret = fintek_hw_detect(fintek); | ||
521 | if (ret) | ||
522 | goto failure; | ||
523 | |||
524 | /* Initialize CIR & CIR Wake Logical Devices */ | ||
525 | fintek_config_mode_enable(fintek); | ||
526 | fintek_cir_ldev_init(fintek); | ||
527 | fintek_config_mode_disable(fintek); | ||
528 | |||
529 | /* Initialize CIR & CIR Wake Config Registers */ | ||
530 | fintek_cir_regs_init(fintek); | ||
531 | |||
532 | /* Set up the rc device */ | ||
533 | rdev->priv = fintek; | ||
534 | rdev->driver_type = RC_DRIVER_IR_RAW; | ||
535 | rdev->allowed_protos = RC_TYPE_ALL; | ||
536 | rdev->open = fintek_open; | ||
537 | rdev->close = fintek_close; | ||
538 | rdev->input_name = FINTEK_DESCRIPTION; | ||
539 | rdev->input_phys = "fintek/cir0"; | ||
540 | rdev->input_id.bustype = BUS_HOST; | ||
541 | rdev->input_id.vendor = VENDOR_ID_FINTEK; | ||
542 | rdev->input_id.product = fintek->chip_major; | ||
543 | rdev->input_id.version = fintek->chip_minor; | ||
544 | rdev->dev.parent = &pdev->dev; | ||
545 | rdev->driver_name = FINTEK_DRIVER_NAME; | ||
546 | rdev->map_name = RC_MAP_RC6_MCE; | ||
547 | rdev->timeout = US_TO_NS(1000); | ||
548 | /* rx resolution is hardwired to 50us atm, 1, 25, 100 also possible */ | ||
549 | rdev->rx_resolution = US_TO_NS(CIR_SAMPLE_PERIOD); | ||
550 | |||
551 | ret = rc_register_device(rdev); | ||
552 | if (ret) | ||
553 | goto failure; | ||
554 | |||
555 | device_init_wakeup(&pdev->dev, true); | ||
556 | fintek->rdev = rdev; | ||
557 | fit_pr(KERN_NOTICE, "driver has been successfully loaded\n"); | ||
558 | if (debug) | ||
559 | cir_dump_regs(fintek); | ||
560 | |||
561 | return 0; | ||
562 | |||
563 | failure: | ||
564 | if (fintek->cir_irq) | ||
565 | free_irq(fintek->cir_irq, fintek); | ||
566 | if (fintek->cir_addr) | ||
567 | release_region(fintek->cir_addr, fintek->cir_port_len); | ||
568 | |||
569 | rc_free_device(rdev); | ||
570 | kfree(fintek); | ||
571 | |||
572 | return ret; | ||
573 | } | ||
574 | |||
575 | static void __devexit fintek_remove(struct pnp_dev *pdev) | ||
576 | { | ||
577 | struct fintek_dev *fintek = pnp_get_drvdata(pdev); | ||
578 | unsigned long flags; | ||
579 | |||
580 | spin_lock_irqsave(&fintek->fintek_lock, flags); | ||
581 | /* disable CIR */ | ||
582 | fintek_disable_cir(fintek); | ||
583 | fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS); | ||
584 | /* enable CIR Wake (for IR power-on) */ | ||
585 | fintek_enable_wake(fintek); | ||
586 | spin_unlock_irqrestore(&fintek->fintek_lock, flags); | ||
587 | |||
588 | /* free resources */ | ||
589 | free_irq(fintek->cir_irq, fintek); | ||
590 | release_region(fintek->cir_addr, fintek->cir_port_len); | ||
591 | |||
592 | rc_unregister_device(fintek->rdev); | ||
593 | |||
594 | kfree(fintek); | ||
595 | } | ||
596 | |||
597 | static int fintek_suspend(struct pnp_dev *pdev, pm_message_t state) | ||
598 | { | ||
599 | struct fintek_dev *fintek = pnp_get_drvdata(pdev); | ||
600 | |||
601 | fit_dbg("%s called", __func__); | ||
602 | |||
603 | /* disable all CIR interrupts */ | ||
604 | fintek_cir_reg_write(fintek, CIR_STATUS_IRQ_MASK, CIR_STATUS); | ||
605 | |||
606 | fintek_config_mode_enable(fintek); | ||
607 | |||
608 | /* disable cir logical dev */ | ||
609 | fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR); | ||
610 | fintek_cr_write(fintek, LOGICAL_DEV_DISABLE, CIR_CR_DEV_EN); | ||
611 | |||
612 | fintek_config_mode_disable(fintek); | ||
613 | |||
614 | /* make sure wake is enabled */ | ||
615 | fintek_enable_wake(fintek); | ||
616 | |||
617 | return 0; | ||
618 | } | ||
619 | |||
620 | static int fintek_resume(struct pnp_dev *pdev) | ||
621 | { | ||
622 | int ret = 0; | ||
623 | struct fintek_dev *fintek = pnp_get_drvdata(pdev); | ||
624 | |||
625 | fit_dbg("%s called", __func__); | ||
626 | |||
627 | /* open interrupt */ | ||
628 | fintek_enable_cir_irq(fintek); | ||
629 | |||
630 | /* Enable CIR logical device */ | ||
631 | fintek_config_mode_enable(fintek); | ||
632 | fintek_select_logical_dev(fintek, LOGICAL_DEV_CIR); | ||
633 | fintek_cr_write(fintek, LOGICAL_DEV_ENABLE, CIR_CR_DEV_EN); | ||
634 | |||
635 | fintek_config_mode_disable(fintek); | ||
636 | |||
637 | fintek_cir_regs_init(fintek); | ||
638 | |||
639 | return ret; | ||
640 | } | ||
641 | |||
642 | static void fintek_shutdown(struct pnp_dev *pdev) | ||
643 | { | ||
644 | struct fintek_dev *fintek = pnp_get_drvdata(pdev); | ||
645 | fintek_enable_wake(fintek); | ||
646 | } | ||
647 | |||
648 | static const struct pnp_device_id fintek_ids[] = { | ||
649 | { "FIT0002", 0 }, /* CIR */ | ||
650 | { "", 0 }, | ||
651 | }; | ||
652 | |||
653 | static struct pnp_driver fintek_driver = { | ||
654 | .name = FINTEK_DRIVER_NAME, | ||
655 | .id_table = fintek_ids, | ||
656 | .flags = PNP_DRIVER_RES_DO_NOT_CHANGE, | ||
657 | .probe = fintek_probe, | ||
658 | .remove = __devexit_p(fintek_remove), | ||
659 | .suspend = fintek_suspend, | ||
660 | .resume = fintek_resume, | ||
661 | .shutdown = fintek_shutdown, | ||
662 | }; | ||
663 | |||
664 | int fintek_init(void) | ||
665 | { | ||
666 | return pnp_register_driver(&fintek_driver); | ||
667 | } | ||
668 | |||
669 | void fintek_exit(void) | ||
670 | { | ||
671 | pnp_unregister_driver(&fintek_driver); | ||
672 | } | ||
673 | |||
674 | module_param(debug, int, S_IRUGO | S_IWUSR); | ||
675 | MODULE_PARM_DESC(debug, "Enable debugging output"); | ||
676 | |||
677 | MODULE_DEVICE_TABLE(pnp, fintek_ids); | ||
678 | MODULE_DESCRIPTION(FINTEK_DESCRIPTION " driver"); | ||
679 | |||
680 | MODULE_AUTHOR("Jarod Wilson <jarod@redhat.com>"); | ||
681 | MODULE_LICENSE("GPL"); | ||
682 | |||
683 | module_init(fintek_init); | ||
684 | module_exit(fintek_exit); | ||
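The helpers at the top of fintek-cir.c (fintek_config_mode_enable(), fintek_select_logical_dev(), fintek_cr_read()) implement the usual Super I/O index/data-port protocol: write the magic enable value to the index port twice, select a logical device, then address configuration registers one byte at a time. Purely as an illustration of that access pattern, and not part of the patch, a minimal userspace sketch on x86 could look like the following; it reuses the register offsets from fintek-cir.h below and assumes the chip is wired to ports 0x2e/0x2f:

    /* Illustrative sketch only: the same index/data-port access pattern
     * used by the driver's fintek_cr_*() helpers, run from userspace.
     * Requires root for ioperm(); offsets match fintek-cir.h. */
    #include <stdio.h>
    #include <sys/io.h>

    #define CR_INDEX_PORT      0x2e
    #define CR_DATA_PORT       0x2f
    #define CONFIG_REG_ENABLE  0x87
    #define CONFIG_REG_DISABLE 0xaa
    #define GCR_CHIP_ID_HI     0x20
    #define GCR_CHIP_ID_LO     0x21

    static unsigned char cr_read(unsigned char reg)
    {
            outb(reg, CR_INDEX_PORT);       /* select config register */
            return inb(CR_DATA_PORT);       /* read its value */
    }

    int main(void)
    {
            if (ioperm(CR_INDEX_PORT, 2, 1))
                    return 1;

            /* enabling config mode explicitly requires writing 2x */
            outb(CONFIG_REG_ENABLE, CR_INDEX_PORT);
            outb(CONFIG_REG_ENABLE, CR_INDEX_PORT);

            printf("chip id: 0x%02x%02x\n",
                   cr_read(GCR_CHIP_ID_HI), cr_read(GCR_CHIP_ID_LO));

            outb(CONFIG_REG_DISABLE, CR_INDEX_PORT);
            return 0;
    }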
diff --git a/drivers/media/rc/fintek-cir.h b/drivers/media/rc/fintek-cir.h new file mode 100644 index 000000000000..1b10b2011f5e --- /dev/null +++ b/drivers/media/rc/fintek-cir.h | |||
@@ -0,0 +1,243 @@ | |||
1 | /* | ||
2 | * Driver for Feature Integration Technology Inc. (aka Fintek) LPC CIR | ||
3 | * | ||
4 | * Copyright (C) 2011 Jarod Wilson <jarod@redhat.com> | ||
5 | * | ||
6 | * Special thanks to Fintek for providing hardware and spec sheets. | ||
7 | * This driver is based upon the nuvoton, ite and ene drivers for | ||
8 | * similar hardware. | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or | ||
11 | * modify it under the terms of the GNU General Public License as | ||
12 | * published by the Free Software Foundation; either version 2 of the | ||
13 | * License, or (at your option) any later version. | ||
14 | * | ||
15 | * This program is distributed in the hope that it will be useful, but | ||
16 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
18 | * General Public License for more details. | ||
19 | * | ||
20 | * You should have received a copy of the GNU General Public License | ||
21 | * along with this program; if not, write to the Free Software | ||
22 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 | ||
23 | * USA | ||
24 | */ | ||
25 | |||
26 | #include <linux/spinlock.h> | ||
27 | #include <linux/ioctl.h> | ||
28 | |||
29 | /* platform driver name to register */ | ||
30 | #define FINTEK_DRIVER_NAME "fintek-cir" | ||
31 | #define FINTEK_DESCRIPTION "Fintek LPC SuperIO Consumer IR Transceiver" | ||
32 | #define VENDOR_ID_FINTEK 0x1934 | ||
33 | |||
34 | |||
35 | /* debugging module parameter */ | ||
36 | static int debug; | ||
37 | |||
38 | #define fit_pr(level, text, ...) \ | ||
39 | printk(level KBUILD_MODNAME ": " text, ## __VA_ARGS__) | ||
40 | |||
41 | #define fit_dbg(text, ...) \ | ||
42 | if (debug) \ | ||
43 | printk(KERN_DEBUG \ | ||
44 | KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__) | ||
45 | |||
46 | #define fit_dbg_verbose(text, ...) \ | ||
47 | if (debug > 1) \ | ||
48 | printk(KERN_DEBUG \ | ||
49 | KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__) | ||
50 | |||
51 | #define fit_dbg_wake(text, ...) \ | ||
52 | if (debug > 2) \ | ||
53 | printk(KERN_DEBUG \ | ||
54 | KBUILD_MODNAME ": " text "\n" , ## __VA_ARGS__) | ||
55 | |||
56 | |||
57 | #define TX_BUF_LEN 256 | ||
58 | #define RX_BUF_LEN 32 | ||
59 | |||
60 | struct fintek_dev { | ||
61 | struct pnp_dev *pdev; | ||
62 | struct rc_dev *rdev; | ||
63 | |||
64 | spinlock_t fintek_lock; | ||
65 | |||
66 | /* for rx */ | ||
67 | u8 buf[RX_BUF_LEN]; | ||
68 | unsigned int pkts; | ||
69 | |||
70 | struct { | ||
71 | spinlock_t lock; | ||
72 | u8 buf[TX_BUF_LEN]; | ||
73 | unsigned int buf_count; | ||
74 | unsigned int cur_buf_num; | ||
75 | wait_queue_head_t queue; | ||
76 | } tx; | ||
77 | |||
78 | /* Config register index/data port pair */ | ||
79 | u8 cr_ip; | ||
80 | u8 cr_dp; | ||
81 | |||
82 | /* hardware I/O settings */ | ||
83 | unsigned long cir_addr; | ||
84 | int cir_irq; | ||
85 | int cir_port_len; | ||
86 | |||
87 | /* hardware id */ | ||
88 | u8 chip_major; | ||
89 | u8 chip_minor; | ||
90 | u16 chip_vendor; | ||
91 | |||
92 | /* hardware features */ | ||
93 | bool hw_learning_capable; | ||
94 | bool hw_tx_capable; | ||
95 | |||
96 | /* rx settings */ | ||
97 | bool learning_enabled; | ||
98 | bool carrier_detect_enabled; | ||
99 | |||
100 | enum { | ||
101 | CMD_HEADER = 0, | ||
102 | SUBCMD, | ||
103 | CMD_DATA, | ||
104 | PARSE_IRDATA, | ||
105 | } parser_state; | ||
106 | |||
107 | u8 cmd, rem; | ||
108 | |||
109 | /* carrier period = 1 / frequency */ | ||
110 | u32 carrier; | ||
111 | }; | ||
112 | |||
113 | /* buffer packet constants, largely identical to mceusb.c */ | ||
114 | #define BUF_PULSE_BIT 0x80 | ||
115 | #define BUF_LEN_MASK 0x1f | ||
116 | #define BUF_SAMPLE_MASK 0x7f | ||
117 | |||
118 | #define BUF_COMMAND_HEADER 0x9f | ||
119 | #define BUF_COMMAND_MASK 0xe0 | ||
120 | #define BUF_COMMAND_NULL 0x00 | ||
121 | #define BUF_HW_CMD_HEADER 0xff | ||
122 | #define BUF_CMD_G_REVISION 0x0b | ||
123 | #define BUF_CMD_S_CARRIER 0x06 | ||
124 | #define BUF_CMD_S_TIMEOUT 0x0c | ||
125 | #define BUF_CMD_SIG_END 0x01 | ||
126 | #define BUF_CMD_S_TXMASK 0x08 | ||
127 | #define BUF_CMD_S_RXSENSOR 0x14 | ||
128 | #define BUF_RSP_PULSE_COUNT 0x15 | ||
129 | |||
130 | #define CIR_SAMPLE_PERIOD 50 | ||
131 | |||
132 | /* | ||
133 | * Configuration Register: | ||
134 | * Index Port | ||
135 | * Data Port | ||
136 | */ | ||
137 | #define CR_INDEX_PORT 0x2e | ||
138 | #define CR_DATA_PORT 0x2f | ||
139 | |||
140 | /* Possible alternate values, depends on how the chip is wired */ | ||
141 | #define CR_INDEX_PORT2 0x4e | ||
142 | #define CR_DATA_PORT2 0x4f | ||
143 | |||
144 | /* | ||
145 | * GCR_CONFIG_PORT_SEL bit 4 specifies which Index Port value is | ||
146 | * active. 1 = 0x4e, 0 = 0x2e | ||
147 | */ | ||
148 | #define PORT_SEL_PORT_4E_EN 0x10 | ||
149 | |||
150 | /* Extended Function Mode enable/disable magic values */ | ||
151 | #define CONFIG_REG_ENABLE 0x87 | ||
152 | #define CONFIG_REG_DISABLE 0xaa | ||
153 | |||
154 | /* Chip IDs found in CR_CHIP_ID_{HI,LO} */ | ||
155 | #define CHIP_ID_HIGH_F71809U 0x04 | ||
156 | #define CHIP_ID_LOW_F71809U 0x08 | ||
157 | |||
158 | /* | ||
159 | * Global control regs we need to care about: | ||
160 | * Global Control def. | ||
161 | * Register name addr val. */ | ||
162 | #define GCR_SOFTWARE_RESET 0x02 /* 0x00 */ | ||
163 | #define GCR_LOGICAL_DEV_NO 0x07 /* 0x00 */ | ||
164 | #define GCR_CHIP_ID_HI 0x20 /* 0x04 */ | ||
165 | #define GCR_CHIP_ID_LO 0x21 /* 0x08 */ | ||
166 | #define GCR_VENDOR_ID_HI 0x23 /* 0x19 */ | ||
167 | #define GCR_VENDOR_ID_LO 0x24 /* 0x34 */ | ||
168 | #define GCR_CONFIG_PORT_SEL 0x25 /* 0x01 */ | ||
169 | #define GCR_KBMOUSE_WAKEUP 0x27 | ||
170 | |||
171 | #define LOGICAL_DEV_DISABLE 0x00 | ||
172 | #define LOGICAL_DEV_ENABLE 0x01 | ||
173 | |||
174 | /* Logical device number of the CIR function */ | ||
175 | #define LOGICAL_DEV_CIR 0x05 | ||
176 | |||
177 | /* CIR Logical Device (LDN 0x08) config registers */ | ||
178 | #define CIR_CR_COMMAND_INDEX 0x04 | ||
179 | #define CIR_CR_IRCS 0x05 /* Before host writes command to IR, host | ||
180 | must set to 1. When host finishes writing | ||
181 | the command to IR, host must clear to 0. */ | ||
182 | #define CIR_CR_COMMAND_DATA 0x06 /* Host reads or writes command data */ | ||
183 | #define CIR_CR_CLASS 0x07 /* 0xff = rx-only, 0x66 = rx + 2 tx, | ||
184 | 0x33 = rx + 1 tx */ | ||
185 | #define CIR_CR_DEV_EN 0x30 /* bit0 = 1 enables CIR */ | ||
186 | #define CIR_CR_BASE_ADDR_HI 0x60 /* MSB of CIR IO base addr */ | ||
187 | #define CIR_CR_BASE_ADDR_LO 0x61 /* LSB of CIR IO base addr */ | ||
188 | #define CIR_CR_IRQ_SEL 0x70 /* bits3-0 store CIR IRQ */ | ||
189 | #define CIR_CR_PSOUT_STATUS 0xf1 | ||
190 | #define CIR_CR_WAKE_KEY3_ADDR 0xf8 | ||
191 | #define CIR_CR_WAKE_KEY3_CODE 0xf9 | ||
192 | #define CIR_CR_WAKE_KEY3_DC 0xfa | ||
193 | #define CIR_CR_WAKE_CONTROL 0xfb | ||
194 | #define CIR_CR_WAKE_KEY12_ADDR 0xfc | ||
195 | #define CIR_CR_WAKE_KEY4_ADDR 0xfd | ||
196 | #define CIR_CR_WAKE_KEY5_ADDR 0xfe | ||
197 | |||
198 | #define CLASS_RX_ONLY 0xff | ||
199 | #define CLASS_RX_2TX 0x66 | ||
200 | #define CLASS_RX_1TX 0x33 | ||
201 | |||
202 | /* CIR device registers */ | ||
203 | #define CIR_STATUS 0x00 | ||
204 | #define CIR_RX_DATA 0x01 | ||
205 | #define CIR_TX_CONTROL 0x02 | ||
206 | #define CIR_TX_DATA 0x03 | ||
207 | #define CIR_CONTROL 0x04 | ||
208 | |||
209 | /* Bits to enable CIR wake */ | ||
210 | #define LOGICAL_DEV_ACPI 0x01 | ||
211 | #define LDEV_ACPI_WAKE_EN_REG 0xe8 | ||
212 | #define ACPI_WAKE_EN_CIR_BIT 0x04 | ||
213 | |||
214 | #define LDEV_ACPI_PME_EN_REG 0xf0 | ||
215 | #define LDEV_ACPI_PME_CLR_REG 0xf1 | ||
216 | #define ACPI_PME_CIR_BIT 0x02 | ||
217 | |||
218 | #define LDEV_ACPI_STATE_REG 0xf4 | ||
219 | #define ACPI_STATE_CIR_BIT 0x20 | ||
220 | |||
221 | /* | ||
222 | * CIR status register (0x00): | ||
223 | * 7 - CIR_IRQ_EN (1 = enable CIR IRQ, 0 = disable) | ||
224 | * 3 - TX_FINISH (1 when TX finished, write 1 to clear) | ||
225 | * 2 - TX_UNDERRUN (1 on TX underrun, write 1 to clear) | ||
226 | * 1 - RX_TIMEOUT (1 on RX timeout, write 1 to clear) | ||
227 | * 0 - RX_RECEIVE (1 on RX receive, write 1 to clear) | ||
228 | */ | ||
229 | #define CIR_STATUS_IRQ_EN 0x80 | ||
230 | #define CIR_STATUS_TX_FINISH 0x08 | ||
231 | #define CIR_STATUS_TX_UNDERRUN 0x04 | ||
232 | #define CIR_STATUS_RX_TIMEOUT 0x02 | ||
233 | #define CIR_STATUS_RX_RECEIVE 0x01 | ||
234 | #define CIR_STATUS_IRQ_MASK 0x0f | ||
235 | |||
236 | /* | ||
237 | * CIR TX control register (0x02): | ||
238 | * 7 - TX_START (1 to indicate TX start, auto-cleared when done) | ||
239 | * 6 - TX_END (1 to indicate TX data written to TX fifo) | ||
240 | */ | ||
241 | #define CIR_TX_CONTROL_TX_START 0x80 | ||
242 | #define CIR_TX_CONTROL_TX_END 0x40 | ||
243 | |||
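The buffer constants above describe how the hardware packs each RX sample: bit 7 (BUF_PULSE_BIT) flags pulse versus space, and bits 6-0 (BUF_SAMPLE_MASK) give the length in CIR_SAMPLE_PERIOD (50 us) units, which fintek_process_rx_ir_data() then scales to nanoseconds for an ir_raw_event. A standalone sketch of just that conversion, for illustration only:

    /* Standalone illustration of the sample decoding performed by
     * fintek_process_rx_ir_data(): pulse/space flag plus 7-bit length
     * in 50 us units, scaled to nanoseconds. Not part of the patch. */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BUF_PULSE_BIT     0x80
    #define BUF_SAMPLE_MASK   0x7f
    #define CIR_SAMPLE_PERIOD 50    /* microseconds per sample unit */

    struct ir_sample {
            bool pulse;
            uint32_t duration_ns;
    };

    static struct ir_sample decode_sample(uint8_t raw)
    {
            struct ir_sample s = {
                    .pulse       = (raw & BUF_PULSE_BIT) != 0,
                    .duration_ns = (raw & BUF_SAMPLE_MASK) *
                                   CIR_SAMPLE_PERIOD * 1000,
            };
            return s;
    }

    int main(void)
    {
            struct ir_sample s = decode_sample(0x92);  /* pulse, 18 units */

            printf("%s for %u ns\n", s.pulse ? "pulse" : "space",
                   s.duration_ns);
            return 0;
    }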
diff --git a/drivers/media/rc/keymaps/rc-lme2510.c b/drivers/media/rc/keymaps/rc-lme2510.c index afae14fd152e..129d3f9a461d 100644 --- a/drivers/media/rc/keymaps/rc-lme2510.c +++ b/drivers/media/rc/keymaps/rc-lme2510.c | |||
@@ -14,81 +14,81 @@ | |||
14 | 14 | ||
15 | static struct rc_map_table lme2510_rc[] = { | 15 | static struct rc_map_table lme2510_rc[] = { |
16 | /* Type 1 - 26 buttons */ | 16 | /* Type 1 - 26 buttons */ |
17 | { 0xef12ba45, KEY_0 }, | 17 | { 0x10ed45, KEY_0 }, |
18 | { 0xef12a05f, KEY_1 }, | 18 | { 0x10ed5f, KEY_1 }, |
19 | { 0xef12af50, KEY_2 }, | 19 | { 0x10ed50, KEY_2 }, |
20 | { 0xef12a25d, KEY_3 }, | 20 | { 0x10ed5d, KEY_3 }, |
21 | { 0xef12be41, KEY_4 }, | 21 | { 0x10ed41, KEY_4 }, |
22 | { 0xef12f50a, KEY_5 }, | 22 | { 0x10ed0a, KEY_5 }, |
23 | { 0xef12bd42, KEY_6 }, | 23 | { 0x10ed42, KEY_6 }, |
24 | { 0xef12b847, KEY_7 }, | 24 | { 0x10ed47, KEY_7 }, |
25 | { 0xef12b649, KEY_8 }, | 25 | { 0x10ed49, KEY_8 }, |
26 | { 0xef12fa05, KEY_9 }, | 26 | { 0x10ed05, KEY_9 }, |
27 | { 0xef12bc43, KEY_POWER }, | 27 | { 0x10ed43, KEY_POWER }, |
28 | { 0xef12b946, KEY_SUBTITLE }, | 28 | { 0x10ed46, KEY_SUBTITLE }, |
29 | { 0xef12f906, KEY_PAUSE }, | 29 | { 0x10ed06, KEY_PAUSE }, |
30 | { 0xef12fc03, KEY_MEDIA_REPEAT}, | 30 | { 0x10ed03, KEY_MEDIA_REPEAT}, |
31 | { 0xef12fd02, KEY_PAUSE }, | 31 | { 0x10ed02, KEY_PAUSE }, |
32 | { 0xef12a15e, KEY_VOLUMEUP }, | 32 | { 0x10ed5e, KEY_VOLUMEUP }, |
33 | { 0xef12a35c, KEY_VOLUMEDOWN }, | 33 | { 0x10ed5c, KEY_VOLUMEDOWN }, |
34 | { 0xef12f609, KEY_CHANNELUP }, | 34 | { 0x10ed09, KEY_CHANNELUP }, |
35 | { 0xef12e51a, KEY_CHANNELDOWN }, | 35 | { 0x10ed1a, KEY_CHANNELDOWN }, |
36 | { 0xef12e11e, KEY_PLAY }, | 36 | { 0x10ed1e, KEY_PLAY }, |
37 | { 0xef12e41b, KEY_ZOOM }, | 37 | { 0x10ed1b, KEY_ZOOM }, |
38 | { 0xef12a659, KEY_MUTE }, | 38 | { 0x10ed59, KEY_MUTE }, |
39 | { 0xef12a55a, KEY_TV }, | 39 | { 0x10ed5a, KEY_TV }, |
40 | { 0xef12e718, KEY_RECORD }, | 40 | { 0x10ed18, KEY_RECORD }, |
41 | { 0xef12f807, KEY_EPG }, | 41 | { 0x10ed07, KEY_EPG }, |
42 | { 0xef12fe01, KEY_STOP }, | 42 | { 0x10ed01, KEY_STOP }, |
43 | /* Type 2 - 20 buttons */ | 43 | /* Type 2 - 20 buttons */ |
44 | { 0xff40ea15, KEY_0 }, | 44 | { 0xbf15, KEY_0 }, |
45 | { 0xff40f708, KEY_1 }, | 45 | { 0xbf08, KEY_1 }, |
46 | { 0xff40f609, KEY_2 }, | 46 | { 0xbf09, KEY_2 }, |
47 | { 0xff40f50a, KEY_3 }, | 47 | { 0xbf0a, KEY_3 }, |
48 | { 0xff40f30c, KEY_4 }, | 48 | { 0xbf0c, KEY_4 }, |
49 | { 0xff40f20d, KEY_5 }, | 49 | { 0xbf0d, KEY_5 }, |
50 | { 0xff40f10e, KEY_6 }, | 50 | { 0xbf0e, KEY_6 }, |
51 | { 0xff40ef10, KEY_7 }, | 51 | { 0xbf10, KEY_7 }, |
52 | { 0xff40ee11, KEY_8 }, | 52 | { 0xbf11, KEY_8 }, |
53 | { 0xff40ed12, KEY_9 }, | 53 | { 0xbf12, KEY_9 }, |
54 | { 0xff40ff00, KEY_POWER }, | 54 | { 0xbf00, KEY_POWER }, |
55 | { 0xff40fb04, KEY_MEDIA_REPEAT}, /* Recall */ | 55 | { 0xbf04, KEY_MEDIA_REPEAT}, /* Recall */ |
56 | { 0xff40e51a, KEY_PAUSE }, /* Timeshift */ | 56 | { 0xbf1a, KEY_PAUSE }, /* Timeshift */ |
57 | { 0xff40fd02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */ | 57 | { 0xbf02, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */ |
58 | { 0xff40f906, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/ | 58 | { 0xbf06, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/ |
59 | { 0xff40fe01, KEY_CHANNELUP }, | 59 | { 0xbf01, KEY_CHANNELUP }, |
60 | { 0xff40fa05, KEY_CHANNELDOWN }, | 60 | { 0xbf05, KEY_CHANNELDOWN }, |
61 | { 0xff40eb14, KEY_ZOOM }, | 61 | { 0xbf14, KEY_ZOOM }, |
62 | { 0xff40e718, KEY_RECORD }, | 62 | { 0xbf18, KEY_RECORD }, |
63 | { 0xff40e916, KEY_STOP }, | 63 | { 0xbf16, KEY_STOP }, |
64 | /* Type 3 - 20 buttons */ | 64 | /* Type 3 - 20 buttons */ |
65 | { 0xff00e31c, KEY_0 }, | 65 | { 0x1c, KEY_0 }, |
66 | { 0xff00f807, KEY_1 }, | 66 | { 0x07, KEY_1 }, |
67 | { 0xff00ea15, KEY_2 }, | 67 | { 0x15, KEY_2 }, |
68 | { 0xff00f609, KEY_3 }, | 68 | { 0x09, KEY_3 }, |
69 | { 0xff00e916, KEY_4 }, | 69 | { 0x16, KEY_4 }, |
70 | { 0xff00e619, KEY_5 }, | 70 | { 0x19, KEY_5 }, |
71 | { 0xff00f20d, KEY_6 }, | 71 | { 0x0d, KEY_6 }, |
72 | { 0xff00f30c, KEY_7 }, | 72 | { 0x0c, KEY_7 }, |
73 | { 0xff00e718, KEY_8 }, | 73 | { 0x18, KEY_8 }, |
74 | { 0xff00a15e, KEY_9 }, | 74 | { 0x5e, KEY_9 }, |
75 | { 0xff00ba45, KEY_POWER }, | 75 | { 0x45, KEY_POWER }, |
76 | { 0xff00bb44, KEY_MEDIA_REPEAT}, /* Recall */ | 76 | { 0x44, KEY_MEDIA_REPEAT}, /* Recall */ |
77 | { 0xff00b54a, KEY_PAUSE }, /* Timeshift */ | 77 | { 0x4a, KEY_PAUSE }, /* Timeshift */ |
78 | { 0xff00b847, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */ | 78 | { 0x47, KEY_VOLUMEUP }, /* 2 x -/+ Keys not marked */ |
79 | { 0xff00bc43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/ | 79 | { 0x43, KEY_VOLUMEDOWN }, /* Volume defined as right hand*/ |
80 | { 0xff00b946, KEY_CHANNELUP }, | 80 | { 0x46, KEY_CHANNELUP }, |
81 | { 0xff00bf40, KEY_CHANNELDOWN }, | 81 | { 0x40, KEY_CHANNELDOWN }, |
82 | { 0xff00f708, KEY_ZOOM }, | 82 | { 0x08, KEY_ZOOM }, |
83 | { 0xff00bd42, KEY_RECORD }, | 83 | { 0x42, KEY_RECORD }, |
84 | { 0xff00a55a, KEY_STOP }, | 84 | { 0x5a, KEY_STOP }, |
85 | }; | 85 | }; |
86 | 86 | ||
87 | static struct rc_map_list lme2510_map = { | 87 | static struct rc_map_list lme2510_map = { |
88 | .map = { | 88 | .map = { |
89 | .scan = lme2510_rc, | 89 | .scan = lme2510_rc, |
90 | .size = ARRAY_SIZE(lme2510_rc), | 90 | .size = ARRAY_SIZE(lme2510_rc), |
91 | .rc_type = RC_TYPE_UNKNOWN, | 91 | .rc_type = RC_TYPE_NEC, |
92 | .name = RC_MAP_LME2510, | 92 | .name = RC_MAP_LME2510, |
93 | } | 93 | } |
94 | }; | 94 | }; |
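With the table converted to plain NEC scancodes and .rc_type set to RC_TYPE_NEC, rc-core can match whatever scancode the LME2510 driver reports against this keymap. A trimmed-down illustration of the reporting side follows; the function is hypothetical and only shows the rc-core call involved, with the scancode value taken from the Type 1 table above:

    /* Illustrative only: handing a decoded NEC scancode to rc-core so
     * it is looked up in a keymap such as rc-lme2510. rdev is assumed
     * to be an rc_dev already registered with rc_register_device(). */
    #include <media/rc-core.h>

    static void report_remote_key(struct rc_dev *rdev)
    {
            u32 scancode = 0x10ed45;        /* KEY_0 in the Type 1 table */

            rc_keydown(rdev, scancode, 0);  /* keyup is generated later */
    }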
diff --git a/drivers/media/video/Kconfig b/drivers/media/video/Kconfig index 3be180b3ba27..bb53de7fe408 100644 --- a/drivers/media/video/Kconfig +++ b/drivers/media/video/Kconfig | |||
@@ -687,7 +687,7 @@ config VIDEO_HEXIUM_GEMINI | |||
687 | 687 | ||
688 | config VIDEO_TIMBERDALE | 688 | config VIDEO_TIMBERDALE |
689 | tristate "Support for timberdale Video In/LogiWIN" | 689 | tristate "Support for timberdale Video In/LogiWIN" |
690 | depends on VIDEO_V4L2 && I2C | 690 | depends on VIDEO_V4L2 && I2C && DMADEVICES |
691 | select DMA_ENGINE | 691 | select DMA_ENGINE |
692 | select TIMB_DMA | 692 | select TIMB_DMA |
693 | select VIDEO_ADV7180 | 693 | select VIDEO_ADV7180 |
@@ -757,6 +757,8 @@ config VIDEO_NOON010PC30 | |||
757 | ---help--- | 757 | ---help--- |
758 | This driver supports NOON010PC30 CIF camera from Siliconfile | 758 | This driver supports NOON010PC30 CIF camera from Siliconfile |
759 | 759 | ||
760 | source "drivers/media/video/m5mols/Kconfig" | ||
761 | |||
760 | config VIDEO_OMAP3 | 762 | config VIDEO_OMAP3 |
761 | tristate "OMAP 3 Camera support (EXPERIMENTAL)" | 763 | tristate "OMAP 3 Camera support (EXPERIMENTAL)" |
762 | select OMAP_IOMMU | 764 | select OMAP_IOMMU |
@@ -952,7 +954,7 @@ config VIDEO_SAMSUNG_S5P_FIMC | |||
952 | 954 | ||
953 | config VIDEO_S5P_MIPI_CSIS | 955 | config VIDEO_S5P_MIPI_CSIS |
954 | tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver" | 956 | tristate "Samsung S5P and EXYNOS4 MIPI CSI receiver driver" |
955 | depends on VIDEO_V4L2 && PM_RUNTIME && VIDEO_V4L2_SUBDEV_API | 957 | depends on VIDEO_V4L2 && PM_RUNTIME && PLAT_S5P && VIDEO_V4L2_SUBDEV_API |
956 | ---help--- | 958 | ---help--- |
957 | This is a v4l2 driver for Samsung S5P/EXYNOS4 MIPI-CSI receiver. | 959 | This is a v4l2 driver for Samsung S5P/EXYNOS4 MIPI-CSI receiver. |
958 | 960 | ||
diff --git a/drivers/media/video/Makefile b/drivers/media/video/Makefile index 9519160c2e01..f0fecd6f6a33 100644 --- a/drivers/media/video/Makefile +++ b/drivers/media/video/Makefile | |||
@@ -69,6 +69,7 @@ obj-$(CONFIG_VIDEO_MT9V011) += mt9v011.o | |||
69 | obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o | 69 | obj-$(CONFIG_VIDEO_MT9V032) += mt9v032.o |
70 | obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o | 70 | obj-$(CONFIG_VIDEO_SR030PC30) += sr030pc30.o |
71 | obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o | 71 | obj-$(CONFIG_VIDEO_NOON010PC30) += noon010pc30.o |
72 | obj-$(CONFIG_VIDEO_M5MOLS) += m5mols/ | ||
72 | 73 | ||
73 | obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o | 74 | obj-$(CONFIG_SOC_CAMERA_IMX074) += imx074.o |
74 | obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o | 75 | obj-$(CONFIG_SOC_CAMERA_MT9M001) += mt9m001.o |
diff --git a/drivers/media/video/cpia2/cpia2_v4l.c b/drivers/media/video/cpia2/cpia2_v4l.c index 0073a8c55336..40eb6326e48a 100644 --- a/drivers/media/video/cpia2/cpia2_v4l.c +++ b/drivers/media/video/cpia2/cpia2_v4l.c | |||
@@ -438,7 +438,7 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v | |||
438 | strcat(vc->card, " (676/"); | 438 | strcat(vc->card, " (676/"); |
439 | break; | 439 | break; |
440 | default: | 440 | default: |
441 | strcat(vc->card, " (???/"); | 441 | strcat(vc->card, " (XXX/"); |
442 | break; | 442 | break; |
443 | } | 443 | } |
444 | switch (cam->params.version.sensor_flags) { | 444 | switch (cam->params.version.sensor_flags) { |
@@ -458,7 +458,7 @@ static int cpia2_querycap(struct file *file, void *fh, struct v4l2_capability *v | |||
458 | strcat(vc->card, "500)"); | 458 | strcat(vc->card, "500)"); |
459 | break; | 459 | break; |
460 | default: | 460 | default: |
461 | strcat(vc->card, "???)"); | 461 | strcat(vc->card, "XXX)"); |
462 | break; | 462 | break; |
463 | } | 463 | } |
464 | 464 | ||
diff --git a/drivers/media/video/cx231xx/cx231xx-avcore.c b/drivers/media/video/cx231xx/cx231xx-avcore.c index 280df43ca446..8d7813415760 100644 --- a/drivers/media/video/cx231xx/cx231xx-avcore.c +++ b/drivers/media/video/cx231xx/cx231xx-avcore.c | |||
@@ -1354,7 +1354,7 @@ void cx231xx_dump_SC_reg(struct cx231xx *dev) | |||
1354 | { | 1354 | { |
1355 | u8 value[4] = { 0, 0, 0, 0 }; | 1355 | u8 value[4] = { 0, 0, 0, 0 }; |
1356 | int status = 0; | 1356 | int status = 0; |
1357 | cx231xx_info("cx231xx_dump_SC_reg %s!\n", __TIME__); | 1357 | cx231xx_info("cx231xx_dump_SC_reg!\n"); |
1358 | 1358 | ||
1359 | status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, | 1359 | status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, |
1360 | value, 4); | 1360 | value, 4); |
diff --git a/drivers/media/video/gspca/kinect.c b/drivers/media/video/gspca/kinect.c index 66671a4092e4..26fc206f095e 100644 --- a/drivers/media/video/gspca/kinect.c +++ b/drivers/media/video/gspca/kinect.c | |||
@@ -34,7 +34,7 @@ MODULE_AUTHOR("Antonio Ospite <ospite@studenti.unina.it>"); | |||
34 | MODULE_DESCRIPTION("GSPCA/Kinect Sensor Device USB Camera Driver"); | 34 | MODULE_DESCRIPTION("GSPCA/Kinect Sensor Device USB Camera Driver"); |
35 | MODULE_LICENSE("GPL"); | 35 | MODULE_LICENSE("GPL"); |
36 | 36 | ||
37 | #ifdef DEBUG | 37 | #ifdef GSPCA_DEBUG |
38 | int gspca_debug = D_ERR | D_PROBE | D_CONF | D_STREAM | D_FRAM | D_PACK | | 38 | int gspca_debug = D_ERR | D_PROBE | D_CONF | D_STREAM | D_FRAM | D_PACK | |
39 | D_USBI | D_USBO | D_V4L2; | 39 | D_USBI | D_USBO | D_V4L2; |
40 | #endif | 40 | #endif |
diff --git a/drivers/media/video/m5mols/Kconfig b/drivers/media/video/m5mols/Kconfig new file mode 100644 index 000000000000..302dc3d70193 --- /dev/null +++ b/drivers/media/video/m5mols/Kconfig | |||
@@ -0,0 +1,5 @@ | |||
1 | config VIDEO_M5MOLS | ||
2 | tristate "Fujitsu M-5MOLS 8MP sensor support" | ||
3 | depends on I2C && VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API | ||
4 | ---help--- | ||
5 | This driver supports the Fujitsu M-5MOLS camera sensor with ISP. | ||
diff --git a/drivers/media/video/m5mols/Makefile b/drivers/media/video/m5mols/Makefile new file mode 100644 index 000000000000..0a44e028edc7 --- /dev/null +++ b/drivers/media/video/m5mols/Makefile | |||
@@ -0,0 +1,3 @@ | |||
1 | m5mols-objs := m5mols_core.o m5mols_controls.o m5mols_capture.o | ||
2 | |||
3 | obj-$(CONFIG_VIDEO_M5MOLS) += m5mols.o | ||
diff --git a/drivers/media/video/m5mols/m5mols.h b/drivers/media/video/m5mols/m5mols.h new file mode 100644 index 000000000000..10b55c854487 --- /dev/null +++ b/drivers/media/video/m5mols/m5mols.h | |||
@@ -0,0 +1,296 @@ | |||
1 | /* | ||
2 | * Header for M-5MOLS 8M Pixel camera sensor with ISP | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics Co., Ltd. | ||
5 | * Author: HeungJun Kim, riverful.kim@samsung.com | ||
6 | * | ||
7 | * Copyright (C) 2009 Samsung Electronics Co., Ltd. | ||
8 | * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #ifndef M5MOLS_H | ||
17 | #define M5MOLS_H | ||
18 | |||
19 | #include <media/v4l2-subdev.h> | ||
20 | #include "m5mols_reg.h" | ||
21 | |||
22 | extern int m5mols_debug; | ||
23 | |||
24 | #define to_m5mols(__sd) container_of(__sd, struct m5mols_info, sd) | ||
25 | |||
26 | #define to_sd(__ctrl) \ | ||
27 | (&container_of(__ctrl->handler, struct m5mols_info, handle)->sd) | ||
28 | |||
29 | enum m5mols_restype { | ||
30 | M5MOLS_RESTYPE_MONITOR, | ||
31 | M5MOLS_RESTYPE_CAPTURE, | ||
32 | M5MOLS_RESTYPE_MAX, | ||
33 | }; | ||
34 | |||
35 | /** | ||
36 | * struct m5mols_resolution - structure for the resolution | ||
37 | * @type: resolution type according to the pixel code | ||
38 | * @width: width of the resolution | ||
39 | * @height: height of the resolution | ||
40 | * @reg: resolution preset register value | ||
41 | */ | ||
42 | struct m5mols_resolution { | ||
43 | u8 reg; | ||
44 | enum m5mols_restype type; | ||
45 | u16 width; | ||
46 | u16 height; | ||
47 | }; | ||
48 | |||
49 | /** | ||
50 | * struct m5mols_exif - structure for the EXIF information of M-5MOLS | ||
51 | * @exposure_time: exposure time register value | ||
52 | * @shutter_speed: speed of the shutter register value | ||
53 | * @aperture: aperture register value | ||
54 | * @exposure_bias: also called EV bias | ||
55 | * @iso_speed: ISO register value | ||
56 | * @flash: status register value of the flash | ||
57 | * @sdr: status register value of the Subject Distance Range | ||
58 | * @qval: exact meaning is not documented | ||
59 | */ | ||
60 | struct m5mols_exif { | ||
61 | u32 exposure_time; | ||
62 | u32 shutter_speed; | ||
63 | u32 aperture; | ||
64 | u32 brightness; | ||
65 | u32 exposure_bias; | ||
66 | u16 iso_speed; | ||
67 | u16 flash; | ||
68 | u16 sdr; | ||
69 | u16 qval; | ||
70 | }; | ||
71 | |||
72 | /** | ||
73 | * struct m5mols_capture - Structure for the capture capability | ||
74 | * @exif: EXIF information | ||
75 | * @main: size in bytes of the main image | ||
76 | * @thumb: size in bytes of the thumb image, if it was accompanied | ||
77 | * @total: total size in bytes of the produced image | ||
78 | */ | ||
79 | struct m5mols_capture { | ||
80 | struct m5mols_exif exif; | ||
81 | u32 main; | ||
82 | u32 thumb; | ||
83 | u32 total; | ||
84 | }; | ||
85 | |||
86 | /** | ||
87 | * struct m5mols_scenemode - structure for the scenemode capability | ||
88 | * @metering: metering light register value | ||
89 | * @ev_bias: EV bias register value | ||
90 | * @wb_mode: mode which means the WhiteBalance is Auto or Manual | ||
91 | * @wb_preset: whitebalance preset register value in the Manual mode | ||
92 | * @chroma_en: register value whether the Chroma capability is enabled or not | ||
93 | * @chroma_lvl: chroma's level register value | ||
94 | * @edge_en: register value Whether the Edge capability is enabled or not | ||
95 | * @edge_lvl: edge's level register value | ||
96 | * @af_range: Auto Focus's range | ||
97 | * @fd_mode: Face Detection mode | ||
99 | * @mcc: Multi-axis Color Conversion (the so-called emotion color) | ||
99 | * @light: status of the Light | ||
100 | * @flash: status of the Flash | ||
102 | * @tone: Tone color, i.e. the Contrast setting | ||
102 | * @iso: ISO register value | ||
104 | * @capt_mode: Image Stabilization mode used while capturing | ||
104 | * @wdr: Wide Dynamic Range register value | ||
105 | * | ||
107 | * The values for each scene mode are those recommended in the documentation. | ||
107 | */ | ||
108 | struct m5mols_scenemode { | ||
109 | u32 metering; | ||
110 | u32 ev_bias; | ||
111 | u32 wb_mode; | ||
112 | u32 wb_preset; | ||
113 | u32 chroma_en; | ||
114 | u32 chroma_lvl; | ||
115 | u32 edge_en; | ||
116 | u32 edge_lvl; | ||
117 | u32 af_range; | ||
118 | u32 fd_mode; | ||
119 | u32 mcc; | ||
120 | u32 light; | ||
121 | u32 flash; | ||
122 | u32 tone; | ||
123 | u32 iso; | ||
124 | u32 capt_mode; | ||
125 | u32 wdr; | ||
126 | }; | ||
127 | |||
128 | /** | ||
129 | * struct m5mols_version - firmware version information | ||
130 | * @customer: customer information | ||
131 | * @project: version of project information according to customer | ||
132 | * @fw: firmware revision | ||
133 | * @hw: hardware revision | ||
134 | * @param: version of the parameter | ||
135 | * @awb: Auto WhiteBalance algorithm version | ||
136 | * @str: information about manufacturer and packaging vendor | ||
137 | * @af: Auto Focus version | ||
138 | * | ||
139 | * The register offsets run from the customer version at 0x0 to the awb | ||
140 | * version at 0x09. The customer and project fields occupy 1 byte each, | ||
141 | * while fw, hw, param and awb require 2 bytes each. The str field is a | ||
142 | * unique string associated with the firmware version. It includes information | ||
143 | * about the manufacturer and the vendor of the sensor's packaging; the | ||
144 | * first 2 bytes of the string identify the packaging manufacturer. | ||
145 | */ | ||
146 | #define VERSION_STRING_SIZE 22 | ||
147 | struct m5mols_version { | ||
148 | u8 customer; | ||
149 | u8 project; | ||
150 | u16 fw; | ||
151 | u16 hw; | ||
152 | u16 param; | ||
153 | u16 awb; | ||
154 | u8 str[VERSION_STRING_SIZE]; | ||
155 | u8 af; | ||
156 | }; | ||
157 | #define VERSION_SIZE sizeof(struct m5mols_version) | ||
158 | |||
159 | /** | ||
160 | * struct m5mols_info - M-5MOLS driver data structure | ||
161 | * @pdata: platform data | ||
162 | * @sd: v4l-subdev instance | ||
163 | * @pad: media pad | ||
164 | * @ffmt: current fmt according to resolution type | ||
165 | * @res_type: current resolution type | ||
166 | * @code: current code | ||
167 | * @irq_waitq: waitqueue for the capture | ||
168 | * @work_irq: workqueue for the IRQ | ||
169 | * @flags: state variable for the interrupt handler | ||
170 | * @handle: control handler | ||
171 | * @autoexposure: Auto Exposure control | ||
172 | * @exposure: Exposure control | ||
173 | * @autowb: Auto White Balance control | ||
174 | * @colorfx: Color effect control | ||
175 | * @saturation: Saturation control | ||
176 | * @zoom: Zoom control | ||
177 | * @ver: information of the version | ||
178 | * @cap: the capture mode attributes | ||
179 | * @power: current sensor's power status | ||
180 | * @ctrl_sync: true means all controls of the sensor are initialized | ||
181 | * @int_capture: true means the capture interrupt is issued once | ||
182 | * @lock_ae: true means the Auto Exposure is locked | ||
183 | * @lock_awb: true means the Auto WhiteBalance is locked | ||
184 | * @resolution: register value for current resolution | ||
185 | * @interrupt: register value for current interrupt status | ||
186 | * @mode: register value for current operation mode | ||
187 | * @mode_save: register value for current operation mode for saving | ||
188 | * @set_power: optional power callback to the board code | ||
189 | */ | ||
190 | struct m5mols_info { | ||
191 | const struct m5mols_platform_data *pdata; | ||
192 | struct v4l2_subdev sd; | ||
193 | struct media_pad pad; | ||
194 | struct v4l2_mbus_framefmt ffmt[M5MOLS_RESTYPE_MAX]; | ||
195 | int res_type; | ||
196 | enum v4l2_mbus_pixelcode code; | ||
197 | wait_queue_head_t irq_waitq; | ||
198 | struct work_struct work_irq; | ||
199 | unsigned long flags; | ||
200 | |||
201 | struct v4l2_ctrl_handler handle; | ||
202 | /* Autoexposure/exposure control cluster */ | ||
203 | struct { | ||
204 | struct v4l2_ctrl *autoexposure; | ||
205 | struct v4l2_ctrl *exposure; | ||
206 | }; | ||
207 | struct v4l2_ctrl *autowb; | ||
208 | struct v4l2_ctrl *colorfx; | ||
209 | struct v4l2_ctrl *saturation; | ||
210 | struct v4l2_ctrl *zoom; | ||
211 | |||
212 | struct m5mols_version ver; | ||
213 | struct m5mols_capture cap; | ||
214 | bool power; | ||
215 | bool ctrl_sync; | ||
216 | bool lock_ae; | ||
217 | bool lock_awb; | ||
218 | u8 resolution; | ||
219 | u32 interrupt; | ||
220 | u32 mode; | ||
221 | u32 mode_save; | ||
222 | int (*set_power)(struct device *dev, int on); | ||
223 | }; | ||
224 | |||
225 | #define ST_CAPT_IRQ 0 | ||
226 | |||
227 | #define is_powered(__info) (__info->power) | ||
228 | #define is_ctrl_synced(__info) (__info->ctrl_sync) | ||
229 | #define is_available_af(__info) (__info->ver.af) | ||
230 | #define is_code(__code, __type) (__code == m5mols_default_ffmt[__type].code) | ||
231 | #define is_manufacturer(__info, __manufacturer) \ | ||
232 | (__info->ver.str[0] == __manufacturer[0] && \ | ||
233 | __info->ver.str[1] == __manufacturer[1]) | ||
234 | /* | ||
235 | * I2C operation of the M-5MOLS | ||
236 | * | ||
237 | * The I2C read operation of the M-5MOLS requires 2 messages. The first | ||
238 | * message sends the information about the command, command category, and total | ||
239 | * message size. The second message is used to retrieve the data specified in | ||
240 | * the first message. | ||
241 | * | ||
242 | * 1st message 2nd message | ||
243 | * +-------+---+----------+-----+-------+ +------+------+------+------+ | ||
244 | * | size1 | R | category | cmd | size2 | | d[0] | d[1] | d[2] | d[3] | | ||
245 | * +-------+---+----------+-----+-------+ +------+------+------+------+ | ||
246 | * - size1: message data size(5 in this case) | ||
247 | * - size2: desired buffer size of the 2nd message | ||
248 | * - d[0..3]: according to size2 | ||
249 | * | ||
250 | * The I2C write operation needs just one message. The message includes | ||
251 | * category, command, total size, and desired data. | ||
252 | * | ||
253 | * 1st message | ||
254 | * +-------+---+----------+-----+------+------+------+------+ | ||
255 | * | size1 | W | category | cmd | d[0] | d[1] | d[2] | d[3] | | ||
256 | * +-------+---+----------+-----+------+------+------+------+ | ||
257 | * - d[0..3]: according to size1 | ||
258 | */ | ||
259 | int m5mols_read(struct v4l2_subdev *sd, u32 reg_comb, u32 *val); | ||
260 | int m5mols_write(struct v4l2_subdev *sd, u32 reg_comb, u32 val); | ||
261 | int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 value); | ||
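As a minimal usage sketch (illustration only, not part of this patch), the reg_comb argument of m5mols_read()/m5mols_write() is the packed combination of category, command and payload size described above, built with the I2C_REG() helper from m5mols_reg.h that the core code below also relies on:

static int example_read_customer_version(struct v4l2_subdev *sd, u8 *customer)
{
	u32 val;
	/* read the 1-byte customer version code from the SYSTEM category */
	int ret = m5mols_read(sd, I2C_REG(CAT_SYSTEM, CAT0_VER_CUSTOMER, 1), &val);

	/* m5mols_read() already byte-swaps the result into CPU order */
	if (!ret)
		*customer = val;
	return ret;
}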
262 | |||
263 | /* | ||
264 | * Mode operation of the M-5MOLS | ||
265 | * | ||
266 | * Changing the mode of the M-5MOLS requires the right execution order. | ||
267 | * There are three modes (PARAMETER, MONITOR, CAPTURE) which can be changed | ||
268 | * by the user. There are various categories associated with each mode. | ||
269 | * | ||
270 | * +============================================================+ | ||
271 | * | mode | category | | ||
272 | * +============================================================+ | ||
273 | * | FLASH | FLASH(only after Stand-by or Power-on) | | ||
274 | * | SYSTEM | SYSTEM(only after sensor arm-booting) | | ||
275 | * | PARAMETER | PARAMETER | | ||
276 | * | MONITOR | MONITOR(preview), Auto Focus, Face Detection | | ||
277 | * | CAPTURE | Single CAPTURE, Preview(recording) | | ||
278 | * +============================================================+ | ||
279 | * | ||
280 | * The available execution order between the modes is as follows: | ||
281 | * PARAMETER <---> MONITOR <---> CAPTURE | ||
282 | */ | ||
283 | int m5mols_mode(struct m5mols_info *info, u32 mode); | ||
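A short usage sketch of this mode sequencing (illustration only; PARM_MON_FPS and REG_FPS_30 are the same symbols m5mols_start_monitor() uses later in this patch). m5mols_mode() performs any intermediate MONITOR transition itself, so a caller only names the target mode before touching that mode's registers:

static int example_set_monitor_30fps(struct m5mols_info *info)
{
	/* PARM_* registers belong to the PARAMETER mode categories */
	int ret = m5mols_mode(info, REG_PARAMETER);

	if (!ret)
		ret = m5mols_write(&info->sd, PARM_MON_FPS, REG_FPS_30);
	/* return to MONITOR (preview) once the parameter is written */
	if (!ret)
		ret = m5mols_mode(info, REG_MONITOR);
	return ret;
}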
284 | |||
285 | int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg); | ||
286 | int m5mols_sync_controls(struct m5mols_info *info); | ||
287 | int m5mols_start_capture(struct m5mols_info *info); | ||
288 | int m5mols_do_scenemode(struct m5mols_info *info, u32 mode); | ||
289 | int m5mols_lock_3a(struct m5mols_info *info, bool lock); | ||
290 | int m5mols_set_ctrl(struct v4l2_ctrl *ctrl); | ||
291 | |||
292 | /* The firmware function */ | ||
293 | int m5mols_update_fw(struct v4l2_subdev *sd, | ||
294 | int (*set_power)(struct m5mols_info *, bool)); | ||
295 | |||
296 | #endif /* M5MOLS_H */ | ||
diff --git a/drivers/media/video/m5mols/m5mols_capture.c b/drivers/media/video/m5mols/m5mols_capture.c new file mode 100644 index 000000000000..d71a3903b60f --- /dev/null +++ b/drivers/media/video/m5mols/m5mols_capture.c | |||
@@ -0,0 +1,191 @@ | |||
1 | /* | ||
2 | * The Capture code for Fujitsu M-5MOLS ISP | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics Co., Ltd. | ||
5 | * Author: HeungJun Kim, riverful.kim@samsung.com | ||
6 | * | ||
7 | * Copyright (C) 2009 Samsung Electronics Co., Ltd. | ||
8 | * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/i2c.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/irq.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/version.h> | ||
22 | #include <linux/gpio.h> | ||
23 | #include <linux/regulator/consumer.h> | ||
24 | #include <linux/videodev2.h> | ||
25 | #include <linux/version.h> | ||
26 | #include <media/v4l2-ctrls.h> | ||
27 | #include <media/v4l2-device.h> | ||
28 | #include <media/v4l2-subdev.h> | ||
29 | #include <media/m5mols.h> | ||
30 | |||
31 | #include "m5mols.h" | ||
32 | #include "m5mols_reg.h" | ||
33 | |||
34 | static int m5mols_capture_error_handler(struct m5mols_info *info, | ||
35 | int timeout) | ||
36 | { | ||
37 | int ret; | ||
38 | |||
39 | /* Disable all interrupts and clear relevant interrupt status bits */ | ||
40 | ret = m5mols_write(&info->sd, SYSTEM_INT_ENABLE, | ||
41 | info->interrupt & ~(REG_INT_CAPTURE)); | ||
42 | if (ret) | ||
43 | return ret; | ||
44 | |||
45 | if (timeout == 0) | ||
46 | return -ETIMEDOUT; | ||
47 | |||
48 | return 0; | ||
49 | } | ||
50 | /** | ||
51 | * m5mols_read_rational - I2C read of a rational number | ||
52 | * | ||
53 | * Read numerator and denominator from registers @addr_num and @addr_den | ||
54 | * respectively and return the division result in @val. | ||
55 | */ | ||
56 | static int m5mols_read_rational(struct v4l2_subdev *sd, u32 addr_num, | ||
57 | u32 addr_den, u32 *val) | ||
58 | { | ||
59 | u32 num, den; | ||
60 | |||
61 | int ret = m5mols_read(sd, addr_num, &num); | ||
62 | if (!ret) | ||
63 | ret = m5mols_read(sd, addr_den, &den); | ||
64 | if (ret) | ||
65 | return ret; | ||
66 | *val = den == 0 ? 0 : num / den; | ||
67 | return ret; | ||
68 | } | ||
69 | |||
70 | /** | ||
71 | * m5mols_capture_info - Gather captured image information | ||
72 | * | ||
73 | * For now it gathers only EXIF information and file size. | ||
74 | */ | ||
75 | static int m5mols_capture_info(struct m5mols_info *info) | ||
76 | { | ||
77 | struct m5mols_exif *exif = &info->cap.exif; | ||
78 | struct v4l2_subdev *sd = &info->sd; | ||
79 | int ret; | ||
80 | |||
81 | ret = m5mols_read_rational(sd, EXIF_INFO_EXPTIME_NU, | ||
82 | EXIF_INFO_EXPTIME_DE, &exif->exposure_time); | ||
83 | if (ret) | ||
84 | return ret; | ||
85 | ret = m5mols_read_rational(sd, EXIF_INFO_TV_NU, EXIF_INFO_TV_DE, | ||
86 | &exif->shutter_speed); | ||
87 | if (ret) | ||
88 | return ret; | ||
89 | ret = m5mols_read_rational(sd, EXIF_INFO_AV_NU, EXIF_INFO_AV_DE, | ||
90 | &exif->aperture); | ||
91 | if (ret) | ||
92 | return ret; | ||
93 | ret = m5mols_read_rational(sd, EXIF_INFO_BV_NU, EXIF_INFO_BV_DE, | ||
94 | &exif->brightness); | ||
95 | if (ret) | ||
96 | return ret; | ||
97 | ret = m5mols_read_rational(sd, EXIF_INFO_EBV_NU, EXIF_INFO_EBV_DE, | ||
98 | &exif->exposure_bias); | ||
99 | if (ret) | ||
100 | return ret; | ||
101 | |||
102 | ret = m5mols_read(sd, EXIF_INFO_ISO, (u32 *)&exif->iso_speed); | ||
103 | if (!ret) | ||
104 | ret = m5mols_read(sd, EXIF_INFO_FLASH, (u32 *)&exif->flash); | ||
105 | if (!ret) | ||
106 | ret = m5mols_read(sd, EXIF_INFO_SDR, (u32 *)&exif->sdr); | ||
107 | if (!ret) | ||
108 | ret = m5mols_read(sd, EXIF_INFO_QVAL, (u32 *)&exif->qval); | ||
109 | if (ret) | ||
110 | return ret; | ||
111 | |||
112 | if (!ret) | ||
113 | ret = m5mols_read(sd, CAPC_IMAGE_SIZE, &info->cap.main); | ||
114 | if (!ret) | ||
115 | ret = m5mols_read(sd, CAPC_THUMB_SIZE, &info->cap.thumb); | ||
116 | if (!ret) | ||
117 | info->cap.total = info->cap.main + info->cap.thumb; | ||
118 | |||
119 | return ret; | ||
120 | } | ||
121 | |||
122 | int m5mols_start_capture(struct m5mols_info *info) | ||
123 | { | ||
124 | struct v4l2_subdev *sd = &info->sd; | ||
125 | u32 resolution = info->resolution; | ||
126 | int timeout; | ||
127 | int ret; | ||
128 | |||
129 | /* | ||
130 | * Preparing capture. Setting control & interrupt before entering | ||
131 | * capture mode | ||
132 | * | ||
133 | * 1) change to MONITOR mode for operating control & interrupt | ||
134 | * 2) set controls (considering v4l2_control value & lock 3A) | ||
135 | * 3) set interrupt | ||
136 | * 4) change to CAPTURE mode | ||
137 | */ | ||
138 | ret = m5mols_mode(info, REG_MONITOR); | ||
139 | if (!ret) | ||
140 | ret = m5mols_sync_controls(info); | ||
141 | if (!ret) | ||
142 | ret = m5mols_lock_3a(info, true); | ||
143 | if (!ret) | ||
144 | ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE); | ||
145 | if (!ret) | ||
146 | ret = m5mols_mode(info, REG_CAPTURE); | ||
147 | if (!ret) { | ||
148 | /* Wait for capture interrupt, after changing capture mode */ | ||
149 | timeout = wait_event_interruptible_timeout(info->irq_waitq, | ||
150 | test_bit(ST_CAPT_IRQ, &info->flags), | ||
151 | msecs_to_jiffies(2000)); | ||
152 | if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags)) | ||
153 | ret = m5mols_capture_error_handler(info, timeout); | ||
154 | } | ||
155 | if (!ret) | ||
156 | ret = m5mols_lock_3a(info, false); | ||
157 | if (ret) | ||
158 | return ret; | ||
159 | /* | ||
160 | * Starting capture. Setting capture frame count and resolution and | ||
161 | * the format(available format: JPEG, Bayer RAW, YUV). | ||
162 | * | ||
163 | * 1) select single or multi capture (up to 25), format, size | ||
164 | * 2) set interrupt | ||
165 | * 3) start capture(for main image, now) | ||
166 | * 4) get information | ||
167 | * 5) notify the file size to the v4l2 device (e.g. the s5p-fimc v4l2 device) | ||
168 | */ | ||
169 | ret = m5mols_write(sd, CAPC_SEL_FRAME, 1); | ||
170 | if (!ret) | ||
171 | ret = m5mols_write(sd, CAPP_YUVOUT_MAIN, REG_JPEG); | ||
172 | if (!ret) | ||
173 | ret = m5mols_write(sd, CAPP_MAIN_IMAGE_SIZE, resolution); | ||
174 | if (!ret) | ||
175 | ret = m5mols_enable_interrupt(sd, REG_INT_CAPTURE); | ||
176 | if (!ret) | ||
177 | ret = m5mols_write(sd, CAPC_START, REG_CAP_START_MAIN); | ||
178 | if (!ret) { | ||
179 | /* Wait for the capture completion interrupt */ | ||
180 | timeout = wait_event_interruptible_timeout(info->irq_waitq, | ||
181 | test_bit(ST_CAPT_IRQ, &info->flags), | ||
182 | msecs_to_jiffies(2000)); | ||
183 | if (test_and_clear_bit(ST_CAPT_IRQ, &info->flags)) { | ||
184 | ret = m5mols_capture_info(info); | ||
185 | if (!ret) | ||
186 | v4l2_subdev_notify(sd, 0, &info->cap.total); | ||
187 | } | ||
188 | } | ||
189 | |||
190 | return m5mols_capture_error_handler(info, timeout); | ||
191 | } | ||
diff --git a/drivers/media/video/m5mols/m5mols_controls.c b/drivers/media/video/m5mols/m5mols_controls.c new file mode 100644 index 000000000000..817c16fec368 --- /dev/null +++ b/drivers/media/video/m5mols/m5mols_controls.c | |||
@@ -0,0 +1,299 @@ | |||
1 | /* | ||
2 | * Controls for M-5MOLS 8M Pixel camera sensor with ISP | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics Co., Ltd. | ||
5 | * Author: HeungJun Kim, riverful.kim@samsung.com | ||
6 | * | ||
7 | * Copyright (C) 2009 Samsung Electronics Co., Ltd. | ||
8 | * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/i2c.h> | ||
17 | #include <linux/delay.h> | ||
18 | #include <linux/videodev2.h> | ||
19 | #include <media/v4l2-ctrls.h> | ||
20 | |||
21 | #include "m5mols.h" | ||
22 | #include "m5mols_reg.h" | ||
23 | |||
24 | static struct m5mols_scenemode m5mols_default_scenemode[] = { | ||
25 | [REG_SCENE_NORMAL] = { | ||
26 | REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0, | ||
27 | REG_CHROMA_ON, 3, REG_EDGE_ON, 5, | ||
28 | REG_AF_NORMAL, REG_FD_OFF, | ||
29 | REG_MCC_NORMAL, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
30 | 5, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF, | ||
31 | }, | ||
32 | [REG_SCENE_PORTRAIT] = { | ||
33 | REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0, | ||
34 | REG_CHROMA_ON, 3, REG_EDGE_ON, 4, | ||
35 | REG_AF_NORMAL, BIT_FD_EN | BIT_FD_DRAW_FACE_FRAME, | ||
36 | REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
37 | 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF, | ||
38 | }, | ||
39 | [REG_SCENE_LANDSCAPE] = { | ||
40 | REG_AE_ALL, REG_AE_INDEX_00, REG_AWB_AUTO, 0, | ||
41 | REG_CHROMA_ON, 4, REG_EDGE_ON, 6, | ||
42 | REG_AF_NORMAL, REG_FD_OFF, | ||
43 | REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
44 | 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF, | ||
45 | }, | ||
46 | [REG_SCENE_SPORTS] = { | ||
47 | REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0, | ||
48 | REG_CHROMA_ON, 3, REG_EDGE_ON, 5, | ||
49 | REG_AF_NORMAL, REG_FD_OFF, | ||
50 | REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
51 | 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF, | ||
52 | }, | ||
53 | [REG_SCENE_PARTY_INDOOR] = { | ||
54 | REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0, | ||
55 | REG_CHROMA_ON, 4, REG_EDGE_ON, 5, | ||
56 | REG_AF_NORMAL, REG_FD_OFF, | ||
57 | REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
58 | 6, REG_ISO_200, REG_CAP_NONE, REG_WDR_OFF, | ||
59 | }, | ||
60 | [REG_SCENE_BEACH_SNOW] = { | ||
61 | REG_AE_CENTER, REG_AE_INDEX_10_POS, REG_AWB_AUTO, 0, | ||
62 | REG_CHROMA_ON, 4, REG_EDGE_ON, 5, | ||
63 | REG_AF_NORMAL, REG_FD_OFF, | ||
64 | REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
65 | 6, REG_ISO_50, REG_CAP_NONE, REG_WDR_OFF, | ||
66 | }, | ||
67 | [REG_SCENE_SUNSET] = { | ||
68 | REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_PRESET, | ||
69 | REG_AWB_DAYLIGHT, | ||
70 | REG_CHROMA_ON, 3, REG_EDGE_ON, 5, | ||
71 | REG_AF_NORMAL, REG_FD_OFF, | ||
72 | REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
73 | 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF, | ||
74 | }, | ||
75 | [REG_SCENE_DAWN_DUSK] = { | ||
76 | REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_PRESET, | ||
77 | REG_AWB_FLUORESCENT_1, | ||
78 | REG_CHROMA_ON, 3, REG_EDGE_ON, 5, | ||
79 | REG_AF_NORMAL, REG_FD_OFF, | ||
80 | REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
81 | 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF, | ||
82 | }, | ||
83 | [REG_SCENE_FALL] = { | ||
84 | REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0, | ||
85 | REG_CHROMA_ON, 5, REG_EDGE_ON, 5, | ||
86 | REG_AF_NORMAL, REG_FD_OFF, | ||
87 | REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
88 | 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF, | ||
89 | }, | ||
90 | [REG_SCENE_NIGHT] = { | ||
91 | REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0, | ||
92 | REG_CHROMA_ON, 3, REG_EDGE_ON, 5, | ||
93 | REG_AF_NORMAL, REG_FD_OFF, | ||
94 | REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
95 | 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF, | ||
96 | }, | ||
97 | [REG_SCENE_AGAINST_LIGHT] = { | ||
98 | REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0, | ||
99 | REG_CHROMA_ON, 3, REG_EDGE_ON, 5, | ||
100 | REG_AF_NORMAL, REG_FD_OFF, | ||
101 | REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
102 | 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF, | ||
103 | }, | ||
104 | [REG_SCENE_FIRE] = { | ||
105 | REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0, | ||
106 | REG_CHROMA_ON, 3, REG_EDGE_ON, 5, | ||
107 | REG_AF_NORMAL, REG_FD_OFF, | ||
108 | REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
109 | 6, REG_ISO_50, REG_CAP_NONE, REG_WDR_OFF, | ||
110 | }, | ||
111 | [REG_SCENE_TEXT] = { | ||
112 | REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0, | ||
113 | REG_CHROMA_ON, 3, REG_EDGE_ON, 7, | ||
114 | REG_AF_MACRO, REG_FD_OFF, | ||
115 | REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
116 | 6, REG_ISO_AUTO, REG_CAP_ANTI_SHAKE, REG_WDR_ON, | ||
117 | }, | ||
118 | [REG_SCENE_CANDLE] = { | ||
119 | REG_AE_CENTER, REG_AE_INDEX_00, REG_AWB_AUTO, 0, | ||
120 | REG_CHROMA_ON, 3, REG_EDGE_ON, 5, | ||
121 | REG_AF_NORMAL, REG_FD_OFF, | ||
122 | REG_MCC_OFF, REG_LIGHT_OFF, REG_FLASH_OFF, | ||
123 | 6, REG_ISO_AUTO, REG_CAP_NONE, REG_WDR_OFF, | ||
124 | }, | ||
125 | }; | ||
126 | |||
127 | /** | ||
128 | * m5mols_do_scenemode() - Change current scenemode | ||
129 | * @mode: desired scene mode | ||
130 | * | ||
131 | * WARNING: The execution order is important. Do not change the order. | ||
132 | */ | ||
133 | int m5mols_do_scenemode(struct m5mols_info *info, u32 mode) | ||
134 | { | ||
135 | struct v4l2_subdev *sd = &info->sd; | ||
136 | struct m5mols_scenemode scenemode = m5mols_default_scenemode[mode]; | ||
137 | int ret; | ||
138 | |||
139 | if (mode > REG_SCENE_CANDLE) | ||
140 | return -EINVAL; | ||
141 | |||
142 | ret = m5mols_lock_3a(info, false); | ||
143 | if (!ret) | ||
144 | ret = m5mols_write(sd, AE_EV_PRESET_MONITOR, mode); | ||
145 | if (!ret) | ||
146 | ret = m5mols_write(sd, AE_EV_PRESET_CAPTURE, mode); | ||
147 | if (!ret) | ||
148 | ret = m5mols_write(sd, AE_MODE, scenemode.metering); | ||
149 | if (!ret) | ||
150 | ret = m5mols_write(sd, AE_INDEX, scenemode.ev_bias); | ||
151 | if (!ret) | ||
152 | ret = m5mols_write(sd, AWB_MODE, scenemode.wb_mode); | ||
153 | if (!ret) | ||
154 | ret = m5mols_write(sd, AWB_MANUAL, scenemode.wb_preset); | ||
155 | if (!ret) | ||
156 | ret = m5mols_write(sd, MON_CHROMA_EN, scenemode.chroma_en); | ||
157 | if (!ret) | ||
158 | ret = m5mols_write(sd, MON_CHROMA_LVL, scenemode.chroma_lvl); | ||
159 | if (!ret) | ||
160 | ret = m5mols_write(sd, MON_EDGE_EN, scenemode.edge_en); | ||
161 | if (!ret) | ||
162 | ret = m5mols_write(sd, MON_EDGE_LVL, scenemode.edge_lvl); | ||
163 | if (!ret && is_available_af(info)) | ||
164 | ret = m5mols_write(sd, AF_MODE, scenemode.af_range); | ||
165 | if (!ret && is_available_af(info)) | ||
166 | ret = m5mols_write(sd, FD_CTL, scenemode.fd_mode); | ||
167 | if (!ret) | ||
168 | ret = m5mols_write(sd, MON_TONE_CTL, scenemode.tone); | ||
169 | if (!ret) | ||
170 | ret = m5mols_write(sd, AE_ISO, scenemode.iso); | ||
171 | if (!ret) | ||
172 | ret = m5mols_mode(info, REG_CAPTURE); | ||
173 | if (!ret) | ||
174 | ret = m5mols_write(sd, CAPP_WDR_EN, scenemode.wdr); | ||
175 | if (!ret) | ||
176 | ret = m5mols_write(sd, CAPP_MCC_MODE, scenemode.mcc); | ||
177 | if (!ret) | ||
178 | ret = m5mols_write(sd, CAPP_LIGHT_CTRL, scenemode.light); | ||
179 | if (!ret) | ||
180 | ret = m5mols_write(sd, CAPP_FLASH_CTRL, scenemode.flash); | ||
181 | if (!ret) | ||
182 | ret = m5mols_write(sd, CAPC_MODE, scenemode.capt_mode); | ||
183 | if (!ret) | ||
184 | ret = m5mols_mode(info, REG_MONITOR); | ||
185 | |||
186 | return ret; | ||
187 | } | ||
188 | |||
189 | static int m5mols_lock_ae(struct m5mols_info *info, bool lock) | ||
190 | { | ||
191 | int ret = 0; | ||
192 | |||
193 | if (info->lock_ae != lock) | ||
194 | ret = m5mols_write(&info->sd, AE_LOCK, | ||
195 | lock ? REG_AE_LOCK : REG_AE_UNLOCK); | ||
196 | if (!ret) | ||
197 | info->lock_ae = lock; | ||
198 | |||
199 | return ret; | ||
200 | } | ||
201 | |||
202 | static int m5mols_lock_awb(struct m5mols_info *info, bool lock) | ||
203 | { | ||
204 | int ret = 0; | ||
205 | |||
206 | if (info->lock_awb != lock) | ||
207 | ret = m5mols_write(&info->sd, AWB_LOCK, | ||
208 | lock ? REG_AWB_LOCK : REG_AWB_UNLOCK); | ||
209 | if (!ret) | ||
210 | info->lock_awb = lock; | ||
211 | |||
212 | return ret; | ||
213 | } | ||
214 | |||
215 | /* m5mols_lock_3a() - Lock 3A (Auto Exposure, Auto White Balance, Auto Focus) */ | ||
216 | int m5mols_lock_3a(struct m5mols_info *info, bool lock) | ||
217 | { | ||
218 | int ret; | ||
219 | |||
220 | ret = m5mols_lock_ae(info, lock); | ||
221 | if (!ret) | ||
222 | ret = m5mols_lock_awb(info, lock); | ||
223 | /* Don't need to handle unlocking AF */ | ||
224 | if (!ret && is_available_af(info) && lock) | ||
225 | ret = m5mols_write(&info->sd, AF_EXECUTE, REG_AF_STOP); | ||
226 | |||
227 | return ret; | ||
228 | } | ||
229 | |||
230 | /* m5mols_set_ctrl() - The main s_ctrl function, called by m5mols_s_ctrl() */ | ||
231 | int m5mols_set_ctrl(struct v4l2_ctrl *ctrl) | ||
232 | { | ||
233 | struct v4l2_subdev *sd = to_sd(ctrl); | ||
234 | struct m5mols_info *info = to_m5mols(sd); | ||
235 | int ret; | ||
236 | |||
237 | switch (ctrl->id) { | ||
238 | case V4L2_CID_ZOOM_ABSOLUTE: | ||
239 | return m5mols_write(sd, MON_ZOOM, ctrl->val); | ||
240 | |||
241 | case V4L2_CID_EXPOSURE_AUTO: | ||
242 | ret = m5mols_lock_ae(info, | ||
243 | ctrl->val == V4L2_EXPOSURE_AUTO ? false : true); | ||
244 | if (!ret && ctrl->val == V4L2_EXPOSURE_AUTO) | ||
245 | ret = m5mols_write(sd, AE_MODE, REG_AE_ALL); | ||
246 | if (!ret && ctrl->val == V4L2_EXPOSURE_MANUAL) { | ||
247 | int val = info->exposure->val; | ||
248 | ret = m5mols_write(sd, AE_MODE, REG_AE_OFF); | ||
249 | if (!ret) | ||
250 | ret = m5mols_write(sd, AE_MAN_GAIN_MON, val); | ||
251 | if (!ret) | ||
252 | ret = m5mols_write(sd, AE_MAN_GAIN_CAP, val); | ||
253 | } | ||
254 | return ret; | ||
255 | |||
256 | case V4L2_CID_AUTO_WHITE_BALANCE: | ||
257 | ret = m5mols_lock_awb(info, ctrl->val ? false : true); | ||
258 | if (!ret) | ||
259 | ret = m5mols_write(sd, AWB_MODE, ctrl->val ? | ||
260 | REG_AWB_AUTO : REG_AWB_PRESET); | ||
261 | return ret; | ||
262 | |||
263 | case V4L2_CID_SATURATION: | ||
264 | ret = m5mols_write(sd, MON_CHROMA_LVL, ctrl->val); | ||
265 | if (!ret) | ||
266 | ret = m5mols_write(sd, MON_CHROMA_EN, REG_CHROMA_ON); | ||
267 | return ret; | ||
268 | |||
269 | case V4L2_CID_COLORFX: | ||
270 | /* | ||
271 | * This control uses two kinds of registers: normal & color. | ||
272 | * The normal effect belongs to category 1, while the color | ||
273 | * one belongs to category 2. | ||
274 | * | ||
275 | * The normal effect uses one register: PARM_EFFECT (category 1). | ||
276 | * The color effect uses three category 2 registers: | ||
277 | * MON_EFFECT, MON_CFIXR and MON_CFIXB. | ||
278 | */ | ||
279 | ret = m5mols_write(sd, PARM_EFFECT, | ||
280 | ctrl->val == V4L2_COLORFX_NEGATIVE ? REG_EFFECT_NEGA : | ||
281 | ctrl->val == V4L2_COLORFX_EMBOSS ? REG_EFFECT_EMBOSS : | ||
282 | REG_EFFECT_OFF); | ||
283 | if (!ret) | ||
284 | ret = m5mols_write(sd, MON_EFFECT, | ||
285 | ctrl->val == V4L2_COLORFX_SEPIA ? | ||
286 | REG_COLOR_EFFECT_ON : REG_COLOR_EFFECT_OFF); | ||
287 | if (!ret) | ||
288 | ret = m5mols_write(sd, MON_CFIXR, | ||
289 | ctrl->val == V4L2_COLORFX_SEPIA ? | ||
290 | REG_CFIXR_SEPIA : 0); | ||
291 | if (!ret) | ||
292 | ret = m5mols_write(sd, MON_CFIXB, | ||
293 | ctrl->val == V4L2_COLORFX_SEPIA ? | ||
294 | REG_CFIXB_SEPIA : 0); | ||
295 | return ret; | ||
296 | } | ||
297 | |||
298 | return -EINVAL; | ||
299 | } | ||
diff --git a/drivers/media/video/m5mols/m5mols_core.c b/drivers/media/video/m5mols/m5mols_core.c new file mode 100644 index 000000000000..76eac26e84ae --- /dev/null +++ b/drivers/media/video/m5mols/m5mols_core.c | |||
@@ -0,0 +1,1004 @@ | |||
1 | /* | ||
2 | * Driver for M-5MOLS 8M Pixel camera sensor with ISP | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics Co., Ltd. | ||
5 | * Author: HeungJun Kim, riverful.kim@samsung.com | ||
6 | * | ||
7 | * Copyright (C) 2009 Samsung Electronics Co., Ltd. | ||
8 | * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #include <linux/i2c.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/irq.h> | ||
19 | #include <linux/interrupt.h> | ||
20 | #include <linux/delay.h> | ||
21 | #include <linux/version.h> | ||
22 | #include <linux/gpio.h> | ||
23 | #include <linux/regulator/consumer.h> | ||
24 | #include <linux/videodev2.h> | ||
25 | #include <media/v4l2-ctrls.h> | ||
26 | #include <media/v4l2-device.h> | ||
27 | #include <media/v4l2-subdev.h> | ||
28 | #include <media/m5mols.h> | ||
29 | |||
30 | #include "m5mols.h" | ||
31 | #include "m5mols_reg.h" | ||
32 | |||
33 | int m5mols_debug; | ||
34 | module_param(m5mols_debug, int, 0644); | ||
35 | |||
36 | #define MODULE_NAME "M5MOLS" | ||
37 | #define M5MOLS_I2C_CHECK_RETRY 500 | ||
38 | |||
39 | /* The regulator consumer names for external voltage regulators */ | ||
40 | static struct regulator_bulk_data supplies[] = { | ||
41 | { | ||
42 | .supply = "core", /* ARM core power, 1.2V */ | ||
43 | }, { | ||
44 | .supply = "dig_18", /* digital power 1, 1.8V */ | ||
45 | }, { | ||
46 | .supply = "d_sensor", /* sensor power 1, 1.8V */ | ||
47 | }, { | ||
48 | .supply = "dig_28", /* digital power 2, 2.8V */ | ||
49 | }, { | ||
50 | .supply = "a_sensor", /* analog power */ | ||
51 | }, { | ||
52 | .supply = "dig_12", /* digital power 3, 1.2V */ | ||
53 | }, | ||
54 | }; | ||
55 | |||
56 | static struct v4l2_mbus_framefmt m5mols_default_ffmt[M5MOLS_RESTYPE_MAX] = { | ||
57 | [M5MOLS_RESTYPE_MONITOR] = { | ||
58 | .width = 1920, | ||
59 | .height = 1080, | ||
60 | .code = V4L2_MBUS_FMT_VYUY8_2X8, | ||
61 | .field = V4L2_FIELD_NONE, | ||
62 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
63 | }, | ||
64 | [M5MOLS_RESTYPE_CAPTURE] = { | ||
65 | .width = 1920, | ||
66 | .height = 1080, | ||
67 | .code = V4L2_MBUS_FMT_JPEG_1X8, | ||
68 | .field = V4L2_FIELD_NONE, | ||
69 | .colorspace = V4L2_COLORSPACE_JPEG, | ||
70 | }, | ||
71 | }; | ||
72 | #define SIZE_DEFAULT_FFMT ARRAY_SIZE(m5mols_default_ffmt) | ||
73 | |||
74 | static const struct m5mols_resolution m5mols_reg_res[] = { | ||
75 | { 0x01, M5MOLS_RESTYPE_MONITOR, 128, 96 }, /* SUB-QCIF */ | ||
76 | { 0x03, M5MOLS_RESTYPE_MONITOR, 160, 120 }, /* QQVGA */ | ||
77 | { 0x05, M5MOLS_RESTYPE_MONITOR, 176, 144 }, /* QCIF */ | ||
78 | { 0x06, M5MOLS_RESTYPE_MONITOR, 176, 176 }, | ||
79 | { 0x08, M5MOLS_RESTYPE_MONITOR, 240, 320 }, /* QVGA */ | ||
80 | { 0x09, M5MOLS_RESTYPE_MONITOR, 320, 240 }, /* QVGA */ | ||
81 | { 0x0c, M5MOLS_RESTYPE_MONITOR, 240, 400 }, /* WQVGA */ | ||
82 | { 0x0d, M5MOLS_RESTYPE_MONITOR, 400, 240 }, /* WQVGA */ | ||
83 | { 0x0e, M5MOLS_RESTYPE_MONITOR, 352, 288 }, /* CIF */ | ||
84 | { 0x13, M5MOLS_RESTYPE_MONITOR, 480, 360 }, | ||
85 | { 0x15, M5MOLS_RESTYPE_MONITOR, 640, 360 }, /* qHD */ | ||
86 | { 0x17, M5MOLS_RESTYPE_MONITOR, 640, 480 }, /* VGA */ | ||
87 | { 0x18, M5MOLS_RESTYPE_MONITOR, 720, 480 }, | ||
88 | { 0x1a, M5MOLS_RESTYPE_MONITOR, 800, 480 }, /* WVGA */ | ||
89 | { 0x1f, M5MOLS_RESTYPE_MONITOR, 800, 600 }, /* SVGA */ | ||
90 | { 0x21, M5MOLS_RESTYPE_MONITOR, 1280, 720 }, /* HD */ | ||
91 | { 0x25, M5MOLS_RESTYPE_MONITOR, 1920, 1080 }, /* 1080p */ | ||
92 | { 0x29, M5MOLS_RESTYPE_MONITOR, 3264, 2448 }, /* 2.63fps 8M */ | ||
93 | { 0x39, M5MOLS_RESTYPE_MONITOR, 800, 602 }, /* AHS_MON debug */ | ||
94 | |||
95 | { 0x02, M5MOLS_RESTYPE_CAPTURE, 320, 240 }, /* QVGA */ | ||
96 | { 0x04, M5MOLS_RESTYPE_CAPTURE, 400, 240 }, /* WQVGA */ | ||
97 | { 0x07, M5MOLS_RESTYPE_CAPTURE, 480, 360 }, | ||
98 | { 0x08, M5MOLS_RESTYPE_CAPTURE, 640, 360 }, /* qHD */ | ||
99 | { 0x09, M5MOLS_RESTYPE_CAPTURE, 640, 480 }, /* VGA */ | ||
100 | { 0x0a, M5MOLS_RESTYPE_CAPTURE, 800, 480 }, /* WVGA */ | ||
101 | { 0x10, M5MOLS_RESTYPE_CAPTURE, 1280, 720 }, /* HD */ | ||
102 | { 0x14, M5MOLS_RESTYPE_CAPTURE, 1280, 960 }, /* 1M */ | ||
103 | { 0x17, M5MOLS_RESTYPE_CAPTURE, 1600, 1200 }, /* 2M */ | ||
104 | { 0x19, M5MOLS_RESTYPE_CAPTURE, 1920, 1080 }, /* Full-HD */ | ||
105 | { 0x1a, M5MOLS_RESTYPE_CAPTURE, 2048, 1152 }, /* 3Mega */ | ||
106 | { 0x1b, M5MOLS_RESTYPE_CAPTURE, 2048, 1536 }, | ||
107 | { 0x1c, M5MOLS_RESTYPE_CAPTURE, 2560, 1440 }, /* 4Mega */ | ||
108 | { 0x1d, M5MOLS_RESTYPE_CAPTURE, 2560, 1536 }, | ||
109 | { 0x1f, M5MOLS_RESTYPE_CAPTURE, 2560, 1920 }, /* 5Mega */ | ||
110 | { 0x21, M5MOLS_RESTYPE_CAPTURE, 3264, 1836 }, /* 6Mega */ | ||
111 | { 0x22, M5MOLS_RESTYPE_CAPTURE, 3264, 1960 }, | ||
112 | { 0x25, M5MOLS_RESTYPE_CAPTURE, 3264, 2448 }, /* 8Mega */ | ||
113 | }; | ||
114 | |||
115 | /** | ||
116 | * m5mols_swap_byte - a byte array to integer conversion function | ||
117 | * @size: size in bytes of I2C packet defined in the M-5MOLS datasheet | ||
118 | * | ||
119 | * Convert an I2C data byte array, performing any required byte | ||
120 | * reordering to ensure proper values for each data type, regardless | ||
121 | * of the architecture endianness. | ||
122 | */ | ||
123 | static u32 m5mols_swap_byte(u8 *data, u8 length) | ||
124 | { | ||
125 | if (length == 1) | ||
126 | return *data; | ||
127 | else if (length == 2) | ||
128 | return be16_to_cpu(*((u16 *)data)); | ||
129 | else | ||
130 | return be32_to_cpu(*((u32 *)data)); | ||
131 | } | ||
132 | |||
133 | /** | ||
134 | * m5mols_read - I2C read function | ||
135 | * @reg: combination of size, category and command for the I2C packet | ||
136 | * @val: read value | ||
137 | */ | ||
138 | int m5mols_read(struct v4l2_subdev *sd, u32 reg, u32 *val) | ||
139 | { | ||
140 | struct i2c_client *client = v4l2_get_subdevdata(sd); | ||
141 | u8 rbuf[M5MOLS_I2C_MAX_SIZE + 1]; | ||
142 | u8 size = I2C_SIZE(reg); | ||
143 | u8 category = I2C_CATEGORY(reg); | ||
144 | u8 cmd = I2C_COMMAND(reg); | ||
145 | struct i2c_msg msg[2]; | ||
146 | u8 wbuf[5]; | ||
147 | int ret; | ||
148 | |||
149 | if (!client->adapter) | ||
150 | return -ENODEV; | ||
151 | |||
152 | if (size != 1 && size != 2 && size != 4) { | ||
153 | v4l2_err(sd, "Wrong data size\n"); | ||
154 | return -EINVAL; | ||
155 | } | ||
156 | |||
157 | msg[0].addr = client->addr; | ||
158 | msg[0].flags = 0; | ||
159 | msg[0].len = 5; | ||
160 | msg[0].buf = wbuf; | ||
161 | wbuf[0] = 5; | ||
162 | wbuf[1] = M5MOLS_BYTE_READ; | ||
163 | wbuf[2] = category; | ||
164 | wbuf[3] = cmd; | ||
165 | wbuf[4] = size; | ||
166 | |||
167 | msg[1].addr = client->addr; | ||
168 | msg[1].flags = I2C_M_RD; | ||
169 | msg[1].len = size + 1; | ||
170 | msg[1].buf = rbuf; | ||
171 | |||
172 | /* minimum stabilization time */ | ||
173 | usleep_range(200, 200); | ||
174 | |||
175 | ret = i2c_transfer(client->adapter, msg, 2); | ||
176 | if (ret < 0) { | ||
177 | v4l2_err(sd, "read failed: size:%d cat:%02x cmd:%02x. %d\n", | ||
178 | size, category, cmd, ret); | ||
179 | return ret; | ||
180 | } | ||
181 | |||
182 | *val = m5mols_swap_byte(&rbuf[1], size); | ||
183 | |||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | /** | ||
188 | * m5mols_write - I2C command write function | ||
189 | * @reg: combination of size, category and command for the I2C packet | ||
190 | * @val: value to write | ||
191 | */ | ||
192 | int m5mols_write(struct v4l2_subdev *sd, u32 reg, u32 val) | ||
193 | { | ||
194 | struct i2c_client *client = v4l2_get_subdevdata(sd); | ||
195 | u8 wbuf[M5MOLS_I2C_MAX_SIZE + 4]; | ||
196 | u8 category = I2C_CATEGORY(reg); | ||
197 | u8 cmd = I2C_COMMAND(reg); | ||
198 | u8 size = I2C_SIZE(reg); | ||
199 | u32 *buf = (u32 *)&wbuf[4]; | ||
200 | struct i2c_msg msg[1]; | ||
201 | int ret; | ||
202 | |||
203 | if (!client->adapter) | ||
204 | return -ENODEV; | ||
205 | |||
206 | if (size != 1 && size != 2 && size != 4) { | ||
207 | v4l2_err(sd, "Wrong data size\n"); | ||
208 | return -EINVAL; | ||
209 | } | ||
210 | |||
211 | msg->addr = client->addr; | ||
212 | msg->flags = 0; | ||
213 | msg->len = (u16)size + 4; | ||
214 | msg->buf = wbuf; | ||
215 | wbuf[0] = size + 4; | ||
216 | wbuf[1] = M5MOLS_BYTE_WRITE; | ||
217 | wbuf[2] = category; | ||
218 | wbuf[3] = cmd; | ||
219 | |||
220 | *buf = m5mols_swap_byte((u8 *)&val, size); | ||
221 | |||
222 | usleep_range(200, 200); | ||
223 | |||
224 | ret = i2c_transfer(client->adapter, msg, 1); | ||
225 | if (ret < 0) { | ||
226 | v4l2_err(sd, "write failed: size:%d cat:%02x cmd:%02x. %d\n", | ||
227 | size, category, cmd, ret); | ||
228 | return ret; | ||
229 | } | ||
230 | |||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | int m5mols_busy(struct v4l2_subdev *sd, u8 category, u8 cmd, u32 mask) | ||
235 | { | ||
236 | u32 busy, i; | ||
237 | int ret; | ||
238 | |||
239 | for (i = 0; i < M5MOLS_I2C_CHECK_RETRY; i++) { | ||
240 | ret = m5mols_read(sd, I2C_REG(category, cmd, 1), &busy); | ||
241 | if (ret < 0) | ||
242 | return ret; | ||
243 | if ((busy & mask) == mask) | ||
244 | return 0; | ||
245 | } | ||
246 | return -EBUSY; | ||
247 | } | ||
248 | |||
249 | /** | ||
250 | * m5mols_enable_interrupt - Clear interrupt pending bits and unmask interrupts | ||
251 | * | ||
252 | * Before writing the desired interrupt value, the INT_FACTOR register should | ||
253 | * be read to clear pending interrupts. | ||
254 | */ | ||
255 | int m5mols_enable_interrupt(struct v4l2_subdev *sd, u32 reg) | ||
256 | { | ||
257 | struct m5mols_info *info = to_m5mols(sd); | ||
258 | u32 mask = is_available_af(info) ? REG_INT_AF : 0; | ||
259 | u32 dummy; | ||
260 | int ret; | ||
261 | |||
262 | ret = m5mols_read(sd, SYSTEM_INT_FACTOR, &dummy); | ||
263 | if (!ret) | ||
264 | ret = m5mols_write(sd, SYSTEM_INT_ENABLE, reg & ~mask); | ||
265 | return ret; | ||
266 | } | ||
267 | |||
268 | /** | ||
269 | * m5mols_reg_mode - Write the mode and check busy status | ||
270 | * | ||
271 | * Changing the M-5MOLS mode always involves a small delay, so the busy | ||
272 | * status must be checked to guarantee the requested mode has been reached. | ||
273 | */ | ||
274 | static int m5mols_reg_mode(struct v4l2_subdev *sd, u32 mode) | ||
275 | { | ||
276 | int ret = m5mols_write(sd, SYSTEM_SYSMODE, mode); | ||
277 | |||
278 | return ret ? ret : m5mols_busy(sd, CAT_SYSTEM, CAT0_SYSMODE, mode); | ||
279 | } | ||
280 | |||
281 | /** | ||
282 | * m5mols_mode - manage the M-5MOLS's mode | ||
283 | * @mode: the required operation mode | ||
284 | * | ||
285 | * The commands of the M-5MOLS are grouped into specific modes. Each function | ||
286 | * is guaranteed to work only when the sensor is operating in the mode to | ||
287 | * which the command belongs. | ||
288 | */ | ||
289 | int m5mols_mode(struct m5mols_info *info, u32 mode) | ||
290 | { | ||
291 | struct v4l2_subdev *sd = &info->sd; | ||
292 | int ret = -EINVAL; | ||
293 | u32 reg; | ||
294 | |||
295 | if (mode < REG_PARAMETER && mode > REG_CAPTURE) | ||
296 | return ret; | ||
297 | |||
298 | ret = m5mols_read(sd, SYSTEM_SYSMODE, ®); | ||
299 | if ((!ret && reg == mode) || ret) | ||
300 | return ret; | ||
301 | |||
302 | switch (reg) { | ||
303 | case REG_PARAMETER: | ||
304 | ret = m5mols_reg_mode(sd, REG_MONITOR); | ||
305 | if (!ret && mode == REG_MONITOR) | ||
306 | break; | ||
307 | if (!ret) | ||
308 | ret = m5mols_reg_mode(sd, REG_CAPTURE); | ||
309 | break; | ||
310 | |||
311 | case REG_MONITOR: | ||
312 | if (mode == REG_PARAMETER) { | ||
313 | ret = m5mols_reg_mode(sd, REG_PARAMETER); | ||
314 | break; | ||
315 | } | ||
316 | |||
317 | ret = m5mols_reg_mode(sd, REG_CAPTURE); | ||
318 | break; | ||
319 | |||
320 | case REG_CAPTURE: | ||
321 | ret = m5mols_reg_mode(sd, REG_MONITOR); | ||
322 | if (!ret && mode == REG_MONITOR) | ||
323 | break; | ||
324 | if (!ret) | ||
325 | ret = m5mols_reg_mode(sd, REG_PARAMETER); | ||
326 | break; | ||
327 | |||
328 | default: | ||
329 | v4l2_warn(sd, "Wrong mode: %d\n", mode); | ||
330 | } | ||
331 | |||
332 | if (!ret) | ||
333 | info->mode = mode; | ||
334 | |||
335 | return ret; | ||
336 | } | ||
337 | |||
338 | /** | ||
339 | * m5mols_get_version - retrieve full revisions information of M-5MOLS | ||
340 | * | ||
341 | * The version information includes revisions of hardware and firmware, | ||
342 | * AutoFocus algorithm version and the version string. | ||
343 | */ | ||
344 | static int m5mols_get_version(struct v4l2_subdev *sd) | ||
345 | { | ||
346 | struct m5mols_info *info = to_m5mols(sd); | ||
347 | union { | ||
348 | struct m5mols_version ver; | ||
349 | u8 bytes[VERSION_SIZE]; | ||
350 | } version; | ||
351 | u32 *value; | ||
352 | u8 cmd = CAT0_VER_CUSTOMER; | ||
353 | int ret; | ||
354 | |||
355 | do { | ||
356 | value = (u32 *)&version.bytes[cmd]; | ||
357 | ret = m5mols_read(sd, SYSTEM_CMD(cmd), value); | ||
358 | if (ret) | ||
359 | return ret; | ||
360 | } while (cmd++ != CAT0_VER_AWB); | ||
361 | |||
362 | do { | ||
363 | value = (u32 *)&version.bytes[cmd]; | ||
364 | ret = m5mols_read(sd, SYSTEM_VER_STRING, value); | ||
365 | if (ret) | ||
366 | return ret; | ||
367 | if (cmd >= VERSION_SIZE - 1) | ||
368 | return -EINVAL; | ||
369 | } while (version.bytes[cmd++]); | ||
370 | |||
371 | value = (u32 *)&version.bytes[cmd]; | ||
372 | ret = m5mols_read(sd, AF_VERSION, value); | ||
373 | if (ret) | ||
374 | return ret; | ||
375 | |||
376 | /* store the version information byte-swapped into readable order */ | ||
377 | info->ver = version.ver; | ||
378 | info->ver.fw = be16_to_cpu(info->ver.fw); | ||
379 | info->ver.hw = be16_to_cpu(info->ver.hw); | ||
380 | info->ver.param = be16_to_cpu(info->ver.param); | ||
381 | info->ver.awb = be16_to_cpu(info->ver.awb); | ||
382 | |||
383 | v4l2_info(sd, "Manufacturer\t[%s]\n", | ||
384 | is_manufacturer(info, REG_SAMSUNG_ELECTRO) ? | ||
385 | "Samsung Electro-Machanics" : | ||
386 | is_manufacturer(info, REG_SAMSUNG_OPTICS) ? | ||
387 | "Samsung Fiber-Optics" : | ||
388 | is_manufacturer(info, REG_SAMSUNG_TECHWIN) ? | ||
389 | "Samsung Techwin" : "None"); | ||
390 | v4l2_info(sd, "Customer/Project\t[0x%02x/0x%02x]\n", | ||
391 | info->ver.customer, info->ver.project); | ||
392 | |||
393 | if (!is_available_af(info)) | ||
394 | v4l2_info(sd, "No support Auto Focus on this firmware\n"); | ||
395 | |||
396 | return ret; | ||
397 | } | ||
398 | |||
399 | /** | ||
400 | * __find_restype - Lookup M-5MOLS resolution type according to pixel code | ||
401 | * @code: pixel code | ||
402 | */ | ||
403 | static enum m5mols_restype __find_restype(enum v4l2_mbus_pixelcode code) | ||
404 | { | ||
405 | enum m5mols_restype type = M5MOLS_RESTYPE_MONITOR; | ||
406 | |||
407 | do { | ||
408 | if (code == m5mols_default_ffmt[type].code) | ||
409 | return type; | ||
411 | } while (++type != SIZE_DEFAULT_FFMT); | ||
411 | |||
412 | return 0; | ||
413 | } | ||
414 | |||
415 | /** | ||
416 | * __find_resolution - Lookup preset and type of M-5MOLS's resolution | ||
417 | * @mf: pixel format to find/negotiate the resolution preset for | ||
418 | * @type: M-5MOLS resolution type | ||
419 | * @resolution: M-5MOLS resolution preset register value | ||
420 | * | ||
421 | * Find nearest resolution matching resolution preset and adjust mf | ||
422 | * to supported values. | ||
423 | */ | ||
424 | static int __find_resolution(struct v4l2_subdev *sd, | ||
425 | struct v4l2_mbus_framefmt *mf, | ||
426 | enum m5mols_restype *type, | ||
427 | u32 *resolution) | ||
428 | { | ||
429 | const struct m5mols_resolution *fsize = &m5mols_reg_res[0]; | ||
430 | const struct m5mols_resolution *match = NULL; | ||
431 | enum m5mols_restype stype = __find_restype(mf->code); | ||
432 | int i = ARRAY_SIZE(m5mols_reg_res); | ||
433 | unsigned int min_err = ~0; | ||
434 | |||
435 | while (i--) { | ||
436 | int err; | ||
437 | if (stype == fsize->type) { | ||
438 | err = abs(fsize->width - mf->width) | ||
439 | + abs(fsize->height - mf->height); | ||
440 | |||
441 | if (err < min_err) { | ||
442 | min_err = err; | ||
443 | match = fsize; | ||
444 | } | ||
445 | } | ||
446 | fsize++; | ||
447 | } | ||
448 | if (match) { | ||
449 | mf->width = match->width; | ||
450 | mf->height = match->height; | ||
451 | *resolution = match->reg; | ||
452 | *type = stype; | ||
453 | return 0; | ||
454 | } | ||
455 | |||
456 | return -EINVAL; | ||
457 | } | ||
458 | |||
459 | static struct v4l2_mbus_framefmt *__find_format(struct m5mols_info *info, | ||
460 | struct v4l2_subdev_fh *fh, | ||
461 | enum v4l2_subdev_format_whence which, | ||
462 | enum m5mols_restype type) | ||
463 | { | ||
464 | if (which == V4L2_SUBDEV_FORMAT_TRY) | ||
465 | return fh ? v4l2_subdev_get_try_format(fh, 0) : NULL; | ||
466 | |||
467 | return &info->ffmt[type]; | ||
468 | } | ||
469 | |||
470 | static int m5mols_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, | ||
471 | struct v4l2_subdev_format *fmt) | ||
472 | { | ||
473 | struct m5mols_info *info = to_m5mols(sd); | ||
474 | struct v4l2_mbus_framefmt *format; | ||
475 | |||
476 | if (fmt->pad != 0) | ||
477 | return -EINVAL; | ||
478 | |||
479 | format = __find_format(info, fh, fmt->which, info->res_type); | ||
480 | if (!format) | ||
481 | return -EINVAL; | ||
482 | |||
483 | fmt->format = *format; | ||
484 | return 0; | ||
485 | } | ||
486 | |||
487 | static int m5mols_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, | ||
488 | struct v4l2_subdev_format *fmt) | ||
489 | { | ||
490 | struct m5mols_info *info = to_m5mols(sd); | ||
491 | struct v4l2_mbus_framefmt *format = &fmt->format; | ||
492 | struct v4l2_mbus_framefmt *sfmt; | ||
493 | enum m5mols_restype type; | ||
494 | u32 resolution = 0; | ||
495 | int ret; | ||
496 | |||
497 | if (fmt->pad != 0) | ||
498 | return -EINVAL; | ||
499 | |||
500 | ret = __find_resolution(sd, format, &type, &resolution); | ||
501 | if (ret < 0) | ||
502 | return ret; | ||
503 | |||
504 | sfmt = __find_format(info, fh, fmt->which, type); | ||
505 | if (!sfmt) | ||
506 | return 0; | ||
507 | |||
508 | *sfmt = m5mols_default_ffmt[type]; | ||
509 | sfmt->width = format->width; | ||
510 | sfmt->height = format->height; | ||
511 | |||
512 | if (fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) { | ||
513 | info->resolution = resolution; | ||
514 | info->code = format->code; | ||
515 | info->res_type = type; | ||
516 | } | ||
517 | |||
518 | return 0; | ||
519 | } | ||
520 | |||
521 | static int m5mols_enum_mbus_code(struct v4l2_subdev *sd, | ||
522 | struct v4l2_subdev_fh *fh, | ||
523 | struct v4l2_subdev_mbus_code_enum *code) | ||
524 | { | ||
525 | if (!code || code->index >= SIZE_DEFAULT_FFMT) | ||
526 | return -EINVAL; | ||
527 | |||
528 | code->code = m5mols_default_ffmt[code->index].code; | ||
529 | |||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | static struct v4l2_subdev_pad_ops m5mols_pad_ops = { | ||
534 | .enum_mbus_code = m5mols_enum_mbus_code, | ||
535 | .get_fmt = m5mols_get_fmt, | ||
536 | .set_fmt = m5mols_set_fmt, | ||
537 | }; | ||
538 | |||
539 | /** | ||
540 | * m5mols_sync_controls - Apply default scene mode and the current controls | ||
541 | * | ||
542 | * This is used only when streaming starts, to sync the v4l2_ctrl framework | ||
543 | * with the m5mols controls. First the default scene mode is applied to the | ||
544 | * sensor, then v4l2_ctrl_handler_setup() is called. Some of the default | ||
545 | * v4l2_ctrls overlap with registers written by the scene mode; such controls | ||
546 | * take priority over the scene mode settings, so they are applied afterwards. | ||
547 | */ | ||
548 | int m5mols_sync_controls(struct m5mols_info *info) | ||
549 | { | ||
550 | int ret = -EINVAL; | ||
551 | |||
552 | if (!is_ctrl_synced(info)) { | ||
553 | ret = m5mols_do_scenemode(info, REG_SCENE_NORMAL); | ||
554 | if (ret) | ||
555 | return ret; | ||
556 | |||
557 | v4l2_ctrl_handler_setup(&info->handle); | ||
558 | info->ctrl_sync = true; | ||
559 | } | ||
560 | |||
561 | return ret; | ||
562 | } | ||
563 | |||
564 | /** | ||
565 | * m5mols_start_monitor - Start the monitor mode | ||
566 | * | ||
567 | * Before applying the controls, set up the resolution and frame rate | ||
568 | * in PARAMETER mode, and then switch over to MONITOR mode. | ||
569 | */ | ||
570 | static int m5mols_start_monitor(struct m5mols_info *info) | ||
571 | { | ||
572 | struct v4l2_subdev *sd = &info->sd; | ||
573 | int ret; | ||
574 | |||
575 | ret = m5mols_mode(info, REG_PARAMETER); | ||
576 | if (!ret) | ||
577 | ret = m5mols_write(sd, PARM_MON_SIZE, info->resolution); | ||
578 | if (!ret) | ||
579 | ret = m5mols_write(sd, PARM_MON_FPS, REG_FPS_30); | ||
580 | if (!ret) | ||
581 | ret = m5mols_mode(info, REG_MONITOR); | ||
582 | if (!ret) | ||
583 | ret = m5mols_sync_controls(info); | ||
584 | |||
585 | return ret; | ||
586 | } | ||
587 | |||
588 | static int m5mols_s_stream(struct v4l2_subdev *sd, int enable) | ||
589 | { | ||
590 | struct m5mols_info *info = to_m5mols(sd); | ||
591 | |||
592 | if (enable) { | ||
593 | int ret = -EINVAL; | ||
594 | |||
595 | if (is_code(info->code, M5MOLS_RESTYPE_MONITOR)) | ||
596 | ret = m5mols_start_monitor(info); | ||
597 | if (is_code(info->code, M5MOLS_RESTYPE_CAPTURE)) | ||
598 | ret = m5mols_start_capture(info); | ||
599 | |||
600 | return ret; | ||
601 | } | ||
602 | |||
603 | return m5mols_mode(info, REG_PARAMETER); | ||
604 | } | ||
605 | |||
606 | static const struct v4l2_subdev_video_ops m5mols_video_ops = { | ||
607 | .s_stream = m5mols_s_stream, | ||
608 | }; | ||
609 | |||
610 | static int m5mols_s_ctrl(struct v4l2_ctrl *ctrl) | ||
611 | { | ||
612 | struct v4l2_subdev *sd = to_sd(ctrl); | ||
613 | struct m5mols_info *info = to_m5mols(sd); | ||
614 | int ret; | ||
615 | |||
616 | info->mode_save = info->mode; | ||
617 | |||
618 | ret = m5mols_mode(info, REG_PARAMETER); | ||
619 | if (!ret) | ||
620 | ret = m5mols_set_ctrl(ctrl); | ||
621 | if (!ret) | ||
622 | ret = m5mols_mode(info, info->mode_save); | ||
623 | |||
624 | return ret; | ||
625 | } | ||
626 | |||
627 | static const struct v4l2_ctrl_ops m5mols_ctrl_ops = { | ||
628 | .s_ctrl = m5mols_s_ctrl, | ||
629 | }; | ||
630 | |||
631 | static int m5mols_sensor_power(struct m5mols_info *info, bool enable) | ||
632 | { | ||
633 | struct v4l2_subdev *sd = &info->sd; | ||
634 | struct i2c_client *client = v4l2_get_subdevdata(sd); | ||
635 | const struct m5mols_platform_data *pdata = info->pdata; | ||
636 | int ret; | ||
637 | |||
638 | if (enable) { | ||
639 | if (is_powered(info)) | ||
640 | return 0; | ||
641 | |||
642 | if (info->set_power) { | ||
643 | ret = info->set_power(&client->dev, 1); | ||
644 | if (ret) | ||
645 | return ret; | ||
646 | } | ||
647 | |||
648 | ret = regulator_bulk_enable(ARRAY_SIZE(supplies), supplies); | ||
649 | if (ret) { | ||
650 | info->set_power(&client->dev, 0); | ||
651 | return ret; | ||
652 | } | ||
653 | |||
654 | gpio_set_value(pdata->gpio_reset, !pdata->reset_polarity); | ||
655 | usleep_range(1000, 1000); | ||
656 | info->power = true; | ||
657 | |||
658 | return ret; | ||
659 | } | ||
660 | |||
661 | if (!is_powered(info)) | ||
662 | return 0; | ||
663 | |||
664 | ret = regulator_bulk_disable(ARRAY_SIZE(supplies), supplies); | ||
665 | if (ret) | ||
666 | return ret; | ||
667 | |||
668 | if (info->set_power) | ||
669 | info->set_power(&client->dev, 0); | ||
670 | |||
671 | gpio_set_value(pdata->gpio_reset, pdata->reset_polarity); | ||
672 | usleep_range(1000, 1000); | ||
673 | info->power = false; | ||
674 | |||
675 | return ret; | ||
676 | } | ||
677 | |||
678 | /* m5mols_update_fw - optional firmware update routine */ | ||
679 | int __attribute__ ((weak)) m5mols_update_fw(struct v4l2_subdev *sd, | ||
680 | int (*set_power)(struct m5mols_info *, bool)) | ||
681 | { | ||
682 | return 0; | ||
683 | } | ||
684 | |||
685 | /** | ||
686 | * m5mols_sensor_armboot - Booting M-5MOLS internal ARM core. | ||
687 | * | ||
688 | * Booting the internal ARM core makes the M-5MOLS ready to accept commands | ||
689 | * over I2C. It is the first thing to be done after power-up. After issuing | ||
690 | * the ARM boot command, wait at least the 520ms recommended by the datasheet. | ||
691 | */ | ||
692 | static int m5mols_sensor_armboot(struct v4l2_subdev *sd) | ||
693 | { | ||
694 | int ret; | ||
695 | |||
696 | ret = m5mols_write(sd, FLASH_CAM_START, REG_START_ARM_BOOT); | ||
697 | if (ret < 0) | ||
698 | return ret; | ||
699 | |||
700 | msleep(520); | ||
701 | |||
702 | ret = m5mols_get_version(sd); | ||
703 | if (!ret) | ||
704 | ret = m5mols_update_fw(sd, m5mols_sensor_power); | ||
705 | if (ret) | ||
706 | return ret; | ||
707 | |||
708 | v4l2_dbg(1, m5mols_debug, sd, "Success ARM Booting\n"); | ||
709 | |||
710 | ret = m5mols_write(sd, PARM_INTERFACE, REG_INTERFACE_MIPI); | ||
711 | if (!ret) | ||
712 | ret = m5mols_enable_interrupt(sd, REG_INT_AF); | ||
713 | |||
714 | return ret; | ||
715 | } | ||
716 | |||
717 | static int m5mols_init_controls(struct m5mols_info *info) | ||
718 | { | ||
719 | struct v4l2_subdev *sd = &info->sd; | ||
720 | u16 max_exposure; | ||
721 | u16 step_zoom; | ||
722 | int ret; | ||
723 | |||
724 | /* Determine the value range & step of the controls for various FW versions */ | ||
725 | ret = m5mols_read(sd, AE_MAX_GAIN_MON, (u32 *)&max_exposure); | ||
726 | if (!ret) | ||
727 | step_zoom = is_manufacturer(info, REG_SAMSUNG_OPTICS) ? 31 : 1; | ||
728 | if (ret) | ||
729 | return ret; | ||
730 | |||
731 | v4l2_ctrl_handler_init(&info->handle, 6); | ||
732 | info->autowb = v4l2_ctrl_new_std(&info->handle, | ||
733 | &m5mols_ctrl_ops, V4L2_CID_AUTO_WHITE_BALANCE, | ||
734 | 0, 1, 1, 0); | ||
735 | info->saturation = v4l2_ctrl_new_std(&info->handle, | ||
736 | &m5mols_ctrl_ops, V4L2_CID_SATURATION, | ||
737 | 1, 5, 1, 3); | ||
738 | info->zoom = v4l2_ctrl_new_std(&info->handle, | ||
739 | &m5mols_ctrl_ops, V4L2_CID_ZOOM_ABSOLUTE, | ||
740 | 1, 70, step_zoom, 1); | ||
741 | info->exposure = v4l2_ctrl_new_std(&info->handle, | ||
742 | &m5mols_ctrl_ops, V4L2_CID_EXPOSURE, | ||
743 | 0, max_exposure, 1, (int)max_exposure/2); | ||
744 | info->colorfx = v4l2_ctrl_new_std_menu(&info->handle, | ||
745 | &m5mols_ctrl_ops, V4L2_CID_COLORFX, | ||
746 | 4, (1 << V4L2_COLORFX_BW), V4L2_COLORFX_NONE); | ||
747 | info->autoexposure = v4l2_ctrl_new_std_menu(&info->handle, | ||
748 | &m5mols_ctrl_ops, V4L2_CID_EXPOSURE_AUTO, | ||
749 | 1, 0, V4L2_EXPOSURE_MANUAL); | ||
750 | |||
751 | sd->ctrl_handler = &info->handle; | ||
752 | if (info->handle.error) { | ||
753 | v4l2_err(sd, "Failed to initialize controls: %d\n", ret); | ||
754 | v4l2_ctrl_handler_free(&info->handle); | ||
755 | return info->handle.error; | ||
756 | } | ||
757 | |||
758 | v4l2_ctrl_cluster(2, &info->autoexposure); | ||
759 | |||
760 | return 0; | ||
761 | } | ||
762 | |||
763 | /** | ||
764 | * m5mols_s_power - Main sensor power control function | ||
765 | * | ||
766 | * To prevent breaking the lens when the sensor is powered off, the Soft-Landing | ||
767 | * algorithm is called where available. The Soft-Landing algorithm's availability | ||
768 | * depends on the firmware provider. | ||
769 | */ | ||
770 | static int m5mols_s_power(struct v4l2_subdev *sd, int on) | ||
771 | { | ||
772 | struct m5mols_info *info = to_m5mols(sd); | ||
773 | int ret; | ||
774 | |||
775 | if (on) { | ||
776 | ret = m5mols_sensor_power(info, true); | ||
777 | if (!ret) | ||
778 | ret = m5mols_sensor_armboot(sd); | ||
779 | if (!ret) | ||
780 | ret = m5mols_init_controls(info); | ||
781 | if (ret) | ||
782 | return ret; | ||
783 | |||
784 | info->ffmt[M5MOLS_RESTYPE_MONITOR] = | ||
785 | m5mols_default_ffmt[M5MOLS_RESTYPE_MONITOR]; | ||
786 | info->ffmt[M5MOLS_RESTYPE_CAPTURE] = | ||
787 | m5mols_default_ffmt[M5MOLS_RESTYPE_CAPTURE]; | ||
788 | return ret; | ||
789 | } | ||
790 | |||
791 | if (is_manufacturer(info, REG_SAMSUNG_TECHWIN)) { | ||
792 | ret = m5mols_mode(info, REG_MONITOR); | ||
793 | if (!ret) | ||
794 | ret = m5mols_write(sd, AF_EXECUTE, REG_AF_STOP); | ||
795 | if (!ret) | ||
796 | ret = m5mols_write(sd, AF_MODE, REG_AF_POWEROFF); | ||
797 | if (!ret) | ||
798 | ret = m5mols_busy(sd, CAT_SYSTEM, CAT0_STATUS, | ||
799 | REG_AF_IDLE); | ||
800 | if (!ret) | ||
801 | v4l2_info(sd, "Success soft-landing lens\n"); | ||
802 | } | ||
803 | |||
804 | ret = m5mols_sensor_power(info, false); | ||
805 | if (!ret) { | ||
806 | v4l2_ctrl_handler_free(&info->handle); | ||
807 | info->ctrl_sync = false; | ||
808 | } | ||
809 | |||
810 | return ret; | ||
811 | } | ||
812 | |||
813 | static int m5mols_log_status(struct v4l2_subdev *sd) | ||
814 | { | ||
815 | struct m5mols_info *info = to_m5mols(sd); | ||
816 | |||
817 | v4l2_ctrl_handler_log_status(&info->handle, sd->name); | ||
818 | |||
819 | return 0; | ||
820 | } | ||
821 | |||
822 | static const struct v4l2_subdev_core_ops m5mols_core_ops = { | ||
823 | .s_power = m5mols_s_power, | ||
824 | .g_ctrl = v4l2_subdev_g_ctrl, | ||
825 | .s_ctrl = v4l2_subdev_s_ctrl, | ||
826 | .queryctrl = v4l2_subdev_queryctrl, | ||
827 | .querymenu = v4l2_subdev_querymenu, | ||
828 | .g_ext_ctrls = v4l2_subdev_g_ext_ctrls, | ||
829 | .try_ext_ctrls = v4l2_subdev_try_ext_ctrls, | ||
830 | .s_ext_ctrls = v4l2_subdev_s_ext_ctrls, | ||
831 | .log_status = m5mols_log_status, | ||
832 | }; | ||
833 | |||
834 | static const struct v4l2_subdev_ops m5mols_ops = { | ||
835 | .core = &m5mols_core_ops, | ||
836 | .pad = &m5mols_pad_ops, | ||
837 | .video = &m5mols_video_ops, | ||
838 | }; | ||
839 | |||
840 | static void m5mols_irq_work(struct work_struct *work) | ||
841 | { | ||
842 | struct m5mols_info *info = | ||
843 | container_of(work, struct m5mols_info, work_irq); | ||
844 | struct v4l2_subdev *sd = &info->sd; | ||
845 | u32 reg; | ||
846 | int ret; | ||
847 | |||
848 | if (!is_powered(info) || | ||
849 | m5mols_read(sd, SYSTEM_INT_FACTOR, &info->interrupt)) | ||
850 | return; | ||
851 | |||
852 | switch (info->interrupt & REG_INT_MASK) { | ||
853 | case REG_INT_AF: | ||
854 | if (!is_available_af(info)) | ||
855 | break; | ||
856 | ret = m5mols_read(sd, AF_STATUS, ®); | ||
857 | v4l2_dbg(2, m5mols_debug, sd, "AF %s\n", | ||
858 | reg == REG_AF_FAIL ? "Failed" : | ||
859 | reg == REG_AF_SUCCESS ? "Success" : | ||
860 | reg == REG_AF_IDLE ? "Idle" : "Busy"); | ||
861 | break; | ||
862 | case REG_INT_CAPTURE: | ||
863 | if (!test_and_set_bit(ST_CAPT_IRQ, &info->flags)) | ||
864 | wake_up_interruptible(&info->irq_waitq); | ||
865 | |||
866 | v4l2_dbg(2, m5mols_debug, sd, "CAPTURE\n"); | ||
867 | break; | ||
868 | default: | ||
869 | v4l2_dbg(2, m5mols_debug, sd, "Undefined: %02x\n", info->interrupt); | ||
870 | break; | ||
871 | } | ||
872 | } | ||
873 | |||
874 | static irqreturn_t m5mols_irq_handler(int irq, void *data) | ||
875 | { | ||
876 | struct v4l2_subdev *sd = data; | ||
877 | struct m5mols_info *info = to_m5mols(sd); | ||
878 | |||
879 | schedule_work(&info->work_irq); | ||
880 | |||
881 | return IRQ_HANDLED; | ||
882 | } | ||
883 | |||
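Note on the interrupt path above: the M-5MOLS interrupt status sits behind I2C, which may sleep, so the hard IRQ handler only schedules a work item and the register read happens in the workqueue bottom half. A minimal sketch of that split, with a hypothetical my_dev structure standing in for struct m5mols_info (request_threaded_irq() would achieve the same separation without an explicit work item):

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/workqueue.h>

struct my_dev {
	struct work_struct work_irq;
	/* I2C client, cached status, wait queues, ... */
};

/* Bottom half: sleeping bus accesses (e.g. i2c_transfer()) are allowed
 * here, so this is where the interrupt status register gets read. */
static void my_irq_work(struct work_struct *work)
{
	struct my_dev *dev = container_of(work, struct my_dev, work_irq);

	/* read the status register over I2C and dispatch on its bits */
	(void)dev;
}

/* Hard IRQ context: must not sleep, so only defer the real work. */
static irqreturn_t my_irq_handler(int irq, void *data)
{
	struct my_dev *dev = data;

	schedule_work(&dev->work_irq);
	return IRQ_HANDLED;
}

/* Probe-time wiring (sketch):
 *	INIT_WORK(&dev->work_irq, my_irq_work);
 *	request_irq(irq, my_irq_handler, IRQF_TRIGGER_RISING, "my-dev", dev);
 */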
884 | static int __devinit m5mols_probe(struct i2c_client *client, | ||
885 | const struct i2c_device_id *id) | ||
886 | { | ||
887 | const struct m5mols_platform_data *pdata = client->dev.platform_data; | ||
888 | struct m5mols_info *info; | ||
889 | struct v4l2_subdev *sd; | ||
890 | int ret; | ||
891 | |||
892 | if (pdata == NULL) { | ||
893 | dev_err(&client->dev, "No platform data\n"); | ||
894 | return -EINVAL; | ||
895 | } | ||
896 | |||
897 | if (!gpio_is_valid(pdata->gpio_reset)) { | ||
898 | dev_err(&client->dev, "No valid RESET GPIO specified\n"); | ||
899 | return -EINVAL; | ||
900 | } | ||
901 | |||
902 | if (!pdata->irq) { | ||
903 | dev_err(&client->dev, "Interrupt not assigned\n"); | ||
904 | return -EINVAL; | ||
905 | } | ||
906 | |||
907 | info = kzalloc(sizeof(struct m5mols_info), GFP_KERNEL); | ||
908 | if (!info) | ||
909 | return -ENOMEM; | ||
910 | |||
911 | info->pdata = pdata; | ||
912 | info->set_power = pdata->set_power; | ||
913 | |||
914 | ret = gpio_request(pdata->gpio_reset, "M5MOLS_NRST"); | ||
915 | if (ret) { | ||
916 | dev_err(&client->dev, "Failed to request gpio: %d\n", ret); | ||
917 | goto out_free; | ||
918 | } | ||
919 | gpio_direction_output(pdata->gpio_reset, pdata->reset_polarity); | ||
920 | |||
921 | ret = regulator_bulk_get(&client->dev, ARRAY_SIZE(supplies), supplies); | ||
922 | if (ret) { | ||
923 | dev_err(&client->dev, "Failed to get regulators: %d\n", ret); | ||
924 | goto out_gpio; | ||
925 | } | ||
926 | |||
927 | sd = &info->sd; | ||
928 | strlcpy(sd->name, MODULE_NAME, sizeof(sd->name)); | ||
929 | v4l2_i2c_subdev_init(sd, client, &m5mols_ops); | ||
930 | |||
931 | info->pad.flags = MEDIA_PAD_FL_SOURCE; | ||
932 | ret = media_entity_init(&sd->entity, 1, &info->pad, 0); | ||
933 | if (ret < 0) | ||
934 | goto out_reg; | ||
935 | sd->entity.type = MEDIA_ENT_T_V4L2_SUBDEV_SENSOR; | ||
936 | |||
937 | init_waitqueue_head(&info->irq_waitq); | ||
938 | INIT_WORK(&info->work_irq, m5mols_irq_work); | ||
939 | ret = request_irq(pdata->irq, m5mols_irq_handler, | ||
940 | IRQF_TRIGGER_RISING, MODULE_NAME, sd); | ||
941 | if (ret) { | ||
942 | dev_err(&client->dev, "Interrupt request failed: %d\n", ret); | ||
943 | goto out_me; | ||
944 | } | ||
945 | info->res_type = M5MOLS_RESTYPE_MONITOR; | ||
946 | return 0; | ||
947 | out_me: | ||
948 | media_entity_cleanup(&sd->entity); | ||
949 | out_reg: | ||
950 | regulator_bulk_free(ARRAY_SIZE(supplies), supplies); | ||
951 | out_gpio: | ||
952 | gpio_free(pdata->gpio_reset); | ||
953 | out_free: | ||
954 | kfree(info); | ||
955 | return ret; | ||
956 | } | ||
957 | |||
958 | static int __devexit m5mols_remove(struct i2c_client *client) | ||
959 | { | ||
960 | struct v4l2_subdev *sd = i2c_get_clientdata(client); | ||
961 | struct m5mols_info *info = to_m5mols(sd); | ||
962 | |||
963 | v4l2_device_unregister_subdev(sd); | ||
964 | free_irq(info->pdata->irq, sd); | ||
965 | |||
966 | regulator_bulk_free(ARRAY_SIZE(supplies), supplies); | ||
967 | gpio_free(info->pdata->gpio_reset); | ||
968 | media_entity_cleanup(&sd->entity); | ||
969 | kfree(info); | ||
970 | return 0; | ||
971 | } | ||
972 | |||
973 | static const struct i2c_device_id m5mols_id[] = { | ||
974 | { MODULE_NAME, 0 }, | ||
975 | { }, | ||
976 | }; | ||
977 | MODULE_DEVICE_TABLE(i2c, m5mols_id); | ||
978 | |||
979 | static struct i2c_driver m5mols_i2c_driver = { | ||
980 | .driver = { | ||
981 | .name = MODULE_NAME, | ||
982 | }, | ||
983 | .probe = m5mols_probe, | ||
984 | .remove = __devexit_p(m5mols_remove), | ||
985 | .id_table = m5mols_id, | ||
986 | }; | ||
987 | |||
988 | static int __init m5mols_mod_init(void) | ||
989 | { | ||
990 | return i2c_add_driver(&m5mols_i2c_driver); | ||
991 | } | ||
992 | |||
993 | static void __exit m5mols_mod_exit(void) | ||
994 | { | ||
995 | i2c_del_driver(&m5mols_i2c_driver); | ||
996 | } | ||
997 | |||
998 | module_init(m5mols_mod_init); | ||
999 | module_exit(m5mols_mod_exit); | ||
1000 | |||
1001 | MODULE_AUTHOR("HeungJun Kim <riverful.kim@samsung.com>"); | ||
1002 | MODULE_AUTHOR("Dongsoo Kim <dongsoo45.kim@samsung.com>"); | ||
1003 | MODULE_DESCRIPTION("Fujitsu M-5MOLS 8M Pixel camera driver"); | ||
1004 | MODULE_LICENSE("GPL"); | ||
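For reference, m5mols_init_controls() above follows the standard v4l2_ctrl handler sequence: initialize the handler with a hint of the number of controls, create each control, check handler->error once at the end, and free the handler on failure. A stripped-down sketch of that sequence; the control ranges below are arbitrary example values, not taken from the M-5MOLS hardware:

#include <media/v4l2-ctrls.h>
#include <media/v4l2-subdev.h>

/* Register two controls on a sub-device; the handler accumulates any
 * allocation error, so a single check at the end is enough. */
static int example_init_controls(struct v4l2_subdev *sd,
				 struct v4l2_ctrl_handler *handler,
				 const struct v4l2_ctrl_ops *ops)
{
	v4l2_ctrl_handler_init(handler, 2);

	/* id, min, max, step, default -- example values only */
	v4l2_ctrl_new_std(handler, ops, V4L2_CID_SATURATION, 1, 5, 1, 3);
	v4l2_ctrl_new_std(handler, ops, V4L2_CID_AUTO_WHITE_BALANCE, 0, 1, 1, 0);

	if (handler->error) {
		int err = handler->error;

		v4l2_ctrl_handler_free(handler);
		return err;
	}

	sd->ctrl_handler = handler;
	return 0;
}

Clustering related controls with v4l2_ctrl_cluster(), as the driver does for the auto-exposure/exposure pair, makes the framework hand them to the s_ctrl callback as one consistent group.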
diff --git a/drivers/media/video/m5mols/m5mols_reg.h b/drivers/media/video/m5mols/m5mols_reg.h new file mode 100644 index 000000000000..b83e36fc6ac6 --- /dev/null +++ b/drivers/media/video/m5mols/m5mols_reg.h | |||
@@ -0,0 +1,399 @@ | |||
1 | /* | ||
2 | * Register map for M-5MOLS 8M Pixel camera sensor with ISP | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics Co., Ltd. | ||
5 | * Author: HeungJun Kim, riverful.kim@samsung.com | ||
6 | * | ||
7 | * Copyright (C) 2009 Samsung Electronics Co., Ltd. | ||
8 | * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #ifndef M5MOLS_REG_H | ||
17 | #define M5MOLS_REG_H | ||
18 | |||
19 | #define M5MOLS_I2C_MAX_SIZE 4 | ||
20 | #define M5MOLS_BYTE_READ 0x01 | ||
21 | #define M5MOLS_BYTE_WRITE 0x02 | ||
22 | |||
23 | #define I2C_CATEGORY(__cat) ((__cat >> 16) & 0xff) | ||
24 | #define I2C_COMMAND(__comm) ((__comm >> 8) & 0xff) | ||
25 | #define I2C_SIZE(__reg_s) ((__reg_s) & 0xff) | ||
26 | #define I2C_REG(__cat, __cmd, __reg_s) ((__cat << 16) | (__cmd << 8) | __reg_s) | ||
27 | |||
28 | /* | ||
29 | * Category section register | ||
30 | * | ||
31 | * A category is a set of related M-5MOLS commands. | ||
32 | */ | ||
33 | #define CAT_SYSTEM 0x00 | ||
34 | #define CAT_PARAM 0x01 | ||
35 | #define CAT_MONITOR 0x02 | ||
36 | #define CAT_AE 0x03 | ||
37 | #define CAT_WB 0x06 | ||
38 | #define CAT_EXIF 0x07 | ||
39 | #define CAT_FD 0x09 | ||
40 | #define CAT_LENS 0x0a | ||
41 | #define CAT_CAPT_PARM 0x0b | ||
42 | #define CAT_CAPT_CTRL 0x0c | ||
43 | #define CAT_FLASH 0x0f /* related to FW, revisions, booting */ | ||
44 | |||
45 | /* | ||
46 | * Category 0 - SYSTEM mode | ||
47 | * | ||
48 | * The SYSTEM mode of the M-5MOLS covers the sensor-wide functionality: it | ||
49 | * deals with the version, interrupt and mode-setting registers as well as the | ||
50 | * sensor status. The M-5MOLS sensor with ISP varies by packaging and | ||
51 | * manufacturer, and even by customer and project code, and the function | ||
52 | * details may vary among them. The version information helps to determine | ||
53 | * which methods shall be used in the driver. | ||
54 | * | ||
55 | * There are many registers between the customer version address and the AWB | ||
56 | * one. For more specific contents, see the definitions in the file m5mols.h. | ||
57 | */ | ||
58 | #define CAT0_VER_CUSTOMER 0x00 /* customer version */ | ||
59 | #define CAT0_VER_AWB 0x09 /* Auto WB version */ | ||
60 | #define CAT0_VER_STRING 0x0a /* string including M-5MOLS */ | ||
61 | #define CAT0_SYSMODE 0x0b /* SYSTEM mode register */ | ||
62 | #define CAT0_STATUS 0x0c /* SYSTEM mode status register */ | ||
63 | #define CAT0_INT_FACTOR 0x10 /* interrupt pending register */ | ||
64 | #define CAT0_INT_ENABLE 0x11 /* interrupt enable register */ | ||
65 | |||
66 | #define SYSTEM_SYSMODE I2C_REG(CAT_SYSTEM, CAT0_SYSMODE, 1) | ||
67 | #define REG_SYSINIT 0x00 /* SYSTEM mode */ | ||
68 | #define REG_PARAMETER 0x01 /* PARAMETER mode */ | ||
69 | #define REG_MONITOR 0x02 /* MONITOR mode */ | ||
70 | #define REG_CAPTURE 0x03 /* CAPTURE mode */ | ||
71 | |||
72 | #define SYSTEM_CMD(__cmd) I2C_REG(CAT_SYSTEM, __cmd, 1) | ||
73 | #define SYSTEM_VER_STRING I2C_REG(CAT_SYSTEM, CAT0_VER_STRING, 1) | ||
74 | #define REG_SAMSUNG_ELECTRO "SE" /* Samsung Electro-Mechanics */ | ||
75 | #define REG_SAMSUNG_OPTICS "OP" /* Samsung Fiber-Optics */ | ||
76 | #define REG_SAMSUNG_TECHWIN "TB" /* Samsung Techwin */ | ||
77 | |||
78 | #define SYSTEM_INT_FACTOR I2C_REG(CAT_SYSTEM, CAT0_INT_FACTOR, 1) | ||
79 | #define SYSTEM_INT_ENABLE I2C_REG(CAT_SYSTEM, CAT0_INT_ENABLE, 1) | ||
80 | #define REG_INT_MODE (1 << 0) | ||
81 | #define REG_INT_AF (1 << 1) | ||
82 | #define REG_INT_ZOOM (1 << 2) | ||
83 | #define REG_INT_CAPTURE (1 << 3) | ||
84 | #define REG_INT_FRAMESYNC (1 << 4) | ||
85 | #define REG_INT_FD (1 << 5) | ||
86 | #define REG_INT_LENS_INIT (1 << 6) | ||
87 | #define REG_INT_SOUND (1 << 7) | ||
88 | #define REG_INT_MASK 0x0f | ||
89 | |||
90 | /* | ||
91 | * Category 1 - PARAMETER mode | ||
92 | * | ||
93 | * This category covers the camera parameters of the M-5MOLS: the preview | ||
94 | * (MONITOR) resolution, the frame rate, the interface between the sensor and | ||
95 | * the Application Processor, and the image effects. | ||
96 | */ | ||
97 | #define CAT1_DATA_INTERFACE 0x00 /* interface between sensor and AP */ | ||
98 | #define CAT1_MONITOR_SIZE 0x01 /* resolution at the MONITOR mode */ | ||
99 | #define CAT1_MONITOR_FPS 0x02 /* frame per second at this mode */ | ||
100 | #define CAT1_EFFECT 0x0b /* image effects */ | ||
101 | |||
102 | #define PARM_MON_SIZE I2C_REG(CAT_PARAM, CAT1_MONITOR_SIZE, 1) | ||
103 | |||
104 | #define PARM_MON_FPS I2C_REG(CAT_PARAM, CAT1_MONITOR_FPS, 1) | ||
105 | #define REG_FPS_30 0x02 | ||
106 | |||
107 | #define PARM_INTERFACE I2C_REG(CAT_PARAM, CAT1_DATA_INTERFACE, 1) | ||
108 | #define REG_INTERFACE_MIPI 0x02 | ||
109 | |||
110 | #define PARM_EFFECT I2C_REG(CAT_PARAM, CAT1_EFFECT, 1) | ||
111 | #define REG_EFFECT_OFF 0x00 | ||
112 | #define REG_EFFECT_NEGA 0x01 | ||
113 | #define REG_EFFECT_EMBOSS 0x06 | ||
114 | #define REG_EFFECT_OUTLINE 0x07 | ||
115 | #define REG_EFFECT_WATERCOLOR 0x08 | ||
116 | |||
117 | /* | ||
118 | * Category 2 - MONITOR mode | ||
119 | * | ||
120 | * The MONITOR mode is what is commonly called the preview mode. The M-5MOLS | ||
121 | * also has a mode named "Preview", but that one is only used in a specific | ||
122 | * video-recording case. The MONITOR mode supports only the YUYV format; the | ||
123 | * JPEG & RAW formats are supported by the CAPTURE mode. There are also other | ||
124 | * options such as zoom, color effects (different from the effect in PARAMETER | ||
125 | * mode) and an anti hand-shaking algorithm. | ||
126 | */ | ||
127 | #define CAT2_ZOOM 0x01 /* set the zoom position & execute */ | ||
128 | #define CAT2_ZOOM_STEP 0x03 /* set the zoom step */ | ||
129 | #define CAT2_CFIXB 0x09 /* CB value for color effect */ | ||
130 | #define CAT2_CFIXR 0x0a /* CR value for color effect */ | ||
131 | #define CAT2_COLOR_EFFECT 0x0b /* set on/off of color effect */ | ||
132 | #define CAT2_CHROMA_LVL 0x0f /* set chroma level */ | ||
133 | #define CAT2_CHROMA_EN 0x10 /* set on/off of chroma */ | ||
134 | #define CAT2_EDGE_LVL 0x11 /* set sharpness level */ | ||
135 | #define CAT2_EDGE_EN 0x12 /* set on/off sharpness */ | ||
136 | #define CAT2_TONE_CTL 0x25 /* set tone color(contrast) */ | ||
137 | |||
138 | #define MON_ZOOM I2C_REG(CAT_MONITOR, CAT2_ZOOM, 1) | ||
139 | |||
140 | #define MON_CFIXR I2C_REG(CAT_MONITOR, CAT2_CFIXR, 1) | ||
141 | #define MON_CFIXB I2C_REG(CAT_MONITOR, CAT2_CFIXB, 1) | ||
142 | #define REG_CFIXB_SEPIA 0xd8 | ||
143 | #define REG_CFIXR_SEPIA 0x18 | ||
144 | |||
145 | #define MON_EFFECT I2C_REG(CAT_MONITOR, CAT2_COLOR_EFFECT, 1) | ||
146 | #define REG_COLOR_EFFECT_OFF 0x00 | ||
147 | #define REG_COLOR_EFFECT_ON 0x01 | ||
148 | |||
149 | #define MON_CHROMA_EN I2C_REG(CAT_MONITOR, CAT2_CHROMA_EN, 1) | ||
150 | #define MON_CHROMA_LVL I2C_REG(CAT_MONITOR, CAT2_CHROMA_LVL, 1) | ||
151 | #define REG_CHROMA_OFF 0x00 | ||
152 | #define REG_CHROMA_ON 0x01 | ||
153 | |||
154 | #define MON_EDGE_EN I2C_REG(CAT_MONITOR, CAT2_EDGE_EN, 1) | ||
155 | #define MON_EDGE_LVL I2C_REG(CAT_MONITOR, CAT2_EDGE_LVL, 1) | ||
156 | #define REG_EDGE_OFF 0x00 | ||
157 | #define REG_EDGE_ON 0x01 | ||
158 | |||
159 | #define MON_TONE_CTL I2C_REG(CAT_MONITOR, CAT2_TONE_CTL, 1) | ||
160 | |||
161 | /* | ||
162 | * Category 3 - Auto Exposure | ||
163 | * | ||
164 | * The M-5MOLS exposure capabilities are similar to those of a digital camera. | ||
165 | * This category supports AE locking, various AE modes (exposure ranges), ISO, | ||
166 | * flickering, EV bias, shutter and metering, among others. The maximum and | ||
167 | * minimum exposure gain values depend on the M-5MOLS firmware and may differ, | ||
168 | * so this category also provides a way to read the max/min values. Each of | ||
169 | * the MONITOR and CAPTURE modes has its own gain/shutter/max exposure values. | ||
170 | */ | ||
171 | #define CAT3_AE_LOCK 0x00 /* locking Auto exposure */ | ||
172 | #define CAT3_AE_MODE 0x01 /* set AE mode, mode means range */ | ||
173 | #define CAT3_ISO 0x05 /* set ISO */ | ||
174 | #define CAT3_EV_PRESET_MONITOR 0x0a /* EV(scenemode) preset for MONITOR */ | ||
175 | #define CAT3_EV_PRESET_CAPTURE 0x0b /* EV(scenemode) preset for CAPTURE */ | ||
176 | #define CAT3_MANUAL_GAIN_MON 0x12 /* metering value for the MONITOR */ | ||
177 | #define CAT3_MAX_GAIN_MON 0x1a /* max gain value for the MONITOR */ | ||
178 | #define CAT3_MANUAL_GAIN_CAP 0x26 /* metering value for the CAPTURE */ | ||
179 | #define CAT3_AE_INDEX 0x38 /* AE index */ | ||
180 | |||
181 | #define AE_LOCK I2C_REG(CAT_AE, CAT3_AE_LOCK, 1) | ||
182 | #define REG_AE_UNLOCK 0x00 | ||
183 | #define REG_AE_LOCK 0x01 | ||
184 | |||
185 | #define AE_MODE I2C_REG(CAT_AE, CAT3_AE_MODE, 1) | ||
186 | #define REG_AE_OFF 0x00 /* AE off */ | ||
187 | #define REG_AE_ALL 0x01 /* calc AE in all block integral */ | ||
188 | #define REG_AE_CENTER 0x03 /* calc AE in center weighted */ | ||
189 | #define REG_AE_SPOT 0x06 /* calc AE in specific spot */ | ||
190 | |||
191 | #define AE_ISO I2C_REG(CAT_AE, CAT3_ISO, 1) | ||
192 | #define REG_ISO_AUTO 0x00 | ||
193 | #define REG_ISO_50 0x01 | ||
194 | #define REG_ISO_100 0x02 | ||
195 | #define REG_ISO_200 0x03 | ||
196 | #define REG_ISO_400 0x04 | ||
197 | #define REG_ISO_800 0x05 | ||
198 | |||
199 | #define AE_EV_PRESET_MONITOR I2C_REG(CAT_AE, CAT3_EV_PRESET_MONITOR, 1) | ||
200 | #define AE_EV_PRESET_CAPTURE I2C_REG(CAT_AE, CAT3_EV_PRESET_CAPTURE, 1) | ||
201 | #define REG_SCENE_NORMAL 0x00 | ||
202 | #define REG_SCENE_PORTRAIT 0x01 | ||
203 | #define REG_SCENE_LANDSCAPE 0x02 | ||
204 | #define REG_SCENE_SPORTS 0x03 | ||
205 | #define REG_SCENE_PARTY_INDOOR 0x04 | ||
206 | #define REG_SCENE_BEACH_SNOW 0x05 | ||
207 | #define REG_SCENE_SUNSET 0x06 | ||
208 | #define REG_SCENE_DAWN_DUSK 0x07 | ||
209 | #define REG_SCENE_FALL 0x08 | ||
210 | #define REG_SCENE_NIGHT 0x09 | ||
211 | #define REG_SCENE_AGAINST_LIGHT 0x0a | ||
212 | #define REG_SCENE_FIRE 0x0b | ||
213 | #define REG_SCENE_TEXT 0x0c | ||
214 | #define REG_SCENE_CANDLE 0x0d | ||
215 | |||
216 | #define AE_MAN_GAIN_MON I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_MON, 2) | ||
217 | #define AE_MAX_GAIN_MON I2C_REG(CAT_AE, CAT3_MAX_GAIN_MON, 2) | ||
218 | #define AE_MAN_GAIN_CAP I2C_REG(CAT_AE, CAT3_MANUAL_GAIN_CAP, 2) | ||
219 | |||
220 | #define AE_INDEX I2C_REG(CAT_AE, CAT3_AE_INDEX, 1) | ||
221 | #define REG_AE_INDEX_20_NEG 0x00 | ||
222 | #define REG_AE_INDEX_15_NEG 0x01 | ||
223 | #define REG_AE_INDEX_10_NEG 0x02 | ||
224 | #define REG_AE_INDEX_05_NEG 0x03 | ||
225 | #define REG_AE_INDEX_00 0x04 | ||
226 | #define REG_AE_INDEX_05_POS 0x05 | ||
227 | #define REG_AE_INDEX_10_POS 0x06 | ||
228 | #define REG_AE_INDEX_15_POS 0x07 | ||
229 | #define REG_AE_INDEX_20_POS 0x08 | ||
230 | |||
231 | /* | ||
232 | * Category 6 - White Balance | ||
233 | * | ||
234 | * This category provides AWB locking/mode/preset/speed/gain bias, etc. | ||
235 | */ | ||
236 | #define CAT6_AWB_LOCK 0x00 /* locking Auto Whitebalance */ | ||
237 | #define CAT6_AWB_MODE 0x02 /* set Auto or Manual */ | ||
238 | #define CAT6_AWB_MANUAL 0x03 /* set Manual(preset) value */ | ||
239 | |||
240 | #define AWB_LOCK I2C_REG(CAT_WB, CAT6_AWB_LOCK, 1) | ||
241 | #define REG_AWB_UNLOCK 0x00 | ||
242 | #define REG_AWB_LOCK 0x01 | ||
243 | |||
244 | #define AWB_MODE I2C_REG(CAT_WB, CAT6_AWB_MODE, 1) | ||
245 | #define REG_AWB_AUTO 0x01 /* AWB off */ | ||
246 | #define REG_AWB_PRESET 0x02 /* AWB preset */ | ||
247 | |||
248 | #define AWB_MANUAL I2C_REG(CAT_WB, CAT6_AWB_MANUAL, 1) | ||
249 | #define REG_AWB_INCANDESCENT 0x01 | ||
250 | #define REG_AWB_FLUORESCENT_1 0x02 | ||
251 | #define REG_AWB_FLUORESCENT_2 0x03 | ||
252 | #define REG_AWB_DAYLIGHT 0x04 | ||
253 | #define REG_AWB_CLOUDY 0x05 | ||
254 | #define REG_AWB_SHADE 0x06 | ||
255 | #define REG_AWB_HORIZON 0x07 | ||
256 | #define REG_AWB_LEDLIGHT 0x09 | ||
257 | |||
258 | /* | ||
259 | * Category 7 - EXIF information | ||
260 | */ | ||
261 | #define CAT7_INFO_EXPTIME_NU 0x00 | ||
262 | #define CAT7_INFO_EXPTIME_DE 0x04 | ||
263 | #define CAT7_INFO_TV_NU 0x08 | ||
264 | #define CAT7_INFO_TV_DE 0x0c | ||
265 | #define CAT7_INFO_AV_NU 0x10 | ||
266 | #define CAT7_INFO_AV_DE 0x14 | ||
267 | #define CAT7_INFO_BV_NU 0x18 | ||
268 | #define CAT7_INFO_BV_DE 0x1c | ||
269 | #define CAT7_INFO_EBV_NU 0x20 | ||
270 | #define CAT7_INFO_EBV_DE 0x24 | ||
271 | #define CAT7_INFO_ISO 0x28 | ||
272 | #define CAT7_INFO_FLASH 0x2a | ||
273 | #define CAT7_INFO_SDR 0x2c | ||
274 | #define CAT7_INFO_QVAL 0x2e | ||
275 | |||
276 | #define EXIF_INFO_EXPTIME_NU I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_NU, 4) | ||
277 | #define EXIF_INFO_EXPTIME_DE I2C_REG(CAT_EXIF, CAT7_INFO_EXPTIME_DE, 4) | ||
278 | #define EXIF_INFO_TV_NU I2C_REG(CAT_EXIF, CAT7_INFO_TV_NU, 4) | ||
279 | #define EXIF_INFO_TV_DE I2C_REG(CAT_EXIF, CAT7_INFO_TV_DE, 4) | ||
280 | #define EXIF_INFO_AV_NU I2C_REG(CAT_EXIF, CAT7_INFO_AV_NU, 4) | ||
281 | #define EXIF_INFO_AV_DE I2C_REG(CAT_EXIF, CAT7_INFO_AV_DE, 4) | ||
282 | #define EXIF_INFO_BV_NU I2C_REG(CAT_EXIF, CAT7_INFO_BV_NU, 4) | ||
283 | #define EXIF_INFO_BV_DE I2C_REG(CAT_EXIF, CAT7_INFO_BV_DE, 4) | ||
284 | #define EXIF_INFO_EBV_NU I2C_REG(CAT_EXIF, CAT7_INFO_EBV_NU, 4) | ||
285 | #define EXIF_INFO_EBV_DE I2C_REG(CAT_EXIF, CAT7_INFO_EBV_DE, 4) | ||
286 | #define EXIF_INFO_ISO I2C_REG(CAT_EXIF, CAT7_INFO_ISO, 2) | ||
287 | #define EXIF_INFO_FLASH I2C_REG(CAT_EXIF, CAT7_INFO_FLASH, 2) | ||
288 | #define EXIF_INFO_SDR I2C_REG(CAT_EXIF, CAT7_INFO_SDR, 2) | ||
289 | #define EXIF_INFO_QVAL I2C_REG(CAT_EXIF, CAT7_INFO_QVAL, 2) | ||
290 | |||
291 | /* | ||
292 | * Category 9 - Face Detection | ||
293 | */ | ||
294 | #define CAT9_FD_CTL 0x00 | ||
295 | |||
296 | #define FD_CTL I2C_REG(CAT_FD, CAT9_FD_CTL, 1) | ||
297 | #define BIT_FD_EN 0 | ||
298 | #define BIT_FD_DRAW_FACE_FRAME 4 | ||
299 | #define BIT_FD_DRAW_SMILE_LVL 6 | ||
300 | #define REG_FD(shift) (1 << shift) | ||
301 | #define REG_FD_OFF 0x0 | ||
302 | |||
303 | /* | ||
304 | * Category A - Lens Parameter | ||
305 | */ | ||
306 | #define CATA_AF_MODE 0x01 | ||
307 | #define CATA_AF_EXECUTE 0x02 | ||
308 | #define CATA_AF_STATUS 0x03 | ||
309 | #define CATA_AF_VERSION 0x0a | ||
310 | |||
311 | #define AF_MODE I2C_REG(CAT_LENS, CATA_AF_MODE, 1) | ||
312 | #define REG_AF_NORMAL 0x00 /* Normal AF, one time */ | ||
313 | #define REG_AF_MACRO 0x01 /* Macro AF, one time */ | ||
314 | #define REG_AF_POWEROFF 0x07 | ||
315 | |||
316 | #define AF_EXECUTE I2C_REG(CAT_LENS, CATA_AF_EXECUTE, 1) | ||
317 | #define REG_AF_STOP 0x00 | ||
318 | #define REG_AF_EXE_AUTO 0x01 | ||
319 | #define REG_AF_EXE_CAF 0x02 | ||
320 | |||
321 | #define AF_STATUS I2C_REG(CAT_LENS, CATA_AF_STATUS, 1) | ||
322 | #define REG_AF_FAIL 0x00 | ||
323 | #define REG_AF_SUCCESS 0x02 | ||
324 | #define REG_AF_IDLE 0x04 | ||
325 | #define REG_AF_BUSY 0x05 | ||
326 | |||
327 | #define AF_VERSION I2C_REG(CAT_LENS, CATA_AF_VERSION, 1) | ||
328 | |||
329 | /* | ||
330 | * Category B - CAPTURE Parameter | ||
331 | */ | ||
332 | #define CATB_YUVOUT_MAIN 0x00 | ||
333 | #define CATB_MAIN_IMAGE_SIZE 0x01 | ||
334 | #define CATB_MCC_MODE 0x1d | ||
335 | #define CATB_WDR_EN 0x2c | ||
336 | #define CATB_LIGHT_CTRL 0x40 | ||
337 | #define CATB_FLASH_CTRL 0x41 | ||
338 | |||
339 | #define CAPP_YUVOUT_MAIN I2C_REG(CAT_CAPT_PARM, CATB_YUVOUT_MAIN, 1) | ||
340 | #define REG_YUV422 0x00 | ||
341 | #define REG_BAYER10 0x05 | ||
342 | #define REG_BAYER8 0x06 | ||
343 | #define REG_JPEG 0x10 | ||
344 | |||
345 | #define CAPP_MAIN_IMAGE_SIZE I2C_REG(CAT_CAPT_PARM, CATB_MAIN_IMAGE_SIZE, 1) | ||
346 | |||
347 | #define CAPP_MCC_MODE I2C_REG(CAT_CAPT_PARM, CATB_MCC_MODE, 1) | ||
348 | #define REG_MCC_OFF 0x00 | ||
349 | #define REG_MCC_NORMAL 0x01 | ||
350 | |||
351 | #define CAPP_WDR_EN I2C_REG(CAT_CAPT_PARM, CATB_WDR_EN, 1) | ||
352 | #define REG_WDR_OFF 0x00 | ||
353 | #define REG_WDR_ON 0x01 | ||
354 | #define REG_WDR_AUTO 0x02 | ||
355 | |||
356 | #define CAPP_LIGHT_CTRL I2C_REG(CAT_CAPT_PARM, CATB_LIGHT_CTRL, 1) | ||
357 | #define REG_LIGHT_OFF 0x00 | ||
358 | #define REG_LIGHT_ON 0x01 | ||
359 | #define REG_LIGHT_AUTO 0x02 | ||
360 | |||
361 | #define CAPP_FLASH_CTRL I2C_REG(CAT_CAPT_PARM, CATB_FLASH_CTRL, 1) | ||
362 | #define REG_FLASH_OFF 0x00 | ||
363 | #define REG_FLASH_ON 0x01 | ||
364 | #define REG_FLASH_AUTO 0x02 | ||
365 | |||
366 | /* | ||
367 | * Category C - CAPTURE Control | ||
368 | */ | ||
369 | #define CATC_CAP_MODE 0x00 | ||
370 | #define CATC_CAP_SEL_FRAME 0x06 /* It determines Single or Multi */ | ||
371 | #define CATC_CAP_START 0x09 | ||
372 | #define CATC_CAP_IMAGE_SIZE 0x0d | ||
373 | #define CATC_CAP_THUMB_SIZE 0x11 | ||
374 | |||
375 | #define CAPC_MODE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_MODE, 1) | ||
376 | #define REG_CAP_NONE 0x00 | ||
377 | #define REG_CAP_ANTI_SHAKE 0x02 | ||
378 | |||
379 | #define CAPC_SEL_FRAME I2C_REG(CAT_CAPT_CTRL, CATC_CAP_SEL_FRAME, 1) | ||
380 | |||
381 | #define CAPC_START I2C_REG(CAT_CAPT_CTRL, CATC_CAP_START, 1) | ||
382 | #define REG_CAP_START_MAIN 0x01 | ||
383 | #define REG_CAP_START_THUMB 0x03 | ||
384 | |||
385 | #define CAPC_IMAGE_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_IMAGE_SIZE, 1) | ||
386 | #define CAPC_THUMB_SIZE I2C_REG(CAT_CAPT_CTRL, CATC_CAP_THUMB_SIZE, 1) | ||
387 | |||
388 | /* | ||
389 | * Category F - Flash | ||
390 | * | ||
391 | * This mode provides functions about internal flash stuff and system startup. | ||
392 | */ | ||
393 | #define CATF_CAM_START 0x12 /* It starts internal ARM core booting | ||
394 | * after power-up */ | ||
395 | |||
396 | #define FLASH_CAM_START I2C_REG(CAT_FLASH, CATF_CAM_START, 1) | ||
397 | #define REG_START_ARM_BOOT 0x01 | ||
398 | |||
399 | #endif /* M5MOLS_REG_H */ | ||
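The whole header hinges on the I2C_REG() packing at the top: a register is described by a single 32-bit value carrying the category in bits 23..16, the command byte in bits 15..8 and the transfer size in bits 7..0, and the I2C_CATEGORY/I2C_COMMAND/I2C_SIZE helpers recover those fields when the I2C transfer is built. A small worked example using only the definitions above (it assumes m5mols_reg.h is included; AE_MAX_GAIN_MON is the register m5mols_init_controls() reads to size the exposure control):

#include <linux/kernel.h>

/* AE_MAX_GAIN_MON == I2C_REG(CAT_AE, CAT3_MAX_GAIN_MON, 2)
 *                 == (0x03 << 16) | (0x1a << 8) | 0x02 == 0x00031a02 */
static void example_unpack(void)
{
	u8 cat  = I2C_CATEGORY(AE_MAX_GAIN_MON);	/* 0x03: CAT_AE            */
	u8 cmd  = I2C_COMMAND(AE_MAX_GAIN_MON);		/* 0x1a: CAT3_MAX_GAIN_MON */
	u8 size = I2C_SIZE(AE_MAX_GAIN_MON);		/* 0x02: two-byte value    */

	pr_info("cat=%#x cmd=%#x size=%u\n", cat, cmd, size);
}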
diff --git a/drivers/media/video/timblogiw.c b/drivers/media/video/timblogiw.c index 84d4c7c83435..fc611ebeb82c 100644 --- a/drivers/media/video/timblogiw.c +++ b/drivers/media/video/timblogiw.c | |||
@@ -24,7 +24,6 @@ | |||
24 | #include <linux/platform_device.h> | 24 | #include <linux/platform_device.h> |
25 | #include <linux/slab.h> | 25 | #include <linux/slab.h> |
26 | #include <linux/dmaengine.h> | 26 | #include <linux/dmaengine.h> |
27 | #include <linux/mfd/core.h> | ||
28 | #include <linux/scatterlist.h> | 27 | #include <linux/scatterlist.h> |
29 | #include <linux/interrupt.h> | 28 | #include <linux/interrupt.h> |
30 | #include <linux/list.h> | 29 | #include <linux/list.h> |
@@ -791,7 +790,7 @@ static int __devinit timblogiw_probe(struct platform_device *pdev) | |||
791 | { | 790 | { |
792 | int err; | 791 | int err; |
793 | struct timblogiw *lw = NULL; | 792 | struct timblogiw *lw = NULL; |
794 | struct timb_video_platform_data *pdata = mfd_get_data(pdev); | 793 | struct timb_video_platform_data *pdata = pdev->dev.platform_data; |
795 | 794 | ||
796 | if (!pdata) { | 795 | if (!pdata) { |
797 | dev_err(&pdev->dev, "No platform data\n"); | 796 | dev_err(&pdev->dev, "No platform data\n"); |
diff --git a/drivers/media/video/uvc/Makefile b/drivers/media/video/uvc/Makefile index 968c1994eda0..2071ca8a2f03 100644 --- a/drivers/media/video/uvc/Makefile +++ b/drivers/media/video/uvc/Makefile | |||
@@ -1,3 +1,6 @@ | |||
1 | uvcvideo-objs := uvc_driver.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_ctrl.o \ | 1 | uvcvideo-objs := uvc_driver.o uvc_queue.o uvc_v4l2.o uvc_video.o uvc_ctrl.o \ |
2 | uvc_status.o uvc_isight.o | 2 | uvc_status.o uvc_isight.o |
3 | ifeq ($(CONFIG_MEDIA_CONTROLLER),y) | ||
4 | uvcvideo-objs += uvc_entity.o | ||
5 | endif | ||
3 | obj-$(CONFIG_USB_VIDEO_CLASS) += uvcvideo.o | 6 | obj-$(CONFIG_USB_VIDEO_CLASS) += uvcvideo.o |
diff --git a/drivers/media/video/uvc/uvc_driver.c b/drivers/media/video/uvc/uvc_driver.c index 823f4b389745..b6eae48d7fb8 100644 --- a/drivers/media/video/uvc/uvc_driver.c +++ b/drivers/media/video/uvc/uvc_driver.c | |||
@@ -248,7 +248,7 @@ uint32_t uvc_fraction_to_interval(uint32_t numerator, uint32_t denominator) | |||
248 | * Terminal and unit management | 248 | * Terminal and unit management |
249 | */ | 249 | */ |
250 | 250 | ||
251 | static struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id) | 251 | struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id) |
252 | { | 252 | { |
253 | struct uvc_entity *entity; | 253 | struct uvc_entity *entity; |
254 | 254 | ||
@@ -795,9 +795,12 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id, | |||
795 | struct uvc_entity *entity; | 795 | struct uvc_entity *entity; |
796 | unsigned int num_inputs; | 796 | unsigned int num_inputs; |
797 | unsigned int size; | 797 | unsigned int size; |
798 | unsigned int i; | ||
798 | 799 | ||
800 | extra_size = ALIGN(extra_size, sizeof(*entity->pads)); | ||
799 | num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1; | 801 | num_inputs = (type & UVC_TERM_OUTPUT) ? num_pads : num_pads - 1; |
800 | size = sizeof(*entity) + extra_size + num_inputs; | 802 | size = sizeof(*entity) + extra_size + sizeof(*entity->pads) * num_pads |
803 | + num_inputs; | ||
801 | entity = kzalloc(size, GFP_KERNEL); | 804 | entity = kzalloc(size, GFP_KERNEL); |
802 | if (entity == NULL) | 805 | if (entity == NULL) |
803 | return NULL; | 806 | return NULL; |
@@ -805,8 +808,17 @@ static struct uvc_entity *uvc_alloc_entity(u16 type, u8 id, | |||
805 | entity->id = id; | 808 | entity->id = id; |
806 | entity->type = type; | 809 | entity->type = type; |
807 | 810 | ||
811 | entity->num_links = 0; | ||
812 | entity->num_pads = num_pads; | ||
813 | entity->pads = ((void *)(entity + 1)) + extra_size; | ||
814 | |||
815 | for (i = 0; i < num_inputs; ++i) | ||
816 | entity->pads[i].flags = MEDIA_PAD_FL_SINK; | ||
817 | if (!UVC_ENTITY_IS_OTERM(entity)) | ||
818 | entity->pads[num_pads-1].flags = MEDIA_PAD_FL_SOURCE; | ||
819 | |||
808 | entity->bNrInPins = num_inputs; | 820 | entity->bNrInPins = num_inputs; |
809 | entity->baSourceID = ((__u8 *)entity) + sizeof(*entity) + extra_size; | 821 | entity->baSourceID = (__u8 *)(&entity->pads[num_pads]); |
810 | 822 | ||
811 | return entity; | 823 | return entity; |
812 | } | 824 | } |
@@ -1585,6 +1597,13 @@ static void uvc_delete(struct uvc_device *dev) | |||
1585 | uvc_status_cleanup(dev); | 1597 | uvc_status_cleanup(dev); |
1586 | uvc_ctrl_cleanup_device(dev); | 1598 | uvc_ctrl_cleanup_device(dev); |
1587 | 1599 | ||
1600 | if (dev->vdev.dev) | ||
1601 | v4l2_device_unregister(&dev->vdev); | ||
1602 | #ifdef CONFIG_MEDIA_CONTROLLER | ||
1603 | if (media_devnode_is_registered(&dev->mdev.devnode)) | ||
1604 | media_device_unregister(&dev->mdev); | ||
1605 | #endif | ||
1606 | |||
1588 | list_for_each_safe(p, n, &dev->chains) { | 1607 | list_for_each_safe(p, n, &dev->chains) { |
1589 | struct uvc_video_chain *chain; | 1608 | struct uvc_video_chain *chain; |
1590 | chain = list_entry(p, struct uvc_video_chain, list); | 1609 | chain = list_entry(p, struct uvc_video_chain, list); |
@@ -1594,6 +1613,13 @@ static void uvc_delete(struct uvc_device *dev) | |||
1594 | list_for_each_safe(p, n, &dev->entities) { | 1613 | list_for_each_safe(p, n, &dev->entities) { |
1595 | struct uvc_entity *entity; | 1614 | struct uvc_entity *entity; |
1596 | entity = list_entry(p, struct uvc_entity, list); | 1615 | entity = list_entry(p, struct uvc_entity, list); |
1616 | #ifdef CONFIG_MEDIA_CONTROLLER | ||
1617 | uvc_mc_cleanup_entity(entity); | ||
1618 | #endif | ||
1619 | if (entity->vdev) { | ||
1620 | video_device_release(entity->vdev); | ||
1621 | entity->vdev = NULL; | ||
1622 | } | ||
1597 | kfree(entity); | 1623 | kfree(entity); |
1598 | } | 1624 | } |
1599 | 1625 | ||
@@ -1616,8 +1642,6 @@ static void uvc_release(struct video_device *vdev) | |||
1616 | struct uvc_streaming *stream = video_get_drvdata(vdev); | 1642 | struct uvc_streaming *stream = video_get_drvdata(vdev); |
1617 | struct uvc_device *dev = stream->dev; | 1643 | struct uvc_device *dev = stream->dev; |
1618 | 1644 | ||
1619 | video_device_release(vdev); | ||
1620 | |||
1621 | /* Decrement the registered streams count and delete the device when it | 1645 | /* Decrement the registered streams count and delete the device when it |
1622 | * reaches zero. | 1646 | * reaches zero. |
1623 | */ | 1647 | */ |
@@ -1682,7 +1706,7 @@ static int uvc_register_video(struct uvc_device *dev, | |||
1682 | * unregistered before the reference is released, so we don't need to | 1706 | * unregistered before the reference is released, so we don't need to |
1683 | * get another one. | 1707 | * get another one. |
1684 | */ | 1708 | */ |
1685 | vdev->parent = &dev->intf->dev; | 1709 | vdev->v4l2_dev = &dev->vdev; |
1686 | vdev->fops = &uvc_fops; | 1710 | vdev->fops = &uvc_fops; |
1687 | vdev->release = uvc_release; | 1711 | vdev->release = uvc_release; |
1688 | strlcpy(vdev->name, dev->name, sizeof vdev->name); | 1712 | strlcpy(vdev->name, dev->name, sizeof vdev->name); |
@@ -1731,6 +1755,8 @@ static int uvc_register_terms(struct uvc_device *dev, | |||
1731 | ret = uvc_register_video(dev, stream); | 1755 | ret = uvc_register_video(dev, stream); |
1732 | if (ret < 0) | 1756 | if (ret < 0) |
1733 | return ret; | 1757 | return ret; |
1758 | |||
1759 | term->vdev = stream->vdev; | ||
1734 | } | 1760 | } |
1735 | 1761 | ||
1736 | return 0; | 1762 | return 0; |
@@ -1745,6 +1771,14 @@ static int uvc_register_chains(struct uvc_device *dev) | |||
1745 | ret = uvc_register_terms(dev, chain); | 1771 | ret = uvc_register_terms(dev, chain); |
1746 | if (ret < 0) | 1772 | if (ret < 0) |
1747 | return ret; | 1773 | return ret; |
1774 | |||
1775 | #ifdef CONFIG_MEDIA_CONTROLLER | ||
1776 | ret = uvc_mc_register_entities(chain); | ||
1777 | if (ret < 0) { | ||
1778 | uvc_printk(KERN_INFO, "Failed to register entites " | ||
1779 | "(%d).\n", ret); | ||
1780 | } | ||
1781 | #endif | ||
1748 | } | 1782 | } |
1749 | 1783 | ||
1750 | return 0; | 1784 | return 0; |
@@ -1814,6 +1848,24 @@ static int uvc_probe(struct usb_interface *intf, | |||
1814 | "linux-uvc-devel mailing list.\n"); | 1848 | "linux-uvc-devel mailing list.\n"); |
1815 | } | 1849 | } |
1816 | 1850 | ||
1851 | /* Register the media and V4L2 devices. */ | ||
1852 | #ifdef CONFIG_MEDIA_CONTROLLER | ||
1853 | dev->mdev.dev = &intf->dev; | ||
1854 | strlcpy(dev->mdev.model, dev->name, sizeof(dev->mdev.model)); | ||
1855 | if (udev->serial) | ||
1856 | strlcpy(dev->mdev.serial, udev->serial, | ||
1857 | sizeof(dev->mdev.serial)); | ||
1858 | strcpy(dev->mdev.bus_info, udev->devpath); | ||
1859 | dev->mdev.hw_revision = le16_to_cpu(udev->descriptor.bcdDevice); | ||
1860 | dev->mdev.driver_version = DRIVER_VERSION_NUMBER; | ||
1861 | if (media_device_register(&dev->mdev) < 0) | ||
1862 | goto error; | ||
1863 | |||
1864 | dev->vdev.mdev = &dev->mdev; | ||
1865 | #endif | ||
1866 | if (v4l2_device_register(&intf->dev, &dev->vdev) < 0) | ||
1867 | goto error; | ||
1868 | |||
1817 | /* Initialize controls. */ | 1869 | /* Initialize controls. */ |
1818 | if (uvc_ctrl_init_device(dev) < 0) | 1870 | if (uvc_ctrl_init_device(dev) < 0) |
1819 | goto error; | 1871 | goto error; |
@@ -1822,7 +1874,7 @@ static int uvc_probe(struct usb_interface *intf, | |||
1822 | if (uvc_scan_device(dev) < 0) | 1874 | if (uvc_scan_device(dev) < 0) |
1823 | goto error; | 1875 | goto error; |
1824 | 1876 | ||
1825 | /* Register video devices. */ | 1877 | /* Register video device nodes. */ |
1826 | if (uvc_register_chains(dev) < 0) | 1878 | if (uvc_register_chains(dev) < 0) |
1827 | goto error; | 1879 | goto error; |
1828 | 1880 | ||
diff --git a/drivers/media/video/uvc/uvc_entity.c b/drivers/media/video/uvc/uvc_entity.c new file mode 100644 index 000000000000..ede7852bb1df --- /dev/null +++ b/drivers/media/video/uvc/uvc_entity.c | |||
@@ -0,0 +1,118 @@ | |||
1 | /* | ||
2 | * uvc_entity.c -- USB Video Class driver | ||
3 | * | ||
4 | * Copyright (C) 2005-2011 | ||
5 | * Laurent Pinchart (laurent.pinchart@ideasonboard.com) | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | */ | ||
13 | |||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/list.h> | ||
16 | #include <linux/videodev2.h> | ||
17 | |||
18 | #include <media/v4l2-common.h> | ||
19 | |||
20 | #include "uvcvideo.h" | ||
21 | |||
22 | /* ------------------------------------------------------------------------ | ||
23 | * Video subdevices registration and unregistration | ||
24 | */ | ||
25 | |||
26 | static int uvc_mc_register_entity(struct uvc_video_chain *chain, | ||
27 | struct uvc_entity *entity) | ||
28 | { | ||
29 | const u32 flags = MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE; | ||
30 | struct uvc_entity *remote; | ||
31 | unsigned int i; | ||
32 | u8 remote_pad; | ||
33 | int ret = 0; | ||
34 | |||
35 | for (i = 0; i < entity->num_pads; ++i) { | ||
36 | struct media_entity *source; | ||
37 | struct media_entity *sink; | ||
38 | |||
39 | if (!(entity->pads[i].flags & MEDIA_PAD_FL_SINK)) | ||
40 | continue; | ||
41 | |||
42 | remote = uvc_entity_by_id(chain->dev, entity->baSourceID[i]); | ||
43 | if (remote == NULL) | ||
44 | return -EINVAL; | ||
45 | |||
46 | source = (UVC_ENTITY_TYPE(remote) == UVC_TT_STREAMING) | ||
47 | ? &remote->vdev->entity : &remote->subdev.entity; | ||
48 | sink = (UVC_ENTITY_TYPE(entity) == UVC_TT_STREAMING) | ||
49 | ? &entity->vdev->entity : &entity->subdev.entity; | ||
50 | |||
51 | remote_pad = remote->num_pads - 1; | ||
52 | ret = media_entity_create_link(source, remote_pad, | ||
53 | sink, i, flags); | ||
54 | if (ret < 0) | ||
55 | return ret; | ||
56 | } | ||
57 | |||
58 | if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) | ||
59 | ret = v4l2_device_register_subdev(&chain->dev->vdev, | ||
60 | &entity->subdev); | ||
61 | |||
62 | return ret; | ||
63 | } | ||
64 | |||
65 | static struct v4l2_subdev_ops uvc_subdev_ops = { | ||
66 | }; | ||
67 | |||
68 | void uvc_mc_cleanup_entity(struct uvc_entity *entity) | ||
69 | { | ||
70 | if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) | ||
71 | media_entity_cleanup(&entity->subdev.entity); | ||
72 | else if (entity->vdev != NULL) | ||
73 | media_entity_cleanup(&entity->vdev->entity); | ||
74 | } | ||
75 | |||
76 | static int uvc_mc_init_entity(struct uvc_entity *entity) | ||
77 | { | ||
78 | int ret; | ||
79 | |||
80 | if (UVC_ENTITY_TYPE(entity) != UVC_TT_STREAMING) { | ||
81 | v4l2_subdev_init(&entity->subdev, &uvc_subdev_ops); | ||
82 | strlcpy(entity->subdev.name, entity->name, | ||
83 | sizeof(entity->subdev.name)); | ||
84 | |||
85 | ret = media_entity_init(&entity->subdev.entity, | ||
86 | entity->num_pads, entity->pads, 0); | ||
87 | } else | ||
88 | ret = media_entity_init(&entity->vdev->entity, | ||
89 | entity->num_pads, entity->pads, 0); | ||
90 | |||
91 | return ret; | ||
92 | } | ||
93 | |||
94 | int uvc_mc_register_entities(struct uvc_video_chain *chain) | ||
95 | { | ||
96 | struct uvc_entity *entity; | ||
97 | int ret; | ||
98 | |||
99 | list_for_each_entry(entity, &chain->entities, chain) { | ||
100 | ret = uvc_mc_init_entity(entity); | ||
101 | if (ret < 0) { | ||
102 | uvc_printk(KERN_INFO, "Failed to initialize entity for " | ||
103 | "entity %u\n", entity->id); | ||
104 | return ret; | ||
105 | } | ||
106 | } | ||
107 | |||
108 | list_for_each_entry(entity, &chain->entities, chain) { | ||
109 | ret = uvc_mc_register_entity(chain, entity); | ||
110 | if (ret < 0) { | ||
111 | uvc_printk(KERN_INFO, "Failed to register entity for " | ||
112 | "entity %u\n", entity->id); | ||
113 | return ret; | ||
114 | } | ||
115 | } | ||
116 | |||
117 | return 0; | ||
118 | } | ||
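uvc_mc_register_entities() above translates the UVC descriptor graph into media controller objects: each UVC entity becomes either a v4l2_subdev entity or the already-registered video_device entity, every source listed in baSourceID becomes an immutable, enabled link into the matching sink pad, and non-streaming entities are additionally registered as sub-devices of the chain's v4l2_device. A minimal sketch of the pad and link setup only, using two hypothetical single-pad entities; the link flags mirror the ones used above:

#include <media/media-entity.h>

static struct media_pad src_pad = { .flags = MEDIA_PAD_FL_SOURCE };
static struct media_pad sink_pad = { .flags = MEDIA_PAD_FL_SINK };

/* Initialize two single-pad entities and connect source pad 0 to sink
 * pad 0 with a link that is always enabled and cannot be modified. */
static int example_wire_up(struct media_entity *src, struct media_entity *sink)
{
	int ret;

	ret = media_entity_init(src, 1, &src_pad, 0);
	if (ret < 0)
		return ret;

	ret = media_entity_init(sink, 1, &sink_pad, 0);
	if (ret < 0)
		return ret;

	return media_entity_create_link(src, 0, sink, 0,
			MEDIA_LNK_FL_ENABLED | MEDIA_LNK_FL_IMMUTABLE);
}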
diff --git a/drivers/media/video/uvc/uvcvideo.h b/drivers/media/video/uvc/uvcvideo.h index 7cf224bae2e5..20107fd3574d 100644 --- a/drivers/media/video/uvc/uvcvideo.h +++ b/drivers/media/video/uvc/uvcvideo.h | |||
@@ -98,8 +98,11 @@ struct uvc_xu_control { | |||
98 | #ifdef __KERNEL__ | 98 | #ifdef __KERNEL__ |
99 | 99 | ||
100 | #include <linux/poll.h> | 100 | #include <linux/poll.h> |
101 | #include <linux/usb.h> | ||
101 | #include <linux/usb/video.h> | 102 | #include <linux/usb/video.h> |
102 | #include <linux/uvcvideo.h> | 103 | #include <linux/uvcvideo.h> |
104 | #include <media/media-device.h> | ||
105 | #include <media/v4l2-device.h> | ||
103 | 106 | ||
104 | /* -------------------------------------------------------------------------- | 107 | /* -------------------------------------------------------------------------- |
105 | * UVC constants | 108 | * UVC constants |
@@ -301,6 +304,13 @@ struct uvc_entity { | |||
301 | __u16 type; | 304 | __u16 type; |
302 | char name[64]; | 305 | char name[64]; |
303 | 306 | ||
307 | /* Media controller-related fields. */ | ||
308 | struct video_device *vdev; | ||
309 | struct v4l2_subdev subdev; | ||
310 | unsigned int num_pads; | ||
311 | unsigned int num_links; | ||
312 | struct media_pad *pads; | ||
313 | |||
304 | union { | 314 | union { |
305 | struct { | 315 | struct { |
306 | __u16 wObjectiveFocalLengthMin; | 316 | __u16 wObjectiveFocalLengthMin; |
@@ -504,6 +514,10 @@ struct uvc_device { | |||
504 | atomic_t nmappings; | 514 | atomic_t nmappings; |
505 | 515 | ||
506 | /* Video control interface */ | 516 | /* Video control interface */ |
517 | #ifdef CONFIG_MEDIA_CONTROLLER | ||
518 | struct media_device mdev; | ||
519 | #endif | ||
520 | struct v4l2_device vdev; | ||
507 | __u16 uvc_version; | 521 | __u16 uvc_version; |
508 | __u32 clock_frequency; | 522 | __u32 clock_frequency; |
509 | 523 | ||
@@ -583,6 +597,8 @@ extern unsigned int uvc_timeout_param; | |||
583 | /* Core driver */ | 597 | /* Core driver */ |
584 | extern struct uvc_driver uvc_driver; | 598 | extern struct uvc_driver uvc_driver; |
585 | 599 | ||
600 | extern struct uvc_entity *uvc_entity_by_id(struct uvc_device *dev, int id); | ||
601 | |||
586 | /* Video buffers queue management. */ | 602 | /* Video buffers queue management. */ |
587 | extern void uvc_queue_init(struct uvc_video_queue *queue, | 603 | extern void uvc_queue_init(struct uvc_video_queue *queue, |
588 | enum v4l2_buf_type type, int drop_corrupted); | 604 | enum v4l2_buf_type type, int drop_corrupted); |
@@ -616,6 +632,10 @@ static inline int uvc_queue_streaming(struct uvc_video_queue *queue) | |||
616 | /* V4L2 interface */ | 632 | /* V4L2 interface */ |
617 | extern const struct v4l2_file_operations uvc_fops; | 633 | extern const struct v4l2_file_operations uvc_fops; |
618 | 634 | ||
635 | /* Media controller */ | ||
636 | extern int uvc_mc_register_entities(struct uvc_video_chain *chain); | ||
637 | extern void uvc_mc_cleanup_entity(struct uvc_entity *entity); | ||
638 | |||
619 | /* Video */ | 639 | /* Video */ |
620 | extern int uvc_video_init(struct uvc_streaming *stream); | 640 | extern int uvc_video_init(struct uvc_streaming *stream); |
621 | extern int uvc_video_suspend(struct uvc_streaming *stream); | 641 | extern int uvc_video_suspend(struct uvc_streaming *stream); |
diff --git a/drivers/mfd/88pm860x-core.c b/drivers/mfd/88pm860x-core.c index 011cb6ce861b..17dfe9bb6d27 100644 --- a/drivers/mfd/88pm860x-core.c +++ b/drivers/mfd/88pm860x-core.c | |||
@@ -21,13 +21,13 @@ | |||
21 | 21 | ||
22 | #define INT_STATUS_NUM 3 | 22 | #define INT_STATUS_NUM 3 |
23 | 23 | ||
24 | static struct resource bk_resources[] __initdata = { | 24 | static struct resource bk_resources[] __devinitdata = { |
25 | {PM8606_BACKLIGHT1, PM8606_BACKLIGHT1, "backlight-0", IORESOURCE_IO,}, | 25 | {PM8606_BACKLIGHT1, PM8606_BACKLIGHT1, "backlight-0", IORESOURCE_IO,}, |
26 | {PM8606_BACKLIGHT2, PM8606_BACKLIGHT2, "backlight-1", IORESOURCE_IO,}, | 26 | {PM8606_BACKLIGHT2, PM8606_BACKLIGHT2, "backlight-1", IORESOURCE_IO,}, |
27 | {PM8606_BACKLIGHT3, PM8606_BACKLIGHT3, "backlight-2", IORESOURCE_IO,}, | 27 | {PM8606_BACKLIGHT3, PM8606_BACKLIGHT3, "backlight-2", IORESOURCE_IO,}, |
28 | }; | 28 | }; |
29 | 29 | ||
30 | static struct resource led_resources[] __initdata = { | 30 | static struct resource led_resources[] __devinitdata = { |
31 | {PM8606_LED1_RED, PM8606_LED1_RED, "led0-red", IORESOURCE_IO,}, | 31 | {PM8606_LED1_RED, PM8606_LED1_RED, "led0-red", IORESOURCE_IO,}, |
32 | {PM8606_LED1_GREEN, PM8606_LED1_GREEN, "led0-green", IORESOURCE_IO,}, | 32 | {PM8606_LED1_GREEN, PM8606_LED1_GREEN, "led0-green", IORESOURCE_IO,}, |
33 | {PM8606_LED1_BLUE, PM8606_LED1_BLUE, "led0-blue", IORESOURCE_IO,}, | 33 | {PM8606_LED1_BLUE, PM8606_LED1_BLUE, "led0-blue", IORESOURCE_IO,}, |
@@ -36,7 +36,7 @@ static struct resource led_resources[] __initdata = { | |||
36 | {PM8606_LED2_BLUE, PM8606_LED2_BLUE, "led1-blue", IORESOURCE_IO,}, | 36 | {PM8606_LED2_BLUE, PM8606_LED2_BLUE, "led1-blue", IORESOURCE_IO,}, |
37 | }; | 37 | }; |
38 | 38 | ||
39 | static struct resource regulator_resources[] __initdata = { | 39 | static struct resource regulator_resources[] __devinitdata = { |
40 | {PM8607_ID_BUCK1, PM8607_ID_BUCK1, "buck-1", IORESOURCE_IO,}, | 40 | {PM8607_ID_BUCK1, PM8607_ID_BUCK1, "buck-1", IORESOURCE_IO,}, |
41 | {PM8607_ID_BUCK2, PM8607_ID_BUCK2, "buck-2", IORESOURCE_IO,}, | 41 | {PM8607_ID_BUCK2, PM8607_ID_BUCK2, "buck-2", IORESOURCE_IO,}, |
42 | {PM8607_ID_BUCK3, PM8607_ID_BUCK3, "buck-3", IORESOURCE_IO,}, | 42 | {PM8607_ID_BUCK3, PM8607_ID_BUCK3, "buck-3", IORESOURCE_IO,}, |
@@ -57,15 +57,15 @@ static struct resource regulator_resources[] __initdata = { | |||
57 | {PM8607_ID_LDO15, PM8607_ID_LDO15, "ldo-15", IORESOURCE_IO,}, | 57 | {PM8607_ID_LDO15, PM8607_ID_LDO15, "ldo-15", IORESOURCE_IO,}, |
58 | }; | 58 | }; |
59 | 59 | ||
60 | static struct resource touch_resources[] __initdata = { | 60 | static struct resource touch_resources[] __devinitdata = { |
61 | {PM8607_IRQ_PEN, PM8607_IRQ_PEN, "touch", IORESOURCE_IRQ,}, | 61 | {PM8607_IRQ_PEN, PM8607_IRQ_PEN, "touch", IORESOURCE_IRQ,}, |
62 | }; | 62 | }; |
63 | 63 | ||
64 | static struct resource onkey_resources[] __initdata = { | 64 | static struct resource onkey_resources[] __devinitdata = { |
65 | {PM8607_IRQ_ONKEY, PM8607_IRQ_ONKEY, "onkey", IORESOURCE_IRQ,}, | 65 | {PM8607_IRQ_ONKEY, PM8607_IRQ_ONKEY, "onkey", IORESOURCE_IRQ,}, |
66 | }; | 66 | }; |
67 | 67 | ||
68 | static struct resource codec_resources[] __initdata = { | 68 | static struct resource codec_resources[] __devinitdata = { |
69 | /* Headset microphone insertion or removal */ | 69 | /* Headset microphone insertion or removal */ |
70 | {PM8607_IRQ_MICIN, PM8607_IRQ_MICIN, "micin", IORESOURCE_IRQ,}, | 70 | {PM8607_IRQ_MICIN, PM8607_IRQ_MICIN, "micin", IORESOURCE_IRQ,}, |
71 | /* Hook-switch press or release */ | 71 | /* Hook-switch press or release */ |
@@ -76,12 +76,12 @@ static struct resource codec_resources[] __initdata = { | |||
76 | {PM8607_IRQ_AUDIO_SHORT, PM8607_IRQ_AUDIO_SHORT, "audio-short", IORESOURCE_IRQ,}, | 76 | {PM8607_IRQ_AUDIO_SHORT, PM8607_IRQ_AUDIO_SHORT, "audio-short", IORESOURCE_IRQ,}, |
77 | }; | 77 | }; |
78 | 78 | ||
79 | static struct resource battery_resources[] __initdata = { | 79 | static struct resource battery_resources[] __devinitdata = { |
80 | {PM8607_IRQ_CC, PM8607_IRQ_CC, "columb counter", IORESOURCE_IRQ,}, | 80 | {PM8607_IRQ_CC, PM8607_IRQ_CC, "columb counter", IORESOURCE_IRQ,}, |
81 | {PM8607_IRQ_BAT, PM8607_IRQ_BAT, "battery", IORESOURCE_IRQ,}, | 81 | {PM8607_IRQ_BAT, PM8607_IRQ_BAT, "battery", IORESOURCE_IRQ,}, |
82 | }; | 82 | }; |
83 | 83 | ||
84 | static struct resource charger_resources[] __initdata = { | 84 | static struct resource charger_resources[] __devinitdata = { |
85 | {PM8607_IRQ_CHG, PM8607_IRQ_CHG, "charger detect", IORESOURCE_IRQ,}, | 85 | {PM8607_IRQ_CHG, PM8607_IRQ_CHG, "charger detect", IORESOURCE_IRQ,}, |
86 | {PM8607_IRQ_CHG_DONE, PM8607_IRQ_CHG_DONE, "charging done", IORESOURCE_IRQ,}, | 86 | {PM8607_IRQ_CHG_DONE, PM8607_IRQ_CHG_DONE, "charging done", IORESOURCE_IRQ,}, |
87 | {PM8607_IRQ_CHG_FAULT, PM8607_IRQ_CHG_FAULT, "charging timeout", IORESOURCE_IRQ,}, | 87 | {PM8607_IRQ_CHG_FAULT, PM8607_IRQ_CHG_FAULT, "charging timeout", IORESOURCE_IRQ,}, |
@@ -90,13 +90,17 @@ static struct resource charger_resources[] __initdata = { | |||
90 | {PM8607_IRQ_VCHG, PM8607_IRQ_VCHG, "vchg voltage", IORESOURCE_IRQ,}, | 90 | {PM8607_IRQ_VCHG, PM8607_IRQ_VCHG, "vchg voltage", IORESOURCE_IRQ,}, |
91 | }; | 91 | }; |
92 | 92 | ||
93 | static struct mfd_cell bk_devs[] __initdata = { | 93 | static struct resource rtc_resources[] __devinitdata = { |
94 | {PM8607_IRQ_RTC, PM8607_IRQ_RTC, "rtc", IORESOURCE_IRQ,}, | ||
95 | }; | ||
96 | |||
97 | static struct mfd_cell bk_devs[] = { | ||
94 | {"88pm860x-backlight", 0,}, | 98 | {"88pm860x-backlight", 0,}, |
95 | {"88pm860x-backlight", 1,}, | 99 | {"88pm860x-backlight", 1,}, |
96 | {"88pm860x-backlight", 2,}, | 100 | {"88pm860x-backlight", 2,}, |
97 | }; | 101 | }; |
98 | 102 | ||
99 | static struct mfd_cell led_devs[] __initdata = { | 103 | static struct mfd_cell led_devs[] = { |
100 | {"88pm860x-led", 0,}, | 104 | {"88pm860x-led", 0,}, |
101 | {"88pm860x-led", 1,}, | 105 | {"88pm860x-led", 1,}, |
102 | {"88pm860x-led", 2,}, | 106 | {"88pm860x-led", 2,}, |
@@ -105,7 +109,7 @@ static struct mfd_cell led_devs[] __initdata = { | |||
105 | {"88pm860x-led", 5,}, | 109 | {"88pm860x-led", 5,}, |
106 | }; | 110 | }; |
107 | 111 | ||
108 | static struct mfd_cell regulator_devs[] __initdata = { | 112 | static struct mfd_cell regulator_devs[] = { |
109 | {"88pm860x-regulator", 0,}, | 113 | {"88pm860x-regulator", 0,}, |
110 | {"88pm860x-regulator", 1,}, | 114 | {"88pm860x-regulator", 1,}, |
111 | {"88pm860x-regulator", 2,}, | 115 | {"88pm860x-regulator", 2,}, |
@@ -126,15 +130,15 @@ static struct mfd_cell regulator_devs[] __initdata = { | |||
126 | {"88pm860x-regulator", 17,}, | 130 | {"88pm860x-regulator", 17,}, |
127 | }; | 131 | }; |
128 | 132 | ||
129 | static struct mfd_cell touch_devs[] __initdata = { | 133 | static struct mfd_cell touch_devs[] = { |
130 | {"88pm860x-touch", -1,}, | 134 | {"88pm860x-touch", -1,}, |
131 | }; | 135 | }; |
132 | 136 | ||
133 | static struct mfd_cell onkey_devs[] __initdata = { | 137 | static struct mfd_cell onkey_devs[] = { |
134 | {"88pm860x-onkey", -1,}, | 138 | {"88pm860x-onkey", -1,}, |
135 | }; | 139 | }; |
136 | 140 | ||
137 | static struct mfd_cell codec_devs[] __initdata = { | 141 | static struct mfd_cell codec_devs[] = { |
138 | {"88pm860x-codec", -1,}, | 142 | {"88pm860x-codec", -1,}, |
139 | }; | 143 | }; |
140 | 144 | ||
@@ -143,11 +147,10 @@ static struct mfd_cell power_devs[] = { | |||
143 | {"88pm860x-charger", -1,}, | 147 | {"88pm860x-charger", -1,}, |
144 | }; | 148 | }; |
145 | 149 | ||
146 | static struct pm860x_backlight_pdata bk_pdata[ARRAY_SIZE(bk_devs)]; | 150 | static struct mfd_cell rtc_devs[] = { |
147 | static struct pm860x_led_pdata led_pdata[ARRAY_SIZE(led_devs)]; | 151 | {"88pm860x-rtc", -1,}, |
148 | static struct regulator_init_data regulator_pdata[ARRAY_SIZE(regulator_devs)]; | 152 | }; |
149 | static struct pm860x_touch_pdata touch_pdata; | 153 | |
150 | static struct pm860x_power_pdata power_pdata; | ||
151 | 154 | ||
152 | struct pm860x_irq_data { | 155 | struct pm860x_irq_data { |
153 | int reg; | 156 | int reg; |
@@ -501,7 +504,6 @@ static void device_irq_exit(struct pm860x_chip *chip) | |||
501 | } | 504 | } |
502 | 505 | ||
503 | static void __devinit device_bk_init(struct pm860x_chip *chip, | 506 | static void __devinit device_bk_init(struct pm860x_chip *chip, |
504 | struct i2c_client *i2c, | ||
505 | struct pm860x_platform_data *pdata) | 507 | struct pm860x_platform_data *pdata) |
506 | { | 508 | { |
507 | int ret; | 509 | int ret; |
@@ -514,13 +516,12 @@ static void __devinit device_bk_init(struct pm860x_chip *chip, | |||
514 | pdata->num_backlights = ARRAY_SIZE(bk_devs); | 516 | pdata->num_backlights = ARRAY_SIZE(bk_devs); |
515 | 517 | ||
516 | for (i = 0; i < pdata->num_backlights; i++) { | 518 | for (i = 0; i < pdata->num_backlights; i++) { |
517 | memcpy(&bk_pdata[i], &pdata->backlight[i], | 519 | bk_devs[i].platform_data = &pdata->backlight[i]; |
518 | sizeof(struct pm860x_backlight_pdata)); | 520 | bk_devs[i].pdata_size = sizeof(struct pm860x_backlight_pdata); |
519 | bk_devs[i].mfd_data = &bk_pdata[i]; | ||
520 | 521 | ||
521 | for (j = 0; j < ARRAY_SIZE(bk_devs); j++) { | 522 | for (j = 0; j < ARRAY_SIZE(bk_devs); j++) { |
522 | id = bk_resources[j].start; | 523 | id = bk_resources[j].start; |
523 | if (bk_pdata[i].flags != id) | 524 | if (pdata->backlight[i].flags != id) |
524 | continue; | 525 | continue; |
525 | 526 | ||
526 | bk_devs[i].num_resources = 1; | 527 | bk_devs[i].num_resources = 1; |
@@ -538,7 +539,6 @@ static void __devinit device_bk_init(struct pm860x_chip *chip, | |||
538 | } | 539 | } |
539 | 540 | ||
540 | static void __devinit device_led_init(struct pm860x_chip *chip, | 541 | static void __devinit device_led_init(struct pm860x_chip *chip, |
541 | struct i2c_client *i2c, | ||
542 | struct pm860x_platform_data *pdata) | 542 | struct pm860x_platform_data *pdata) |
543 | { | 543 | { |
544 | int ret; | 544 | int ret; |
@@ -551,13 +551,12 @@ static void __devinit device_led_init(struct pm860x_chip *chip, | |||
551 | pdata->num_leds = ARRAY_SIZE(led_devs); | 551 | pdata->num_leds = ARRAY_SIZE(led_devs); |
552 | 552 | ||
553 | for (i = 0; i < pdata->num_leds; i++) { | 553 | for (i = 0; i < pdata->num_leds; i++) { |
554 | memcpy(&led_pdata[i], &pdata->led[i], | 554 | led_devs[i].platform_data = &pdata->led[i]; |
555 | sizeof(struct pm860x_led_pdata)); | 555 | led_devs[i].pdata_size = sizeof(struct pm860x_led_pdata); |
556 | led_devs[i].mfd_data = &led_pdata[i]; | ||
557 | 556 | ||
558 | for (j = 0; j < ARRAY_SIZE(led_devs); j++) { | 557 | for (j = 0; j < ARRAY_SIZE(led_devs); j++) { |
559 | id = led_resources[j].start; | 558 | id = led_resources[j].start; |
560 | if (led_pdata[i].flags != id) | 559 | if (pdata->led[i].flags != id) |
561 | continue; | 560 | continue; |
562 | 561 | ||
563 | led_devs[i].num_resources = 1; | 562 | led_devs[i].num_resources = 1; |
@@ -575,12 +574,11 @@ static void __devinit device_led_init(struct pm860x_chip *chip, | |||
575 | } | 574 | } |
576 | 575 | ||
577 | static void __devinit device_regulator_init(struct pm860x_chip *chip, | 576 | static void __devinit device_regulator_init(struct pm860x_chip *chip, |
578 | struct i2c_client *i2c, | ||
579 | struct pm860x_platform_data *pdata) | 577 | struct pm860x_platform_data *pdata) |
580 | { | 578 | { |
581 | struct regulator_init_data *initdata; | 579 | struct regulator_init_data *initdata; |
582 | int ret; | 580 | int ret; |
583 | int i, j; | 581 | int i, seq; |
584 | 582 | ||
585 | if ((pdata == NULL) || (pdata->regulator == NULL)) | 583 | if ((pdata == NULL) || (pdata->regulator == NULL)) |
586 | return; | 584 | return; |
@@ -588,41 +586,21 @@ static void __devinit device_regulator_init(struct pm860x_chip *chip, | |||
588 | if (pdata->num_regulators > ARRAY_SIZE(regulator_devs)) | 586 | if (pdata->num_regulators > ARRAY_SIZE(regulator_devs)) |
589 | pdata->num_regulators = ARRAY_SIZE(regulator_devs); | 587 | pdata->num_regulators = ARRAY_SIZE(regulator_devs); |
590 | 588 | ||
591 | for (i = 0, j = -1; i < pdata->num_regulators; i++) { | 589 | for (i = 0, seq = -1; i < pdata->num_regulators; i++) { |
592 | initdata = &pdata->regulator[i]; | 590 | initdata = &pdata->regulator[i]; |
593 | if (strstr(initdata->constraints.name, "BUCK")) { | 591 | seq = *(unsigned int *)initdata->driver_data; |
594 | sscanf(initdata->constraints.name, "BUCK%d", &j); | 592 | if ((seq < 0) || (seq > PM8607_ID_RG_MAX)) { |
595 | /* BUCK1 ~ BUCK3 */ | 593 | dev_err(chip->dev, "Wrong ID(%d) on regulator(%s)\n", |
596 | if ((j < 1) || (j > 3)) { | 594 | seq, initdata->constraints.name); |
597 | dev_err(chip->dev, "Failed to add constraint " | ||
598 | "(%s)\n", initdata->constraints.name); | ||
599 | goto out; | ||
600 | } | ||
601 | j = (j - 1) + PM8607_ID_BUCK1; | ||
602 | } | ||
603 | if (strstr(initdata->constraints.name, "LDO")) { | ||
604 | sscanf(initdata->constraints.name, "LDO%d", &j); | ||
605 | /* LDO1 ~ LDO15 */ | ||
606 | if ((j < 1) || (j > 15)) { | ||
607 | dev_err(chip->dev, "Failed to add constraint " | ||
608 | "(%s)\n", initdata->constraints.name); | ||
609 | goto out; | ||
610 | } | ||
611 | j = (j - 1) + PM8607_ID_LDO1; | ||
612 | } | ||
613 | if (j == -1) { | ||
614 | dev_err(chip->dev, "Failed to add constraint (%s)\n", | ||
615 | initdata->constraints.name); | ||
616 | goto out; | 595 | goto out; |
617 | } | 596 | } |
618 | memcpy(®ulator_pdata[i], &pdata->regulator[i], | 597 | regulator_devs[i].platform_data = &pdata->regulator[i]; |
619 | sizeof(struct regulator_init_data)); | 598 | regulator_devs[i].pdata_size = sizeof(struct regulator_init_data); |
620 | regulator_devs[i].mfd_data = ®ulator_pdata[i]; | ||
621 | regulator_devs[i].num_resources = 1; | 599 | regulator_devs[i].num_resources = 1; |
622 | regulator_devs[i].resources = ®ulator_resources[j]; | 600 | regulator_devs[i].resources = ®ulator_resources[seq]; |
623 | 601 | ||
624 | ret = mfd_add_devices(chip->dev, 0, ®ulator_devs[i], 1, | 602 | ret = mfd_add_devices(chip->dev, 0, ®ulator_devs[i], 1, |
625 | ®ulator_resources[j], 0); | 603 | ®ulator_resources[seq], 0); |
626 | if (ret < 0) { | 604 | if (ret < 0) { |
627 | dev_err(chip->dev, "Failed to add regulator subdev\n"); | 605 | dev_err(chip->dev, "Failed to add regulator subdev\n"); |
628 | goto out; | 606 | goto out; |
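The changes in this file replace the driver-private mfd_data copies with the generic platform_data/pdata_size fields of struct mfd_cell: the MFD core copies that payload into the child platform device it creates, so sub-drivers read it back through pdev->dev.platform_data (which is exactly what the timblogiw hunk earlier in this diff switches to). A minimal sketch of registering one cell that way, with hypothetical names (my_cell_pdata, "my-subdev") for the payload and the child device:

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/mfd/core.h>

struct my_cell_pdata {
	int flags;			/* hypothetical per-cell configuration */
};

static struct my_cell_pdata my_pdata = { .flags = 1 };

static struct mfd_cell my_cells[] = {
	{ .name = "my-subdev", .id = -1 },
};

/* Parent side: attach the payload to the cell; mfd_add_devices() copies
 * it into the child platform device it creates. */
static int my_add_cells(struct device *parent, int irq_base)
{
	my_cells[0].platform_data = &my_pdata;
	my_cells[0].pdata_size = sizeof(my_pdata);

	return mfd_add_devices(parent, 0, my_cells, ARRAY_SIZE(my_cells),
			       NULL, irq_base);
}

/* Child side, in its probe():
 *	struct my_cell_pdata *pdata = pdev->dev.platform_data;
 */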
@@ -632,17 +610,35 @@ out: | |||
632 | return; | 610 | return; |
633 | } | 611 | } |
634 | 612 | ||
613 | static void __devinit device_rtc_init(struct pm860x_chip *chip, | ||
614 | struct pm860x_platform_data *pdata) | ||
615 | { | ||
616 | int ret; | ||
617 | |||
618 | if (pdata == NULL) | ||
619 | return; | ||
620 | |||
621 | rtc_devs[0].platform_data = pdata->rtc; | ||
622 | rtc_devs[0].pdata_size = sizeof(struct pm860x_rtc_pdata); | ||
623 | rtc_devs[0].num_resources = ARRAY_SIZE(rtc_resources); | ||
624 | rtc_devs[0].resources = &rtc_resources[0]; | ||
625 | ret = mfd_add_devices(chip->dev, 0, &rtc_devs[0], | ||
626 | ARRAY_SIZE(rtc_devs), &rtc_resources[0], | ||
627 | chip->irq_base); | ||
628 | if (ret < 0) | ||
629 | dev_err(chip->dev, "Failed to add rtc subdev\n"); | ||
630 | } | ||
631 | |||
635 | static void __devinit device_touch_init(struct pm860x_chip *chip, | 632 | static void __devinit device_touch_init(struct pm860x_chip *chip, |
636 | struct i2c_client *i2c, | ||
637 | struct pm860x_platform_data *pdata) | 633 | struct pm860x_platform_data *pdata) |
638 | { | 634 | { |
639 | int ret; | 635 | int ret; |
640 | 636 | ||
641 | if ((pdata == NULL) || (pdata->touch == NULL)) | 637 | if (pdata == NULL) |
642 | return; | 638 | return; |
643 | 639 | ||
644 | memcpy(&touch_pdata, pdata->touch, sizeof(struct pm860x_touch_pdata)); | 640 | touch_devs[0].platform_data = pdata->touch; |
645 | touch_devs[0].mfd_data = &touch_pdata; | 641 | touch_devs[0].pdata_size = sizeof(struct pm860x_touch_pdata); |
646 | touch_devs[0].num_resources = ARRAY_SIZE(touch_resources); | 642 | touch_devs[0].num_resources = ARRAY_SIZE(touch_resources); |
647 | touch_devs[0].resources = &touch_resources[0]; | 643 | touch_devs[0].resources = &touch_resources[0]; |
648 | ret = mfd_add_devices(chip->dev, 0, &touch_devs[0], | 644 | ret = mfd_add_devices(chip->dev, 0, &touch_devs[0], |
@@ -653,16 +649,15 @@ static void __devinit device_touch_init(struct pm860x_chip *chip, | |||
653 | } | 649 | } |
654 | 650 | ||
655 | static void __devinit device_power_init(struct pm860x_chip *chip, | 651 | static void __devinit device_power_init(struct pm860x_chip *chip, |
656 | struct i2c_client *i2c, | ||
657 | struct pm860x_platform_data *pdata) | 652 | struct pm860x_platform_data *pdata) |
658 | { | 653 | { |
659 | int ret; | 654 | int ret; |
660 | 655 | ||
661 | if ((pdata == NULL) || (pdata->power == NULL)) | 656 | if (pdata == NULL) |
662 | return; | 657 | return; |
663 | 658 | ||
664 | memcpy(&power_pdata, pdata->power, sizeof(struct pm860x_power_pdata)); | 659 | power_devs[0].platform_data = pdata->power; |
665 | power_devs[0].mfd_data = &power_pdata; | 660 | power_devs[0].pdata_size = sizeof(struct pm860x_power_pdata); |
666 | power_devs[0].num_resources = ARRAY_SIZE(battery_resources); | 661 | power_devs[0].num_resources = ARRAY_SIZE(battery_resources); |
667 | power_devs[0].resources = &battery_resources[0], | 662 | power_devs[0].resources = &battery_resources[0], |
668 | ret = mfd_add_devices(chip->dev, 0, &power_devs[0], 1, | 663 | ret = mfd_add_devices(chip->dev, 0, &power_devs[0], 1, |
@@ -670,7 +665,8 @@ static void __devinit device_power_init(struct pm860x_chip *chip, | |||
670 | if (ret < 0) | 665 | if (ret < 0) |
671 | dev_err(chip->dev, "Failed to add battery subdev\n"); | 666 | dev_err(chip->dev, "Failed to add battery subdev\n"); |
672 | 667 | ||
673 | power_devs[1].mfd_data = &power_pdata; | 668 | power_devs[1].platform_data = pdata->power; |
669 | power_devs[1].pdata_size = sizeof(struct pm860x_power_pdata); | ||
674 | power_devs[1].num_resources = ARRAY_SIZE(charger_resources); | 670 | power_devs[1].num_resources = ARRAY_SIZE(charger_resources); |
675 | power_devs[1].resources = &charger_resources[0], | 671 | power_devs[1].resources = &charger_resources[0], |
676 | ret = mfd_add_devices(chip->dev, 0, &power_devs[1], 1, | 672 | ret = mfd_add_devices(chip->dev, 0, &power_devs[1], 1, |
@@ -680,7 +676,6 @@ static void __devinit device_power_init(struct pm860x_chip *chip, | |||
680 | } | 676 | } |
681 | 677 | ||
682 | static void __devinit device_onkey_init(struct pm860x_chip *chip, | 678 | static void __devinit device_onkey_init(struct pm860x_chip *chip, |
683 | struct i2c_client *i2c, | ||
684 | struct pm860x_platform_data *pdata) | 679 | struct pm860x_platform_data *pdata) |
685 | { | 680 | { |
686 | int ret; | 681 | int ret; |
@@ -695,7 +690,6 @@ static void __devinit device_onkey_init(struct pm860x_chip *chip, | |||
695 | } | 690 | } |
696 | 691 | ||
697 | static void __devinit device_codec_init(struct pm860x_chip *chip, | 692 | static void __devinit device_codec_init(struct pm860x_chip *chip, |
698 | struct i2c_client *i2c, | ||
699 | struct pm860x_platform_data *pdata) | 693 | struct pm860x_platform_data *pdata) |
700 | { | 694 | { |
701 | int ret; | 695 | int ret; |
@@ -763,11 +757,12 @@ static void __devinit device_8607_init(struct pm860x_chip *chip, | |||
763 | if (ret < 0) | 757 | if (ret < 0) |
764 | goto out; | 758 | goto out; |
765 | 759 | ||
766 | device_regulator_init(chip, i2c, pdata); | 760 | device_regulator_init(chip, pdata); |
767 | device_onkey_init(chip, i2c, pdata); | 761 | device_rtc_init(chip, pdata); |
768 | device_touch_init(chip, i2c, pdata); | 762 | device_onkey_init(chip, pdata); |
769 | device_power_init(chip, i2c, pdata); | 763 | device_touch_init(chip, pdata); |
770 | device_codec_init(chip, i2c, pdata); | 764 | device_power_init(chip, pdata); |
765 | device_codec_init(chip, pdata); | ||
771 | out: | 766 | out: |
772 | return; | 767 | return; |
773 | } | 768 | } |
@@ -779,8 +774,8 @@ int __devinit pm860x_device_init(struct pm860x_chip *chip, | |||
779 | 774 | ||
780 | switch (chip->id) { | 775 | switch (chip->id) { |
781 | case CHIP_PM8606: | 776 | case CHIP_PM8606: |
782 | device_bk_init(chip, chip->client, pdata); | 777 | device_bk_init(chip, pdata); |
783 | device_led_init(chip, chip->client, pdata); | 778 | device_led_init(chip, pdata); |
784 | break; | 779 | break; |
785 | case CHIP_PM8607: | 780 | case CHIP_PM8607: |
786 | device_8607_init(chip, chip->client, pdata); | 781 | device_8607_init(chip, chip->client, pdata); |
@@ -790,8 +785,8 @@ int __devinit pm860x_device_init(struct pm860x_chip *chip, | |||
790 | if (chip->companion) { | 785 | if (chip->companion) { |
791 | switch (chip->id) { | 786 | switch (chip->id) { |
792 | case CHIP_PM8607: | 787 | case CHIP_PM8607: |
793 | device_bk_init(chip, chip->companion, pdata); | 788 | device_bk_init(chip, pdata); |
794 | device_led_init(chip, chip->companion, pdata); | 789 | device_led_init(chip, pdata); |
795 | break; | 790 | break; |
796 | case CHIP_PM8606: | 791 | case CHIP_PM8606: |
797 | device_8607_init(chip, chip->companion, pdata); | 792 | device_8607_init(chip, chip->companion, pdata); |
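
A note on the regulator hunk above: device_regulator_init() no longer parses "BUCKn"/"LDOn" out of the constraint name; it now expects the board to place the regulator index in regulator_init_data.driver_data and reads it back as *(unsigned int *). A minimal board-side sketch of that convention, with purely illustrative names (none of them come from this patch):

        /* Hypothetical board file fragment; names are illustrative only. */
        static unsigned int buck1_id = PM8607_ID_BUCK1;

        static struct regulator_init_data board_88pm8607_regulators[] = {
                {
                        .constraints = {
                                .name           = "BUCK1",
                                .min_uV         = 1000000,
                                .max_uV         = 1500000,
                                .valid_ops_mask = REGULATOR_CHANGE_VOLTAGE,
                        },
                        /* consumed above as *(unsigned int *)driver_data */
                        .driver_data    = &buck1_id,
                },
        };

pm860x_platform_data.regulator would then point at this array, with num_regulators set to its length.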
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig index 481770ab2716..b6c267724e14 100644 --- a/drivers/mfd/Kconfig +++ b/drivers/mfd/Kconfig | |||
@@ -157,6 +157,20 @@ config TPS6507X | |||
157 | This driver can also be built as a module. If so, the module | 157 | This driver can also be built as a module. If so, the module |
158 | will be called tps6507x. | 158 | will be called tps6507x. |
159 | 159 | ||
160 | config MFD_TPS6586X | ||
161 | bool "TPS6586x Power Management chips" | ||
162 | depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS | ||
163 | select MFD_CORE | ||
164 | help | ||
165 | If you say yes here you get support for the TPS6586X series of | ||
166 | Power Management chips. | ||
167 | This driver provides common support for accessing the device, | ||
168 | additional drivers must be enabled in order to use the | ||
169 | functionality of the device. | ||
170 | |||
171 | This driver can also be built as a module. If so, the module | ||
172 | will be called tps6586x. | ||
173 | |||
160 | config MENELAUS | 174 | config MENELAUS |
161 | bool "Texas Instruments TWL92330/Menelaus PM chip" | 175 | bool "Texas Instruments TWL92330/Menelaus PM chip" |
162 | depends on I2C=y && ARCH_OMAP2 | 176 | depends on I2C=y && ARCH_OMAP2 |
@@ -455,6 +469,20 @@ config MFD_PCF50633 | |||
455 | facilities, and registers devices for the various functions | 469 | facilities, and registers devices for the various functions |
456 | so that function-specific drivers can bind to them. | 470 | so that function-specific drivers can bind to them. |
457 | 471 | ||
472 | config PCF50633_ADC | ||
473 | tristate "Support for NXP PCF50633 ADC" | ||
474 | depends on MFD_PCF50633 | ||
475 | help | ||
476 | Say yes here if you want to include support for ADC in the | ||
477 | NXP PCF50633 chip. | ||
478 | |||
479 | config PCF50633_GPIO | ||
480 | tristate "Support for NXP PCF50633 GPIO" | ||
481 | depends on MFD_PCF50633 | ||
482 | help | ||
483 | Say yes here if you want to include support GPIO for pins on | ||
484 | the PCF50633 chip. | ||
485 | |||
458 | config MFD_MC13783 | 486 | config MFD_MC13783 |
459 | tristate | 487 | tristate |
460 | 488 | ||
@@ -470,20 +498,6 @@ config MFD_MC13XXX | |||
470 | additional drivers must be enabled in order to use the | 498 | additional drivers must be enabled in order to use the |
471 | functionality of the device. | 499 | functionality of the device. |
472 | 500 | ||
473 | config PCF50633_ADC | ||
474 | tristate "Support for NXP PCF50633 ADC" | ||
475 | depends on MFD_PCF50633 | ||
476 | help | ||
477 | Say yes here if you want to include support for ADC in the | ||
478 | NXP PCF50633 chip. | ||
479 | |||
480 | config PCF50633_GPIO | ||
481 | tristate "Support for NXP PCF50633 GPIO" | ||
482 | depends on MFD_PCF50633 | ||
483 | help | ||
484 | Say yes here if you want to include support GPIO for pins on | ||
485 | the PCF50633 chip. | ||
486 | |||
487 | config ABX500_CORE | 501 | config ABX500_CORE |
488 | bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions" | 502 | bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions" |
489 | default y if ARCH_U300 || ARCH_U8500 | 503 | default y if ARCH_U300 || ARCH_U8500 |
@@ -649,20 +663,6 @@ config MFD_JZ4740_ADC | |||
649 | Say yes here if you want support for the ADC unit in the JZ4740 SoC. | 663 | Say yes here if you want support for the ADC unit in the JZ4740 SoC. |
650 | This driver is necessary for jz4740-battery and jz4740-hwmon driver. | 664 | This driver is necessary for jz4740-battery and jz4740-hwmon driver. |
651 | 665 | ||
652 | config MFD_TPS6586X | ||
653 | bool "TPS6586x Power Management chips" | ||
654 | depends on I2C=y && GPIOLIB && GENERIC_HARDIRQS | ||
655 | select MFD_CORE | ||
656 | help | ||
657 | If you say yes here you get support for the TPS6586X series of | ||
658 | Power Management chips. | ||
659 | This driver provides common support for accessing the device, | ||
660 | additional drivers must be enabled in order to use the | ||
661 | functionality of the device. | ||
662 | |||
663 | This driver can also be built as a module. If so, the module | ||
664 | will be called tps6586x. | ||
665 | |||
666 | config MFD_VX855 | 666 | config MFD_VX855 |
667 | tristate "Support for VIA VX855/VX875 integrated south bridge" | 667 | tristate "Support for VIA VX855/VX875 integrated south bridge" |
668 | depends on PCI | 668 | depends on PCI |
@@ -691,6 +691,43 @@ config MFD_OMAP_USB_HOST | |||
691 | This MFD driver does the required setup functionalities for | 691 | This MFD driver does the required setup functionalities for |
692 | OMAP USB Host drivers. | 692 | OMAP USB Host drivers. |
693 | 693 | ||
694 | config MFD_PM8XXX | ||
695 | tristate | ||
696 | |||
697 | config MFD_PM8921_CORE | ||
698 | tristate "Qualcomm PM8921 PMIC chip" | ||
699 | depends on MSM_SSBI | ||
700 | select MFD_CORE | ||
701 | select MFD_PM8XXX | ||
702 | help | ||
703 | If you say yes to this option, support will be included for the | ||
704 | built-in PM8921 PMIC chip. | ||
705 | |||
706 | This is required if your board has a PM8921 and uses its features, | ||
707 | such as: MPPs, GPIOs, regulators, interrupts, and PWM. | ||
708 | |||
709 | Say M here if you want to include support for PM8921 chip as a module. | ||
710 | This will build a module called "pm8921-core". | ||
711 | |||
712 | config MFD_PM8XXX_IRQ | ||
713 | bool "Support for Qualcomm PM8xxx IRQ features" | ||
714 | depends on MFD_PM8XXX | ||
715 | default y if MFD_PM8XXX | ||
716 | help | ||
717 | This is the IRQ driver for Qualcomm PM 8xxx PMIC chips. | ||
718 | |||
719 | This is required to use certain other PM 8xxx features, such as GPIO | ||
720 | and MPP. | ||
721 | |||
722 | config MFD_TPS65910 | ||
723 | bool "TPS65910 Power Management chip" | ||
724 | depends on I2C=y | ||
725 | select MFD_CORE | ||
726 | select GPIO_TPS65910 | ||
727 | help | ||
728 | If you say yes here you get support for the TPS65910 series of | ||
729 | Power Management chips. | ||
730 | |||
694 | endif # MFD_SUPPORT | 731 | endif # MFD_SUPPORT |
695 | 732 | ||
696 | menu "Multimedia Capabilities Port drivers" | 733 | menu "Multimedia Capabilities Port drivers" |
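
For orientation, a config fragment that exercises the entries added above (a sketch only; the PM8921 options are additionally gated on MSM_SSBI, and MFD_TPS65910 on I2C=y, exactly as the dependencies state):

        CONFIG_MFD_SUPPORT=y
        CONFIG_MFD_PM8921_CORE=y
        CONFIG_MFD_PM8XXX_IRQ=y
        CONFIG_MFD_TPS65910=y

MFD_CORE, MFD_PM8XXX and GPIO_TPS65910 are pulled in by the select statements and do not need to be set by hand.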
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile index 24aa44448daf..efe3cc33ed92 100644 --- a/drivers/mfd/Makefile +++ b/drivers/mfd/Makefile | |||
@@ -91,3 +91,6 @@ obj-$(CONFIG_MFD_VX855) += vx855.o | |||
91 | obj-$(CONFIG_MFD_WL1273_CORE) += wl1273-core.o | 91 | obj-$(CONFIG_MFD_WL1273_CORE) += wl1273-core.o |
92 | obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o | 92 | obj-$(CONFIG_MFD_CS5535) += cs5535-mfd.o |
93 | obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o | 93 | obj-$(CONFIG_MFD_OMAP_USB_HOST) += omap-usb-host.o |
94 | obj-$(CONFIG_MFD_PM8921_CORE) += pm8921-core.o | ||
95 | obj-$(CONFIG_MFD_PM8XXX_IRQ) += pm8xxx-irq.o | ||
96 | obj-$(CONFIG_MFD_TPS65910) += tps65910.o tps65910-irq.o | ||
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c index a751927047ac..a20e1c41bed2 100644 --- a/drivers/mfd/ab3100-core.c +++ b/drivers/mfd/ab3100-core.c | |||
@@ -949,8 +949,10 @@ static int __devinit ab3100_probe(struct i2c_client *client, | |||
949 | goto exit_no_ops; | 949 | goto exit_no_ops; |
950 | 950 | ||
951 | /* Set up and register the platform devices. */ | 951 | /* Set up and register the platform devices. */ |
952 | for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++) | 952 | for (i = 0; i < ARRAY_SIZE(ab3100_devs); i++) { |
953 | ab3100_devs[i].mfd_data = ab3100_plf_data; | 953 | ab3100_devs[i].platform_data = ab3100_plf_data; |
954 | ab3100_devs[i].pdata_size = sizeof(struct ab3100_platform_data); | ||
955 | } | ||
954 | 956 | ||
955 | err = mfd_add_devices(&client->dev, 0, ab3100_devs, | 957 | err = mfd_add_devices(&client->dev, 0, ab3100_devs, |
956 | ARRAY_SIZE(ab3100_devs), NULL, 0); | 958 | ARRAY_SIZE(ab3100_devs), NULL, 0); |
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c index ff86acf3e6bd..3d7dce671b93 100644 --- a/drivers/mfd/ab3550-core.c +++ b/drivers/mfd/ab3550-core.c | |||
@@ -1320,8 +1320,10 @@ static int __init ab3550_probe(struct i2c_client *client, | |||
1320 | goto exit_no_ops; | 1320 | goto exit_no_ops; |
1321 | 1321 | ||
1322 | /* Set up and register the platform devices. */ | 1322 | /* Set up and register the platform devices. */ |
1323 | for (i = 0; i < AB3550_NUM_DEVICES; i++) | 1323 | for (i = 0; i < AB3550_NUM_DEVICES; i++) { |
1324 | ab3550_devs[i].mfd_data = ab3550_plf_data->dev_data[i]; | 1324 | ab3550_devs[i].platform_data = ab3550_plf_data->dev_data[i]; |
1325 | ab3550_devs[i].pdata_size = ab3550_plf_data->dev_data_sz[i]; | ||
1326 | } | ||
1325 | 1327 | ||
1326 | err = mfd_add_devices(&client->dev, 0, ab3550_devs, | 1328 | err = mfd_add_devices(&client->dev, 0, ab3550_devs, |
1327 | ARRAY_SIZE(ab3550_devs), NULL, | 1329 | ARRAY_SIZE(ab3550_devs), NULL, |
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c index 67d01c938284..fc0c1af1566e 100644 --- a/drivers/mfd/ab8500-core.c +++ b/drivers/mfd/ab8500-core.c | |||
@@ -254,8 +254,9 @@ static void ab8500_irq_sync_unlock(struct irq_data *data) | |||
254 | if (new == old) | 254 | if (new == old) |
255 | continue; | 255 | continue; |
256 | 256 | ||
257 | /* Interrupt register 12 does'nt exist prior to version 0x20 */ | 257 | /* Interrupt register 12 doesn't exist prior to version 2.0 */ |
258 | if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20) | 258 | if (ab8500_irq_regoffset[i] == 11 && |
259 | ab8500->chip_id < AB8500_CUT2P0) | ||
259 | continue; | 260 | continue; |
260 | 261 | ||
261 | ab8500->oldmask[i] = new; | 262 | ab8500->oldmask[i] = new; |
@@ -307,8 +308,8 @@ static irqreturn_t ab8500_irq(int irq, void *dev) | |||
307 | int status; | 308 | int status; |
308 | u8 value; | 309 | u8 value; |
309 | 310 | ||
310 | /* Interrupt register 12 does'nt exist prior to version 0x20 */ | 311 | /* Interrupt register 12 doesn't exist prior to version 2.0 */ |
311 | if (regoffset == 11 && ab8500->chip_id < 0x20) | 312 | if (regoffset == 11 && ab8500->chip_id < AB8500_CUT2P0) |
312 | continue; | 313 | continue; |
313 | 314 | ||
314 | status = get_register_interruptible(ab8500, AB8500_INTERRUPT, | 315 | status = get_register_interruptible(ab8500, AB8500_INTERRUPT, |
@@ -724,17 +725,15 @@ int __devinit ab8500_init(struct ab8500 *ab8500) | |||
724 | if (ret < 0) | 725 | if (ret < 0) |
725 | return ret; | 726 | return ret; |
726 | 727 | ||
727 | /* | 728 | switch (value) { |
728 | * 0x0 - Early Drop | 729 | case AB8500_CUTEARLY: |
729 | * 0x10 - Cut 1.0 | 730 | case AB8500_CUT1P0: |
730 | * 0x11 - Cut 1.1 | 731 | case AB8500_CUT1P1: |
731 | * 0x20 - Cut 2.0 | 732 | case AB8500_CUT2P0: |
732 | * 0x30 - Cut 3.0 | 733 | case AB8500_CUT3P0: |
733 | */ | ||
734 | if (value == 0x0 || value == 0x10 || value == 0x11 || value == 0x20 || | ||
735 | value == 0x30) { | ||
736 | dev_info(ab8500->dev, "detected chip, revision: %#x\n", value); | 734 | dev_info(ab8500->dev, "detected chip, revision: %#x\n", value); |
737 | } else { | 735 | break; |
736 | default: | ||
738 | dev_err(ab8500->dev, "unknown chip, revision: %#x\n", value); | 737 | dev_err(ab8500->dev, "unknown chip, revision: %#x\n", value); |
739 | return -EINVAL; | 738 | return -EINVAL; |
740 | } | 739 | } |
@@ -763,8 +762,9 @@ int __devinit ab8500_init(struct ab8500 *ab8500) | |||
763 | 762 | ||
764 | /* Clear and mask all interrupts */ | 763 | /* Clear and mask all interrupts */ |
765 | for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) { | 764 | for (i = 0; i < AB8500_NUM_IRQ_REGS; i++) { |
766 | /* Interrupt register 12 does'nt exist prior to version 0x20 */ | 765 | /* Interrupt register 12 doesn't exist prior to version 2.0 */ |
767 | if (ab8500_irq_regoffset[i] == 11 && ab8500->chip_id < 0x20) | 766 | if (ab8500_irq_regoffset[i] == 11 && |
767 | ab8500->chip_id < AB8500_CUT2P0) | ||
768 | continue; | 768 | continue; |
769 | 769 | ||
770 | get_register_interruptible(ab8500, AB8500_INTERRUPT, | 770 | get_register_interruptible(ab8500, AB8500_INTERRUPT, |
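
The switch above replaces raw cut numbers with named constants. For reading the diff, the assumed mapping mirrors the comment the patch deletes (the authoritative definitions live in the ab8500 header, not here):

        /* Illustration only; values taken from the removed comment. */
        #define AB8500_CUTEARLY 0x00    /* Early Drop */
        #define AB8500_CUT1P0   0x10    /* Cut 1.0 */
        #define AB8500_CUT1P1   0x11    /* Cut 1.1 */
        #define AB8500_CUT2P0   0x20    /* Cut 2.0 */
        #define AB8500_CUT3P0   0x30    /* Cut 3.0 */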
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c index 6421ad1160de..f16afb234ff9 100644 --- a/drivers/mfd/ab8500-gpadc.c +++ b/drivers/mfd/ab8500-gpadc.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #define SW_AVG_16 0x60 | 57 | #define SW_AVG_16 0x60 |
58 | #define ADC_SW_CONV 0x04 | 58 | #define ADC_SW_CONV 0x04 |
59 | #define EN_ICHAR 0x80 | 59 | #define EN_ICHAR 0x80 |
60 | #define BTEMP_PULL_UP 0x08 | ||
60 | #define EN_BUF 0x40 | 61 | #define EN_BUF 0x40 |
61 | #define DIS_ZERO 0x00 | 62 | #define DIS_ZERO 0x00 |
62 | #define GPADC_BUSY 0x01 | 63 | #define GPADC_BUSY 0x01 |
@@ -101,6 +102,7 @@ struct adc_cal_data { | |||
101 | 102 | ||
102 | /** | 103 | /** |
103 | * struct ab8500_gpadc - AB8500 GPADC device information | 104 | * struct ab8500_gpadc - AB8500 GPADC device information |
105 | * @chip_id ABB chip id | ||
104 | * @dev: pointer to the struct device | 106 | * @dev: pointer to the struct device |
105 | * @node: a list of AB8500 GPADCs, hence prepared for | 107 | * @node: a list of AB8500 GPADCs, hence prepared for |
106 | reentrance | 108 | reentrance |
@@ -112,6 +114,7 @@ struct adc_cal_data { | |||
112 | * @cal_data array of ADC calibration data structs | 114 | * @cal_data array of ADC calibration data structs |
113 | */ | 115 | */ |
114 | struct ab8500_gpadc { | 116 | struct ab8500_gpadc { |
117 | u8 chip_id; | ||
115 | struct device *dev; | 118 | struct device *dev; |
116 | struct list_head node; | 119 | struct list_head node; |
117 | struct completion ab8500_gpadc_complete; | 120 | struct completion ab8500_gpadc_complete; |
@@ -274,6 +277,7 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input) | |||
274 | dev_err(gpadc->dev, "gpadc_conversion: enable gpadc failed\n"); | 277 | dev_err(gpadc->dev, "gpadc_conversion: enable gpadc failed\n"); |
275 | goto out; | 278 | goto out; |
276 | } | 279 | } |
280 | |||
277 | /* Select the input source and set average samples to 16 */ | 281 | /* Select the input source and set average samples to 16 */ |
278 | ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC, | 282 | ret = abx500_set_register_interruptible(gpadc->dev, AB8500_GPADC, |
279 | AB8500_GPADC_CTRL2_REG, (input | SW_AVG_16)); | 283 | AB8500_GPADC_CTRL2_REG, (input | SW_AVG_16)); |
@@ -282,9 +286,11 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input) | |||
282 | "gpadc_conversion: set avg samples failed\n"); | 286 | "gpadc_conversion: set avg samples failed\n"); |
283 | goto out; | 287 | goto out; |
284 | } | 288 | } |
289 | |||
285 | /* | 290 | /* |
286 | * Enable ADC, buffering, select rising edge and enable ADC path | 291 | * Enable ADC, buffering, select rising edge and enable ADC path |
287 | * charging current sense if it needed | 292 | * charging current sense if it needed, ABB 3.0 needs some special |
293 | * treatment too. | ||
288 | */ | 294 | */ |
289 | switch (input) { | 295 | switch (input) { |
290 | case MAIN_CHARGER_C: | 296 | case MAIN_CHARGER_C: |
@@ -294,6 +300,23 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input) | |||
294 | EN_BUF | EN_ICHAR, | 300 | EN_BUF | EN_ICHAR, |
295 | EN_BUF | EN_ICHAR); | 301 | EN_BUF | EN_ICHAR); |
296 | break; | 302 | break; |
303 | case BTEMP_BALL: | ||
304 | if (gpadc->chip_id >= AB8500_CUT3P0) { | ||
305 | /* Turn on btemp pull-up on ABB 3.0 */ | ||
306 | ret = abx500_mask_and_set_register_interruptible( | ||
307 | gpadc->dev, | ||
308 | AB8500_GPADC, AB8500_GPADC_CTRL1_REG, | ||
309 | EN_BUF | BTEMP_PULL_UP, | ||
310 | EN_BUF | BTEMP_PULL_UP); | ||
311 | |||
312 | /* | ||
313 | * Delay might be needed for ABB8500 cut 3.0, if not, remove | ||
314 | * when hardware will be available | ||
315 | */ | ||
316 | msleep(1); | ||
317 | break; | ||
318 | } | ||
319 | /* Intentional fallthrough */ | ||
297 | default: | 320 | default: |
298 | ret = abx500_mask_and_set_register_interruptible(gpadc->dev, | 321 | ret = abx500_mask_and_set_register_interruptible(gpadc->dev, |
299 | AB8500_GPADC, AB8500_GPADC_CTRL1_REG, EN_BUF, EN_BUF); | 322 | AB8500_GPADC, AB8500_GPADC_CTRL1_REG, EN_BUF, EN_BUF); |
@@ -304,6 +327,7 @@ int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 input) | |||
304 | "gpadc_conversion: select falling edge failed\n"); | 327 | "gpadc_conversion: select falling edge failed\n"); |
305 | goto out; | 328 | goto out; |
306 | } | 329 | } |
330 | |||
307 | ret = abx500_mask_and_set_register_interruptible(gpadc->dev, | 331 | ret = abx500_mask_and_set_register_interruptible(gpadc->dev, |
308 | AB8500_GPADC, AB8500_GPADC_CTRL1_REG, ADC_SW_CONV, ADC_SW_CONV); | 332 | AB8500_GPADC, AB8500_GPADC_CTRL1_REG, ADC_SW_CONV, ADC_SW_CONV); |
309 | if (ret < 0) { | 333 | if (ret < 0) { |
@@ -552,6 +576,14 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev) | |||
552 | goto fail; | 576 | goto fail; |
553 | } | 577 | } |
554 | 578 | ||
579 | /* Get Chip ID of the ABB ASIC */ | ||
580 | ret = abx500_get_chip_id(gpadc->dev); | ||
581 | if (ret < 0) { | ||
582 | dev_err(gpadc->dev, "failed to get chip ID\n"); | ||
583 | goto fail_irq; | ||
584 | } | ||
585 | gpadc->chip_id = (u8) ret; | ||
586 | |||
555 | /* VTVout LDO used to power up ab8500-GPADC */ | 587 | /* VTVout LDO used to power up ab8500-GPADC */ |
556 | gpadc->regu = regulator_get(&pdev->dev, "vddadc"); | 588 | gpadc->regu = regulator_get(&pdev->dev, "vddadc"); |
557 | if (IS_ERR(gpadc->regu)) { | 589 | if (IS_ERR(gpadc->regu)) { |
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c index 0b4d5b23bec9..c27fd1fc3b86 100644 --- a/drivers/mfd/asic3.c +++ b/drivers/mfd/asic3.c | |||
@@ -88,19 +88,19 @@ struct asic3 { | |||
88 | 88 | ||
89 | static int asic3_gpio_get(struct gpio_chip *chip, unsigned offset); | 89 | static int asic3_gpio_get(struct gpio_chip *chip, unsigned offset); |
90 | 90 | ||
91 | static inline void asic3_write_register(struct asic3 *asic, | 91 | void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 value) |
92 | unsigned int reg, u32 value) | ||
93 | { | 92 | { |
94 | iowrite16(value, asic->mapping + | 93 | iowrite16(value, asic->mapping + |
95 | (reg >> asic->bus_shift)); | 94 | (reg >> asic->bus_shift)); |
96 | } | 95 | } |
96 | EXPORT_SYMBOL_GPL(asic3_write_register); | ||
97 | 97 | ||
98 | static inline u32 asic3_read_register(struct asic3 *asic, | 98 | u32 asic3_read_register(struct asic3 *asic, unsigned int reg) |
99 | unsigned int reg) | ||
100 | { | 99 | { |
101 | return ioread16(asic->mapping + | 100 | return ioread16(asic->mapping + |
102 | (reg >> asic->bus_shift)); | 101 | (reg >> asic->bus_shift)); |
103 | } | 102 | } |
103 | EXPORT_SYMBOL_GPL(asic3_read_register); | ||
104 | 104 | ||
105 | static void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set) | 105 | static void asic3_set_register(struct asic3 *asic, u32 reg, u32 bits, bool set) |
106 | { | 106 | { |
@@ -676,7 +676,8 @@ static struct mfd_cell asic3_cell_ds1wm = { | |||
676 | .name = "ds1wm", | 676 | .name = "ds1wm", |
677 | .enable = ds1wm_enable, | 677 | .enable = ds1wm_enable, |
678 | .disable = ds1wm_disable, | 678 | .disable = ds1wm_disable, |
679 | .mfd_data = &ds1wm_pdata, | 679 | .platform_data = &ds1wm_pdata, |
680 | .pdata_size = sizeof(ds1wm_pdata), | ||
680 | .num_resources = ARRAY_SIZE(ds1wm_resources), | 681 | .num_resources = ARRAY_SIZE(ds1wm_resources), |
681 | .resources = ds1wm_resources, | 682 | .resources = ds1wm_resources, |
682 | }; | 683 | }; |
@@ -777,12 +778,61 @@ static struct mfd_cell asic3_cell_mmc = { | |||
777 | .name = "tmio-mmc", | 778 | .name = "tmio-mmc", |
778 | .enable = asic3_mmc_enable, | 779 | .enable = asic3_mmc_enable, |
779 | .disable = asic3_mmc_disable, | 780 | .disable = asic3_mmc_disable, |
780 | .mfd_data = &asic3_mmc_data, | 781 | .platform_data = &asic3_mmc_data, |
782 | .pdata_size = sizeof(asic3_mmc_data), | ||
781 | .num_resources = ARRAY_SIZE(asic3_mmc_resources), | 783 | .num_resources = ARRAY_SIZE(asic3_mmc_resources), |
782 | .resources = asic3_mmc_resources, | 784 | .resources = asic3_mmc_resources, |
783 | }; | 785 | }; |
784 | 786 | ||
787 | static const int clock_ledn[ASIC3_NUM_LEDS] = { | ||
788 | [0] = ASIC3_CLOCK_LED0, | ||
789 | [1] = ASIC3_CLOCK_LED1, | ||
790 | [2] = ASIC3_CLOCK_LED2, | ||
791 | }; | ||
792 | |||
793 | static int asic3_leds_enable(struct platform_device *pdev) | ||
794 | { | ||
795 | const struct mfd_cell *cell = mfd_get_cell(pdev); | ||
796 | struct asic3 *asic = dev_get_drvdata(pdev->dev.parent); | ||
797 | |||
798 | asic3_clk_enable(asic, &asic->clocks[clock_ledn[cell->id]]); | ||
799 | |||
800 | return 0; | ||
801 | } | ||
802 | |||
803 | static int asic3_leds_disable(struct platform_device *pdev) | ||
804 | { | ||
805 | const struct mfd_cell *cell = mfd_get_cell(pdev); | ||
806 | struct asic3 *asic = dev_get_drvdata(pdev->dev.parent); | ||
807 | |||
808 | asic3_clk_disable(asic, &asic->clocks[clock_ledn[cell->id]]); | ||
809 | |||
810 | return 0; | ||
811 | } | ||
812 | |||
813 | static struct mfd_cell asic3_cell_leds[ASIC3_NUM_LEDS] = { | ||
814 | [0] = { | ||
815 | .name = "leds-asic3", | ||
816 | .id = 0, | ||
817 | .enable = asic3_leds_enable, | ||
818 | .disable = asic3_leds_disable, | ||
819 | }, | ||
820 | [1] = { | ||
821 | .name = "leds-asic3", | ||
822 | .id = 1, | ||
823 | .enable = asic3_leds_enable, | ||
824 | .disable = asic3_leds_disable, | ||
825 | }, | ||
826 | [2] = { | ||
827 | .name = "leds-asic3", | ||
828 | .id = 2, | ||
829 | .enable = asic3_leds_enable, | ||
830 | .disable = asic3_leds_disable, | ||
831 | }, | ||
832 | }; | ||
833 | |||
785 | static int __init asic3_mfd_probe(struct platform_device *pdev, | 834 | static int __init asic3_mfd_probe(struct platform_device *pdev, |
835 | struct asic3_platform_data *pdata, | ||
786 | struct resource *mem) | 836 | struct resource *mem) |
787 | { | 837 | { |
788 | struct asic3 *asic = platform_get_drvdata(pdev); | 838 | struct asic3 *asic = platform_get_drvdata(pdev); |
@@ -806,7 +856,8 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, | |||
806 | 856 | ||
807 | /* MMC */ | 857 | /* MMC */ |
808 | asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) + | 858 | asic->tmio_cnf = ioremap((ASIC3_SD_CONFIG_BASE >> asic->bus_shift) + |
809 | mem_sdio->start, 0x400 >> asic->bus_shift); | 859 | mem_sdio->start, |
860 | ASIC3_SD_CONFIG_SIZE >> asic->bus_shift); | ||
810 | if (!asic->tmio_cnf) { | 861 | if (!asic->tmio_cnf) { |
811 | ret = -ENOMEM; | 862 | ret = -ENOMEM; |
812 | dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n"); | 863 | dev_dbg(asic->dev, "Couldn't ioremap SD_CONFIG\n"); |
@@ -820,9 +871,23 @@ static int __init asic3_mfd_probe(struct platform_device *pdev, | |||
820 | if (ret < 0) | 871 | if (ret < 0) |
821 | goto out; | 872 | goto out; |
822 | 873 | ||
823 | if (mem_sdio && (irq >= 0)) | 874 | if (mem_sdio && (irq >= 0)) { |
824 | ret = mfd_add_devices(&pdev->dev, pdev->id, | 875 | ret = mfd_add_devices(&pdev->dev, pdev->id, |
825 | &asic3_cell_mmc, 1, mem_sdio, irq); | 876 | &asic3_cell_mmc, 1, mem_sdio, irq); |
877 | if (ret < 0) | ||
878 | goto out; | ||
879 | } | ||
880 | |||
881 | if (pdata->leds) { | ||
882 | int i; | ||
883 | |||
884 | for (i = 0; i < ASIC3_NUM_LEDS; ++i) { | ||
885 | asic3_cell_leds[i].platform_data = &pdata->leds[i]; | ||
886 | asic3_cell_leds[i].pdata_size = sizeof(pdata->leds[i]); | ||
887 | } | ||
888 | ret = mfd_add_devices(&pdev->dev, 0, | ||
889 | asic3_cell_leds, ASIC3_NUM_LEDS, NULL, 0); | ||
890 | } | ||
826 | 891 | ||
827 | out: | 892 | out: |
828 | return ret; | 893 | return ret; |
@@ -903,7 +968,7 @@ static int __init asic3_probe(struct platform_device *pdev) | |||
903 | */ | 968 | */ |
904 | memcpy(asic->clocks, asic3_clk_init, sizeof(asic3_clk_init)); | 969 | memcpy(asic->clocks, asic3_clk_init, sizeof(asic3_clk_init)); |
905 | 970 | ||
906 | asic3_mfd_probe(pdev, mem); | 971 | asic3_mfd_probe(pdev, pdata, mem); |
907 | 972 | ||
908 | dev_info(asic->dev, "ASIC3 Core driver\n"); | 973 | dev_info(asic->dev, "ASIC3 Core driver\n"); |
909 | 974 | ||
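
The new leds-asic3 cells take their platform data from pdata->leds, one entry per cell. A hypothetical board fragment along those lines; only the pdata->leds hookup is shown by the patch, so struct asic3_led and its field names are assumptions here:

        /* Hypothetical board data; the per-LED fields are assumed. */
        static struct asic3_led board_asic3_leds[ASIC3_NUM_LEDS] = {
                [0] = { .name = "asic3:red",   .default_trigger = "heartbeat" },
                [1] = { .name = "asic3:green" },
                [2] = { .name = "asic3:blue" },
        };

        static struct asic3_platform_data board_asic3_pdata = {
                /* GPIO and IRQ setup elided */
                .leds = board_asic3_leds,
        };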
diff --git a/drivers/mfd/davinci_voicecodec.c b/drivers/mfd/davinci_voicecodec.c index 414783b04849..4e2af2cb2d26 100644 --- a/drivers/mfd/davinci_voicecodec.c +++ b/drivers/mfd/davinci_voicecodec.c | |||
@@ -119,12 +119,14 @@ static int __init davinci_vc_probe(struct platform_device *pdev) | |||
119 | /* Voice codec interface client */ | 119 | /* Voice codec interface client */ |
120 | cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; | 120 | cell = &davinci_vc->cells[DAVINCI_VC_VCIF_CELL]; |
121 | cell->name = "davinci-vcif"; | 121 | cell->name = "davinci-vcif"; |
122 | cell->mfd_data = davinci_vc; | 122 | cell->platform_data = davinci_vc; |
123 | cell->pdata_size = sizeof(*davinci_vc); | ||
123 | 124 | ||
124 | /* Voice codec CQ93VC client */ | 125 | /* Voice codec CQ93VC client */ |
125 | cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; | 126 | cell = &davinci_vc->cells[DAVINCI_VC_CQ93VC_CELL]; |
126 | cell->name = "cq93vc-codec"; | 127 | cell->name = "cq93vc-codec"; |
127 | cell->mfd_data = davinci_vc; | 128 | cell->platform_data = davinci_vc; |
129 | cell->pdata_size = sizeof(*davinci_vc); | ||
128 | 130 | ||
129 | ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells, | 131 | ret = mfd_add_devices(&pdev->dev, pdev->id, davinci_vc->cells, |
130 | DAVINCI_VC_CELLS, NULL, 0); | 132 | DAVINCI_VC_CELLS, NULL, 0); |
diff --git a/drivers/mfd/htc-pasic3.c b/drivers/mfd/htc-pasic3.c index fb9770b39a32..2808bd125d13 100644 --- a/drivers/mfd/htc-pasic3.c +++ b/drivers/mfd/htc-pasic3.c | |||
@@ -117,7 +117,8 @@ static struct mfd_cell ds1wm_cell __initdata = { | |||
117 | .name = "ds1wm", | 117 | .name = "ds1wm", |
118 | .enable = ds1wm_enable, | 118 | .enable = ds1wm_enable, |
119 | .disable = ds1wm_disable, | 119 | .disable = ds1wm_disable, |
120 | .mfd_data = &ds1wm_pdata, | 120 | .platform_data = &ds1wm_pdata, |
121 | .pdata_size = sizeof(ds1wm_pdata), | ||
121 | .num_resources = 2, | 122 | .num_resources = 2, |
122 | .resources = ds1wm_resources, | 123 | .resources = ds1wm_resources, |
123 | }; | 124 | }; |
@@ -172,6 +173,8 @@ static int __init pasic3_probe(struct platform_device *pdev) | |||
172 | } | 173 | } |
173 | 174 | ||
174 | if (pdata && pdata->led_pdata) { | 175 | if (pdata && pdata->led_pdata) { |
176 | led_cell.platform_data = pdata->led_pdata; | ||
177 | led_cell.pdata_size = sizeof(struct pasic3_leds_machinfo); | ||
175 | ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r, 0); | 178 | ret = mfd_add_devices(&pdev->dev, pdev->id, &led_cell, 1, r, 0); |
176 | if (ret < 0) | 179 | if (ret < 0) |
177 | dev_warn(dev, "failed to register LED device\n"); | 180 | dev_warn(dev, "failed to register LED device\n"); |
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c index fc4191137e90..5c2a06acb77f 100644 --- a/drivers/mfd/janz-cmodio.c +++ b/drivers/mfd/janz-cmodio.c | |||
@@ -86,7 +86,8 @@ static int __devinit cmodio_setup_subdevice(struct cmodio_device *priv, | |||
86 | 86 | ||
87 | /* Add platform data */ | 87 | /* Add platform data */ |
88 | pdata->modno = modno; | 88 | pdata->modno = modno; |
89 | cell->mfd_data = pdata; | 89 | cell->platform_data = pdata; |
90 | cell->pdata_size = sizeof(*pdata); | ||
90 | 91 | ||
91 | /* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */ | 92 | /* MODULbus registers -- PCI BAR3 is big-endian MODULbus access */ |
92 | res->flags = IORESOURCE_MEM; | 93 | res->flags = IORESOURCE_MEM; |
diff --git a/drivers/mfd/max8925-core.c b/drivers/mfd/max8925-core.c index 58cc5fdde016..e1e59c92f758 100644 --- a/drivers/mfd/max8925-core.c +++ b/drivers/mfd/max8925-core.c | |||
@@ -627,7 +627,7 @@ int __devinit max8925_device_init(struct max8925_chip *chip, | |||
627 | goto out_dev; | 627 | goto out_dev; |
628 | } | 628 | } |
629 | 629 | ||
630 | if (pdata && pdata->regulator[0]) { | 630 | if (pdata) { |
631 | ret = mfd_add_devices(chip->dev, 0, ®ulator_devs[0], | 631 | ret = mfd_add_devices(chip->dev, 0, ®ulator_devs[0], |
632 | ARRAY_SIZE(regulator_devs), | 632 | ARRAY_SIZE(regulator_devs), |
633 | ®ulator_resources[0], 0); | 633 | ®ulator_resources[0], 0); |
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c index 668634e89e81..7e4d44bf92ab 100644 --- a/drivers/mfd/mc13xxx-core.c +++ b/drivers/mfd/mc13xxx-core.c | |||
@@ -683,13 +683,14 @@ out: | |||
683 | EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion); | 683 | EXPORT_SYMBOL_GPL(mc13783_adc_do_conversion); |
684 | 684 | ||
685 | static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx, | 685 | static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx, |
686 | const char *format, void *pdata) | 686 | const char *format, void *pdata, size_t pdata_size) |
687 | { | 687 | { |
688 | char buf[30]; | 688 | char buf[30]; |
689 | const char *name = mc13xxx_get_chipname(mc13xxx); | 689 | const char *name = mc13xxx_get_chipname(mc13xxx); |
690 | 690 | ||
691 | struct mfd_cell cell = { | 691 | struct mfd_cell cell = { |
692 | .mfd_data = pdata, | 692 | .platform_data = pdata, |
693 | .pdata_size = pdata_size, | ||
693 | }; | 694 | }; |
694 | 695 | ||
695 | /* there is no asnprintf in the kernel :-( */ | 696 | /* there is no asnprintf in the kernel :-( */ |
@@ -705,7 +706,7 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx, | |||
705 | 706 | ||
706 | static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format) | 707 | static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format) |
707 | { | 708 | { |
708 | return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL); | 709 | return mc13xxx_add_subdevice_pdata(mc13xxx, format, NULL, 0); |
709 | } | 710 | } |
710 | 711 | ||
711 | static int mc13xxx_probe(struct spi_device *spi) | 712 | static int mc13xxx_probe(struct spi_device *spi) |
@@ -764,7 +765,7 @@ err_revision: | |||
764 | 765 | ||
765 | if (pdata->flags & MC13XXX_USE_REGULATOR) { | 766 | if (pdata->flags & MC13XXX_USE_REGULATOR) { |
766 | mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator", | 767 | mc13xxx_add_subdevice_pdata(mc13xxx, "%s-regulator", |
767 | &pdata->regulators); | 768 | &pdata->regulators, sizeof(pdata->regulators)); |
768 | } | 769 | } |
769 | 770 | ||
770 | if (pdata->flags & MC13XXX_USE_RTC) | 771 | if (pdata->flags & MC13XXX_USE_RTC) |
@@ -774,7 +775,8 @@ err_revision: | |||
774 | mc13xxx_add_subdevice(mc13xxx, "%s-ts"); | 775 | mc13xxx_add_subdevice(mc13xxx, "%s-ts"); |
775 | 776 | ||
776 | if (pdata->flags & MC13XXX_USE_LED) | 777 | if (pdata->flags & MC13XXX_USE_LED) |
777 | mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led", pdata->leds); | 778 | mc13xxx_add_subdevice_pdata(mc13xxx, "%s-led", |
779 | pdata->leds, sizeof(*pdata->leds)); | ||
778 | 780 | ||
779 | return 0; | 781 | return 0; |
780 | } | 782 | } |
diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c index f4c8c844b913..0902523af62d 100644 --- a/drivers/mfd/mfd-core.c +++ b/drivers/mfd/mfd-core.c | |||
@@ -88,6 +88,13 @@ static int mfd_add_device(struct device *parent, int id, | |||
88 | 88 | ||
89 | pdev->dev.parent = parent; | 89 | pdev->dev.parent = parent; |
90 | 90 | ||
91 | if (cell->pdata_size) { | ||
92 | ret = platform_device_add_data(pdev, | ||
93 | cell->platform_data, cell->pdata_size); | ||
94 | if (ret) | ||
95 | goto fail_res; | ||
96 | } | ||
97 | |||
91 | ret = mfd_platform_add_cell(pdev, cell); | 98 | ret = mfd_platform_add_cell(pdev, cell); |
92 | if (ret) | 99 | if (ret) |
93 | goto fail_res; | 100 | goto fail_res; |
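
This mfd-core hunk is the pivot of the series: a cell now carries platform_data plus pdata_size, and mfd_add_device() passes them to platform_device_add_data(), which copies the blob into the child platform device. A sketch of both ends of that contract, using made-up names:

        /* Parent driver: declare the cell with a sized platform-data blob. */
        struct foo_pdata {
                int rate;
        };

        static struct foo_pdata foo_data = { .rate = 100 };

        static struct mfd_cell foo_cell = {
                .name           = "foo",
                .platform_data  = &foo_data,
                .pdata_size     = sizeof(foo_data),
        };

        /*
         * Child driver: platform_device_add_data() made a private copy, so
         * the usual accessor works and the parent's object need not persist.
         */
        static int __devinit foo_probe(struct platform_device *pdev)
        {
                struct foo_pdata *pdata = pdev->dev.platform_data;

                return pdata ? 0 : -EINVAL;
        }

This is what lets the drivers above (88pm860x, ab3100, asic3, htc-pasic3, janz-cmodio, mc13xxx, ...) drop their static *_pdata copies and memcpy() calls.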
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c index 3ab9ffa00aad..855219526ccb 100644 --- a/drivers/mfd/omap-usb-host.c +++ b/drivers/mfd/omap-usb-host.c | |||
@@ -26,6 +26,7 @@ | |||
26 | #include <linux/spinlock.h> | 26 | #include <linux/spinlock.h> |
27 | #include <linux/gpio.h> | 27 | #include <linux/gpio.h> |
28 | #include <plat/usb.h> | 28 | #include <plat/usb.h> |
29 | #include <linux/pm_runtime.h> | ||
29 | 30 | ||
30 | #define USBHS_DRIVER_NAME "usbhs-omap" | 31 | #define USBHS_DRIVER_NAME "usbhs-omap" |
31 | #define OMAP_EHCI_DEVICE "ehci-omap" | 32 | #define OMAP_EHCI_DEVICE "ehci-omap" |
@@ -146,9 +147,6 @@ | |||
146 | 147 | ||
147 | 148 | ||
148 | struct usbhs_hcd_omap { | 149 | struct usbhs_hcd_omap { |
149 | struct clk *usbhost_ick; | ||
150 | struct clk *usbhost_hs_fck; | ||
151 | struct clk *usbhost_fs_fck; | ||
152 | struct clk *xclk60mhsp1_ck; | 150 | struct clk *xclk60mhsp1_ck; |
153 | struct clk *xclk60mhsp2_ck; | 151 | struct clk *xclk60mhsp2_ck; |
154 | struct clk *utmi_p1_fck; | 152 | struct clk *utmi_p1_fck; |
@@ -158,8 +156,6 @@ struct usbhs_hcd_omap { | |||
158 | struct clk *usbhost_p2_fck; | 156 | struct clk *usbhost_p2_fck; |
159 | struct clk *usbtll_p2_fck; | 157 | struct clk *usbtll_p2_fck; |
160 | struct clk *init_60m_fclk; | 158 | struct clk *init_60m_fclk; |
161 | struct clk *usbtll_fck; | ||
162 | struct clk *usbtll_ick; | ||
163 | 159 | ||
164 | void __iomem *uhh_base; | 160 | void __iomem *uhh_base; |
165 | void __iomem *tll_base; | 161 | void __iomem *tll_base; |
@@ -281,6 +277,7 @@ static int omap_usbhs_alloc_children(struct platform_device *pdev) | |||
281 | 277 | ||
282 | if (!ehci) { | 278 | if (!ehci) { |
283 | dev_err(dev, "omap_usbhs_alloc_child failed\n"); | 279 | dev_err(dev, "omap_usbhs_alloc_child failed\n"); |
280 | ret = -ENOMEM; | ||
284 | goto err_end; | 281 | goto err_end; |
285 | } | 282 | } |
286 | 283 | ||
@@ -304,13 +301,14 @@ static int omap_usbhs_alloc_children(struct platform_device *pdev) | |||
304 | sizeof(*ohci_data), dev); | 301 | sizeof(*ohci_data), dev); |
305 | if (!ohci) { | 302 | if (!ohci) { |
306 | dev_err(dev, "omap_usbhs_alloc_child failed\n"); | 303 | dev_err(dev, "omap_usbhs_alloc_child failed\n"); |
304 | ret = -ENOMEM; | ||
307 | goto err_ehci; | 305 | goto err_ehci; |
308 | } | 306 | } |
309 | 307 | ||
310 | return 0; | 308 | return 0; |
311 | 309 | ||
312 | err_ehci: | 310 | err_ehci: |
313 | platform_device_put(ehci); | 311 | platform_device_unregister(ehci); |
314 | 312 | ||
315 | err_end: | 313 | err_end: |
316 | return ret; | 314 | return ret; |
@@ -351,46 +349,13 @@ static int __devinit usbhs_omap_probe(struct platform_device *pdev) | |||
351 | omap->platdata.ehci_data = pdata->ehci_data; | 349 | omap->platdata.ehci_data = pdata->ehci_data; |
352 | omap->platdata.ohci_data = pdata->ohci_data; | 350 | omap->platdata.ohci_data = pdata->ohci_data; |
353 | 351 | ||
354 | omap->usbhost_ick = clk_get(dev, "usbhost_ick"); | 352 | pm_runtime_enable(&pdev->dev); |
355 | if (IS_ERR(omap->usbhost_ick)) { | ||
356 | ret = PTR_ERR(omap->usbhost_ick); | ||
357 | dev_err(dev, "usbhost_ick failed error:%d\n", ret); | ||
358 | goto err_end; | ||
359 | } | ||
360 | |||
361 | omap->usbhost_hs_fck = clk_get(dev, "hs_fck"); | ||
362 | if (IS_ERR(omap->usbhost_hs_fck)) { | ||
363 | ret = PTR_ERR(omap->usbhost_hs_fck); | ||
364 | dev_err(dev, "usbhost_hs_fck failed error:%d\n", ret); | ||
365 | goto err_usbhost_ick; | ||
366 | } | ||
367 | |||
368 | omap->usbhost_fs_fck = clk_get(dev, "fs_fck"); | ||
369 | if (IS_ERR(omap->usbhost_fs_fck)) { | ||
370 | ret = PTR_ERR(omap->usbhost_fs_fck); | ||
371 | dev_err(dev, "usbhost_fs_fck failed error:%d\n", ret); | ||
372 | goto err_usbhost_hs_fck; | ||
373 | } | ||
374 | |||
375 | omap->usbtll_fck = clk_get(dev, "usbtll_fck"); | ||
376 | if (IS_ERR(omap->usbtll_fck)) { | ||
377 | ret = PTR_ERR(omap->usbtll_fck); | ||
378 | dev_err(dev, "usbtll_fck failed error:%d\n", ret); | ||
379 | goto err_usbhost_fs_fck; | ||
380 | } | ||
381 | |||
382 | omap->usbtll_ick = clk_get(dev, "usbtll_ick"); | ||
383 | if (IS_ERR(omap->usbtll_ick)) { | ||
384 | ret = PTR_ERR(omap->usbtll_ick); | ||
385 | dev_err(dev, "usbtll_ick failed error:%d\n", ret); | ||
386 | goto err_usbtll_fck; | ||
387 | } | ||
388 | 353 | ||
389 | omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk"); | 354 | omap->utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk"); |
390 | if (IS_ERR(omap->utmi_p1_fck)) { | 355 | if (IS_ERR(omap->utmi_p1_fck)) { |
391 | ret = PTR_ERR(omap->utmi_p1_fck); | 356 | ret = PTR_ERR(omap->utmi_p1_fck); |
392 | dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret); | 357 | dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret); |
393 | goto err_usbtll_ick; | 358 | goto err_end; |
394 | } | 359 | } |
395 | 360 | ||
396 | omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck"); | 361 | omap->xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck"); |
@@ -520,22 +485,8 @@ err_xclk60mhsp1_ck: | |||
520 | err_utmi_p1_fck: | 485 | err_utmi_p1_fck: |
521 | clk_put(omap->utmi_p1_fck); | 486 | clk_put(omap->utmi_p1_fck); |
522 | 487 | ||
523 | err_usbtll_ick: | ||
524 | clk_put(omap->usbtll_ick); | ||
525 | |||
526 | err_usbtll_fck: | ||
527 | clk_put(omap->usbtll_fck); | ||
528 | |||
529 | err_usbhost_fs_fck: | ||
530 | clk_put(omap->usbhost_fs_fck); | ||
531 | |||
532 | err_usbhost_hs_fck: | ||
533 | clk_put(omap->usbhost_hs_fck); | ||
534 | |||
535 | err_usbhost_ick: | ||
536 | clk_put(omap->usbhost_ick); | ||
537 | |||
538 | err_end: | 488 | err_end: |
489 | pm_runtime_disable(&pdev->dev); | ||
539 | kfree(omap); | 490 | kfree(omap); |
540 | 491 | ||
541 | end_probe: | 492 | end_probe: |
@@ -569,11 +520,7 @@ static int __devexit usbhs_omap_remove(struct platform_device *pdev) | |||
569 | clk_put(omap->utmi_p2_fck); | 520 | clk_put(omap->utmi_p2_fck); |
570 | clk_put(omap->xclk60mhsp1_ck); | 521 | clk_put(omap->xclk60mhsp1_ck); |
571 | clk_put(omap->utmi_p1_fck); | 522 | clk_put(omap->utmi_p1_fck); |
572 | clk_put(omap->usbtll_ick); | 523 | pm_runtime_disable(&pdev->dev); |
573 | clk_put(omap->usbtll_fck); | ||
574 | clk_put(omap->usbhost_fs_fck); | ||
575 | clk_put(omap->usbhost_hs_fck); | ||
576 | clk_put(omap->usbhost_ick); | ||
577 | kfree(omap); | 524 | kfree(omap); |
578 | 525 | ||
579 | return 0; | 526 | return 0; |
@@ -693,7 +640,6 @@ static int usbhs_enable(struct device *dev) | |||
693 | struct usbhs_omap_platform_data *pdata = &omap->platdata; | 640 | struct usbhs_omap_platform_data *pdata = &omap->platdata; |
694 | unsigned long flags = 0; | 641 | unsigned long flags = 0; |
695 | int ret = 0; | 642 | int ret = 0; |
696 | unsigned long timeout; | ||
697 | unsigned reg; | 643 | unsigned reg; |
698 | 644 | ||
699 | dev_dbg(dev, "starting TI HSUSB Controller\n"); | 645 | dev_dbg(dev, "starting TI HSUSB Controller\n"); |
@@ -706,11 +652,7 @@ static int usbhs_enable(struct device *dev) | |||
706 | if (omap->count > 0) | 652 | if (omap->count > 0) |
707 | goto end_count; | 653 | goto end_count; |
708 | 654 | ||
709 | clk_enable(omap->usbhost_ick); | 655 | pm_runtime_get_sync(dev); |
710 | clk_enable(omap->usbhost_hs_fck); | ||
711 | clk_enable(omap->usbhost_fs_fck); | ||
712 | clk_enable(omap->usbtll_fck); | ||
713 | clk_enable(omap->usbtll_ick); | ||
714 | 656 | ||
715 | if (pdata->ehci_data->phy_reset) { | 657 | if (pdata->ehci_data->phy_reset) { |
716 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) { | 658 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) { |
@@ -734,50 +676,6 @@ static int usbhs_enable(struct device *dev) | |||
734 | omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION); | 676 | omap->usbhs_rev = usbhs_read(omap->uhh_base, OMAP_UHH_REVISION); |
735 | dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev); | 677 | dev_dbg(dev, "OMAP UHH_REVISION 0x%x\n", omap->usbhs_rev); |
736 | 678 | ||
737 | /* perform TLL soft reset, and wait until reset is complete */ | ||
738 | usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG, | ||
739 | OMAP_USBTLL_SYSCONFIG_SOFTRESET); | ||
740 | |||
741 | /* Wait for TLL reset to complete */ | ||
742 | timeout = jiffies + msecs_to_jiffies(1000); | ||
743 | while (!(usbhs_read(omap->tll_base, OMAP_USBTLL_SYSSTATUS) | ||
744 | & OMAP_USBTLL_SYSSTATUS_RESETDONE)) { | ||
745 | cpu_relax(); | ||
746 | |||
747 | if (time_after(jiffies, timeout)) { | ||
748 | dev_dbg(dev, "operation timed out\n"); | ||
749 | ret = -EINVAL; | ||
750 | goto err_tll; | ||
751 | } | ||
752 | } | ||
753 | |||
754 | dev_dbg(dev, "TLL RESET DONE\n"); | ||
755 | |||
756 | /* (1<<3) = no idle mode only for initial debugging */ | ||
757 | usbhs_write(omap->tll_base, OMAP_USBTLL_SYSCONFIG, | ||
758 | OMAP_USBTLL_SYSCONFIG_ENAWAKEUP | | ||
759 | OMAP_USBTLL_SYSCONFIG_SIDLEMODE | | ||
760 | OMAP_USBTLL_SYSCONFIG_AUTOIDLE); | ||
761 | |||
762 | /* Put UHH in NoIdle/NoStandby mode */ | ||
763 | reg = usbhs_read(omap->uhh_base, OMAP_UHH_SYSCONFIG); | ||
764 | if (is_omap_usbhs_rev1(omap)) { | ||
765 | reg |= (OMAP_UHH_SYSCONFIG_ENAWAKEUP | ||
766 | | OMAP_UHH_SYSCONFIG_SIDLEMODE | ||
767 | | OMAP_UHH_SYSCONFIG_CACTIVITY | ||
768 | | OMAP_UHH_SYSCONFIG_MIDLEMODE); | ||
769 | reg &= ~OMAP_UHH_SYSCONFIG_AUTOIDLE; | ||
770 | |||
771 | |||
772 | } else if (is_omap_usbhs_rev2(omap)) { | ||
773 | reg &= ~OMAP4_UHH_SYSCONFIG_IDLEMODE_CLEAR; | ||
774 | reg |= OMAP4_UHH_SYSCONFIG_NOIDLE; | ||
775 | reg &= ~OMAP4_UHH_SYSCONFIG_STDBYMODE_CLEAR; | ||
776 | reg |= OMAP4_UHH_SYSCONFIG_NOSTDBY; | ||
777 | } | ||
778 | |||
779 | usbhs_write(omap->uhh_base, OMAP_UHH_SYSCONFIG, reg); | ||
780 | |||
781 | reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG); | 679 | reg = usbhs_read(omap->uhh_base, OMAP_UHH_HOSTCONFIG); |
782 | /* setup ULPI bypass and burst configurations */ | 680 | /* setup ULPI bypass and burst configurations */ |
783 | reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN | 681 | reg |= (OMAP_UHH_HOSTCONFIG_INCR4_BURST_EN |
@@ -917,6 +815,8 @@ end_count: | |||
917 | return 0; | 815 | return 0; |
918 | 816 | ||
919 | err_tll: | 817 | err_tll: |
818 | pm_runtime_put_sync(dev); | ||
819 | spin_unlock_irqrestore(&omap->lock, flags); | ||
920 | if (pdata->ehci_data->phy_reset) { | 820 | if (pdata->ehci_data->phy_reset) { |
921 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) | 821 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) |
922 | gpio_free(pdata->ehci_data->reset_gpio_port[0]); | 822 | gpio_free(pdata->ehci_data->reset_gpio_port[0]); |
@@ -924,13 +824,6 @@ err_tll: | |||
924 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) | 824 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) |
925 | gpio_free(pdata->ehci_data->reset_gpio_port[1]); | 825 | gpio_free(pdata->ehci_data->reset_gpio_port[1]); |
926 | } | 826 | } |
927 | |||
928 | clk_disable(omap->usbtll_ick); | ||
929 | clk_disable(omap->usbtll_fck); | ||
930 | clk_disable(omap->usbhost_fs_fck); | ||
931 | clk_disable(omap->usbhost_hs_fck); | ||
932 | clk_disable(omap->usbhost_ick); | ||
933 | spin_unlock_irqrestore(&omap->lock, flags); | ||
934 | return ret; | 827 | return ret; |
935 | } | 828 | } |
936 | 829 | ||
@@ -994,6 +887,20 @@ static void usbhs_disable(struct device *dev) | |||
994 | dev_dbg(dev, "operation timed out\n"); | 887 | dev_dbg(dev, "operation timed out\n"); |
995 | } | 888 | } |
996 | 889 | ||
890 | if (is_omap_usbhs_rev2(omap)) { | ||
891 | if (is_ehci_tll_mode(pdata->port_mode[0])) | ||
892 | clk_enable(omap->usbtll_p1_fck); | ||
893 | if (is_ehci_tll_mode(pdata->port_mode[1])) | ||
894 | clk_enable(omap->usbtll_p2_fck); | ||
895 | clk_disable(omap->utmi_p2_fck); | ||
896 | clk_disable(omap->utmi_p1_fck); | ||
897 | } | ||
898 | |||
899 | pm_runtime_put_sync(dev); | ||
900 | |||
901 | /* The gpio_free might sleep; so unlock the spinlock */ | ||
902 | spin_unlock_irqrestore(&omap->lock, flags); | ||
903 | |||
997 | if (pdata->ehci_data->phy_reset) { | 904 | if (pdata->ehci_data->phy_reset) { |
998 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) | 905 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[0])) |
999 | gpio_free(pdata->ehci_data->reset_gpio_port[0]); | 906 | gpio_free(pdata->ehci_data->reset_gpio_port[0]); |
@@ -1001,14 +908,7 @@ static void usbhs_disable(struct device *dev) | |||
1001 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) | 908 | if (gpio_is_valid(pdata->ehci_data->reset_gpio_port[1])) |
1002 | gpio_free(pdata->ehci_data->reset_gpio_port[1]); | 909 | gpio_free(pdata->ehci_data->reset_gpio_port[1]); |
1003 | } | 910 | } |
1004 | 911 | return; | |
1005 | clk_disable(omap->utmi_p2_fck); | ||
1006 | clk_disable(omap->utmi_p1_fck); | ||
1007 | clk_disable(omap->usbtll_ick); | ||
1008 | clk_disable(omap->usbtll_fck); | ||
1009 | clk_disable(omap->usbhost_fs_fck); | ||
1010 | clk_disable(omap->usbhost_hs_fck); | ||
1011 | clk_disable(omap->usbhost_ick); | ||
1012 | 912 | ||
1013 | end_disble: | 913 | end_disble: |
1014 | spin_unlock_irqrestore(&omap->lock, flags); | 914 | spin_unlock_irqrestore(&omap->lock, flags); |
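
Summarising the omap-usb-host change: the driver stops claiming and toggling the usbhost/usbtll clocks by hand and relies on runtime PM instead, with the deleted TLL soft-reset and SYSCONFIG programming presumably left to the underlying hwmod layer. The remaining call pattern, reduced to its skeleton:

        /* probe */
        pm_runtime_enable(&pdev->dev);

        /* usbhs_enable(): power the block up before touching UHH registers */
        pm_runtime_get_sync(dev);

        /* usbhs_disable(): drop the reference once the ports are quiesced */
        pm_runtime_put_sync(dev);

        /* remove */
        pm_runtime_disable(&pdev->dev);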
diff --git a/drivers/mfd/pm8921-core.c b/drivers/mfd/pm8921-core.c new file mode 100644 index 000000000000..e873b15753d8 --- /dev/null +++ b/drivers/mfd/pm8921-core.c | |||
@@ -0,0 +1,212 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011, Code Aurora Forum. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #define pr_fmt(fmt) "%s: " fmt, __func__ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/platform_device.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/err.h> | ||
20 | #include <linux/msm_ssbi.h> | ||
21 | #include <linux/mfd/core.h> | ||
22 | #include <linux/mfd/pm8xxx/pm8921.h> | ||
23 | #include <linux/mfd/pm8xxx/core.h> | ||
24 | |||
25 | #define REG_HWREV 0x002 /* PMIC4 revision */ | ||
26 | #define REG_HWREV_2 0x0E8 /* PMIC4 revision 2 */ | ||
27 | |||
28 | struct pm8921 { | ||
29 | struct device *dev; | ||
30 | struct pm_irq_chip *irq_chip; | ||
31 | }; | ||
32 | |||
33 | static int pm8921_readb(const struct device *dev, u16 addr, u8 *val) | ||
34 | { | ||
35 | const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); | ||
36 | const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; | ||
37 | |||
38 | return msm_ssbi_read(pmic->dev->parent, addr, val, 1); | ||
39 | } | ||
40 | |||
41 | static int pm8921_writeb(const struct device *dev, u16 addr, u8 val) | ||
42 | { | ||
43 | const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); | ||
44 | const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; | ||
45 | |||
46 | return msm_ssbi_write(pmic->dev->parent, addr, &val, 1); | ||
47 | } | ||
48 | |||
49 | static int pm8921_read_buf(const struct device *dev, u16 addr, u8 *buf, | ||
50 | int cnt) | ||
51 | { | ||
52 | const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); | ||
53 | const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; | ||
54 | |||
55 | return msm_ssbi_read(pmic->dev->parent, addr, buf, cnt); | ||
56 | } | ||
57 | |||
58 | static int pm8921_write_buf(const struct device *dev, u16 addr, u8 *buf, | ||
59 | int cnt) | ||
60 | { | ||
61 | const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); | ||
62 | const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; | ||
63 | |||
64 | return msm_ssbi_write(pmic->dev->parent, addr, buf, cnt); | ||
65 | } | ||
66 | |||
67 | static int pm8921_read_irq_stat(const struct device *dev, int irq) | ||
68 | { | ||
69 | const struct pm8xxx_drvdata *pm8921_drvdata = dev_get_drvdata(dev); | ||
70 | const struct pm8921 *pmic = pm8921_drvdata->pm_chip_data; | ||
71 | |||
72 | return pm8xxx_get_irq_stat(pmic->irq_chip, irq); | ||
73 | } | ||
74 | |||
75 | static struct pm8xxx_drvdata pm8921_drvdata = { | ||
76 | .pmic_readb = pm8921_readb, | ||
77 | .pmic_writeb = pm8921_writeb, | ||
78 | .pmic_read_buf = pm8921_read_buf, | ||
79 | .pmic_write_buf = pm8921_write_buf, | ||
80 | .pmic_read_irq_stat = pm8921_read_irq_stat, | ||
81 | }; | ||
82 | |||
83 | static int __devinit pm8921_add_subdevices(const struct pm8921_platform_data | ||
84 | *pdata, | ||
85 | struct pm8921 *pmic, | ||
86 | u32 rev) | ||
87 | { | ||
88 | int ret = 0, irq_base = 0; | ||
89 | struct pm_irq_chip *irq_chip; | ||
90 | |||
91 | if (pdata->irq_pdata) { | ||
92 | pdata->irq_pdata->irq_cdata.nirqs = PM8921_NR_IRQS; | ||
93 | pdata->irq_pdata->irq_cdata.rev = rev; | ||
94 | irq_base = pdata->irq_pdata->irq_base; | ||
95 | irq_chip = pm8xxx_irq_init(pmic->dev, pdata->irq_pdata); | ||
96 | |||
97 | if (IS_ERR(irq_chip)) { | ||
98 | pr_err("Failed to init interrupts ret=%ld\n", | ||
99 | PTR_ERR(irq_chip)); | ||
100 | return PTR_ERR(irq_chip); | ||
101 | } | ||
102 | pmic->irq_chip = irq_chip; | ||
103 | } | ||
104 | return ret; | ||
105 | } | ||
106 | |||
107 | static int __devinit pm8921_probe(struct platform_device *pdev) | ||
108 | { | ||
109 | const struct pm8921_platform_data *pdata = pdev->dev.platform_data; | ||
110 | struct pm8921 *pmic; | ||
111 | int rc; | ||
112 | u8 val; | ||
113 | u32 rev; | ||
114 | |||
115 | if (!pdata) { | ||
116 | pr_err("missing platform data\n"); | ||
117 | return -EINVAL; | ||
118 | } | ||
119 | |||
120 | pmic = kzalloc(sizeof(struct pm8921), GFP_KERNEL); | ||
121 | if (!pmic) { | ||
122 | pr_err("Cannot alloc pm8921 struct\n"); | ||
123 | return -ENOMEM; | ||
124 | } | ||
125 | |||
126 | /* Read PMIC chip revision */ | ||
127 | rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV, &val, sizeof(val)); | ||
128 | if (rc) { | ||
129 | pr_err("Failed to read hw rev reg %d:rc=%d\n", REG_HWREV, rc); | ||
130 | goto err_read_rev; | ||
131 | } | ||
132 | pr_info("PMIC revision 1: %02X\n", val); | ||
133 | rev = val; | ||
134 | |||
135 | /* Read PMIC chip revision 2 */ | ||
136 | rc = msm_ssbi_read(pdev->dev.parent, REG_HWREV_2, &val, sizeof(val)); | ||
137 | if (rc) { | ||
138 | pr_err("Failed to read hw rev 2 reg %d:rc=%d\n", | ||
139 | REG_HWREV_2, rc); | ||
140 | goto err_read_rev; | ||
141 | } | ||
142 | pr_info("PMIC revision 2: %02X\n", val); | ||
143 | rev |= val << BITS_PER_BYTE; | ||
144 | |||
145 | pmic->dev = &pdev->dev; | ||
146 | pm8921_drvdata.pm_chip_data = pmic; | ||
147 | platform_set_drvdata(pdev, &pm8921_drvdata); | ||
148 | |||
149 | rc = pm8921_add_subdevices(pdata, pmic, rev); | ||
150 | if (rc) { | ||
151 | pr_err("Cannot add subdevices rc=%d\n", rc); | ||
152 | goto err; | ||
153 | } | ||
154 | |||
155 | /* gpio might not work if no irq device is found */ | ||
156 | WARN_ON(pmic->irq_chip == NULL); | ||
157 | |||
158 | return 0; | ||
159 | |||
160 | err: | ||
161 | mfd_remove_devices(pmic->dev); | ||
162 | platform_set_drvdata(pdev, NULL); | ||
163 | err_read_rev: | ||
164 | kfree(pmic); | ||
165 | return rc; | ||
166 | } | ||
167 | |||
168 | static int __devexit pm8921_remove(struct platform_device *pdev) | ||
169 | { | ||
170 | struct pm8xxx_drvdata *drvdata; | ||
171 | struct pm8921 *pmic = NULL; | ||
172 | |||
173 | drvdata = platform_get_drvdata(pdev); | ||
174 | if (drvdata) | ||
175 | pmic = drvdata->pm_chip_data; | ||
176 | if (pmic) | ||
177 | mfd_remove_devices(pmic->dev); | ||
178 | if (pmic && pmic->irq_chip) { | ||
179 | pm8xxx_irq_exit(pmic->irq_chip); | ||
180 | pmic->irq_chip = NULL; | ||
181 | } | ||
182 | platform_set_drvdata(pdev, NULL); | ||
183 | kfree(pmic); | ||
184 | |||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | static struct platform_driver pm8921_driver = { | ||
189 | .probe = pm8921_probe, | ||
190 | .remove = __devexit_p(pm8921_remove), | ||
191 | .driver = { | ||
192 | .name = "pm8921-core", | ||
193 | .owner = THIS_MODULE, | ||
194 | }, | ||
195 | }; | ||
196 | |||
197 | static int __init pm8921_init(void) | ||
198 | { | ||
199 | return platform_driver_register(&pm8921_driver); | ||
200 | } | ||
201 | subsys_initcall(pm8921_init); | ||
202 | |||
203 | static void __exit pm8921_exit(void) | ||
204 | { | ||
205 | platform_driver_unregister(&pm8921_driver); | ||
206 | } | ||
207 | module_exit(pm8921_exit); | ||
208 | |||
209 | MODULE_LICENSE("GPL v2"); | ||
210 | MODULE_DESCRIPTION("PMIC 8921 core driver"); | ||
211 | MODULE_VERSION("1.0"); | ||
212 | MODULE_ALIAS("platform:pm8921-core"); | ||
diff --git a/drivers/mfd/pm8xxx-irq.c b/drivers/mfd/pm8xxx-irq.c new file mode 100644 index 000000000000..d452dd013081 --- /dev/null +++ b/drivers/mfd/pm8xxx-irq.c | |||
@@ -0,0 +1,371 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011, Code Aurora Forum. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | |||
14 | #define pr_fmt(fmt) "%s: " fmt, __func__ | ||
15 | |||
16 | #include <linux/err.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/irq.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/mfd/pm8xxx/core.h> | ||
21 | #include <linux/mfd/pm8xxx/irq.h> | ||
22 | #include <linux/platform_device.h> | ||
23 | #include <linux/slab.h> | ||
24 | |||
25 | /* PMIC8xxx IRQ */ | ||
26 | |||
27 | #define SSBI_REG_ADDR_IRQ_BASE 0x1BB | ||
28 | |||
29 | #define SSBI_REG_ADDR_IRQ_ROOT (SSBI_REG_ADDR_IRQ_BASE + 0) | ||
30 | #define SSBI_REG_ADDR_IRQ_M_STATUS1 (SSBI_REG_ADDR_IRQ_BASE + 1) | ||
31 | #define SSBI_REG_ADDR_IRQ_M_STATUS2 (SSBI_REG_ADDR_IRQ_BASE + 2) | ||
32 | #define SSBI_REG_ADDR_IRQ_M_STATUS3 (SSBI_REG_ADDR_IRQ_BASE + 3) | ||
33 | #define SSBI_REG_ADDR_IRQ_M_STATUS4 (SSBI_REG_ADDR_IRQ_BASE + 4) | ||
34 | #define SSBI_REG_ADDR_IRQ_BLK_SEL (SSBI_REG_ADDR_IRQ_BASE + 5) | ||
35 | #define SSBI_REG_ADDR_IRQ_IT_STATUS (SSBI_REG_ADDR_IRQ_BASE + 6) | ||
36 | #define SSBI_REG_ADDR_IRQ_CONFIG (SSBI_REG_ADDR_IRQ_BASE + 7) | ||
37 | #define SSBI_REG_ADDR_IRQ_RT_STATUS (SSBI_REG_ADDR_IRQ_BASE + 8) | ||
38 | |||
39 | #define PM_IRQF_LVL_SEL 0x01 /* level select */ | ||
40 | #define PM_IRQF_MASK_FE 0x02 /* mask falling edge */ | ||
41 | #define PM_IRQF_MASK_RE 0x04 /* mask rising edge */ | ||
42 | #define PM_IRQF_CLR 0x08 /* clear interrupt */ | ||
43 | #define PM_IRQF_BITS_MASK 0x70 | ||
44 | #define PM_IRQF_BITS_SHIFT 4 | ||
45 | #define PM_IRQF_WRITE 0x80 | ||
46 | |||
47 | #define PM_IRQF_MASK_ALL (PM_IRQF_MASK_FE | \ | ||
48 | PM_IRQF_MASK_RE) | ||
49 | |||
50 | struct pm_irq_chip { | ||
51 | struct device *dev; | ||
52 | spinlock_t pm_irq_lock; | ||
53 | unsigned int devirq; | ||
54 | unsigned int irq_base; | ||
55 | unsigned int num_irqs; | ||
56 | unsigned int num_blocks; | ||
57 | unsigned int num_masters; | ||
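	/* config[] caches one PM_IRQF_* byte per irq; its storage is allocated together with this struct in pm8xxx_irq_init() */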
58 | u8 config[0]; | ||
59 | }; | ||
60 | |||
61 | static int pm8xxx_read_root_irq(const struct pm_irq_chip *chip, u8 *rp) | ||
62 | { | ||
63 | return pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_ROOT, rp); | ||
64 | } | ||
65 | |||
66 | static int pm8xxx_read_master_irq(const struct pm_irq_chip *chip, u8 m, u8 *bp) | ||
67 | { | ||
68 | return pm8xxx_readb(chip->dev, | ||
69 | SSBI_REG_ADDR_IRQ_M_STATUS1 + m, bp); | ||
70 | } | ||
71 | |||
72 | static int pm8xxx_read_block_irq(struct pm_irq_chip *chip, u8 bp, u8 *ip) | ||
73 | { | ||
74 | int rc; | ||
75 | |||
76 | spin_lock(&chip->pm_irq_lock); | ||
77 | rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, bp); | ||
78 | if (rc) { | ||
79 | pr_err("Failed Selecting Block %d rc=%d\n", bp, rc); | ||
80 | goto bail; | ||
81 | } | ||
82 | |||
83 | rc = pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_IT_STATUS, ip); | ||
84 | if (rc) | ||
85 | pr_err("Failed Reading Status rc=%d\n", rc); | ||
86 | bail: | ||
87 | spin_unlock(&chip->pm_irq_lock); | ||
88 | return rc; | ||
89 | } | ||
90 | |||
91 | static int pm8xxx_config_irq(struct pm_irq_chip *chip, u8 bp, u8 cp) | ||
92 | { | ||
93 | int rc; | ||
94 | |||
95 | spin_lock(&chip->pm_irq_lock); | ||
96 | rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, bp); | ||
97 | if (rc) { | ||
98 | pr_err("Failed Selecting Block %d rc=%d\n", bp, rc); | ||
99 | goto bail; | ||
100 | } | ||
101 | |||
102 | cp |= PM_IRQF_WRITE; | ||
103 | rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_CONFIG, cp); | ||
104 | if (rc) | ||
105 | pr_err("Failed Configuring IRQ rc=%d\n", rc); | ||
106 | bail: | ||
107 | spin_unlock(&chip->pm_irq_lock); | ||
108 | return rc; | ||
109 | } | ||
110 | |||
111 | static int pm8xxx_irq_block_handler(struct pm_irq_chip *chip, int block) | ||
112 | { | ||
113 | int pmirq, irq, i, ret = 0; | ||
114 | u8 bits; | ||
115 | |||
116 | ret = pm8xxx_read_block_irq(chip, block, &bits); | ||
117 | if (ret) { | ||
118 | pr_err("Failed reading %d block ret=%d\n", block, ret); | ||
119 | return ret; | ||
120 | } | ||
121 | if (!bits) { | ||
122 | pr_err("block bit set in master but no irqs: %d\n", block); | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | /* Check IRQ bits */ | ||
127 | for (i = 0; i < 8; i++) { | ||
128 | if (bits & (1 << i)) { | ||
129 | pmirq = block * 8 + i; | ||
130 | irq = pmirq + chip->irq_base; | ||
131 | generic_handle_irq(irq); | ||
132 | } | ||
133 | } | ||
134 | return 0; | ||
135 | } | ||
136 | |||
137 | static int pm8xxx_irq_master_handler(struct pm_irq_chip *chip, int master) | ||
138 | { | ||
139 | u8 blockbits; | ||
140 | int block_number, i, ret = 0; | ||
141 | |||
142 | ret = pm8xxx_read_master_irq(chip, master, &blockbits); | ||
143 | if (ret) { | ||
144 | pr_err("Failed to read master %d ret=%d\n", master, ret); | ||
145 | return ret; | ||
146 | } | ||
147 | if (!blockbits) { | ||
148 | pr_err("master bit set in root but no blocks: %d\n", master); | ||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | for (i = 0; i < 8; i++) | ||
153 | if (blockbits & (1 << i)) { | ||
154 | block_number = master * 8 + i; /* block # */ | ||
155 | ret |= pm8xxx_irq_block_handler(chip, block_number); | ||
156 | } | ||
157 | return ret; | ||
158 | } | ||
159 | |||
160 | static void pm8xxx_irq_handler(unsigned int irq, struct irq_desc *desc) | ||
161 | { | ||
162 | struct pm_irq_chip *chip = irq_desc_get_handler_data(desc); | ||
163 | struct irq_chip *irq_chip = irq_desc_get_chip(desc); | ||
164 | u8 root; | ||
165 | int i, ret, masters = 0; | ||
166 | |||
167 | ret = pm8xxx_read_root_irq(chip, &root); | ||
168 | if (ret) { | ||
169 | pr_err("Can't read root status ret=%d\n", ret); | ||
170 | return; | ||
171 | } | ||
172 | |||
173 | /* on pm8xxx series masters start from bit 1 of the root */ | ||
174 | masters = root >> 1; | ||
175 | |||
176 | /* Read allowed masters for blocks. */ | ||
177 | for (i = 0; i < chip->num_masters; i++) | ||
178 | if (masters & (1 << i)) | ||
179 | pm8xxx_irq_master_handler(chip, i); | ||
180 | |||
181 | irq_chip->irq_ack(&desc->irq_data); | ||
182 | } | ||
183 | |||
184 | static void pm8xxx_irq_mask_ack(struct irq_data *d) | ||
185 | { | ||
186 | struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d); | ||
187 | unsigned int pmirq = d->irq - chip->irq_base; | ||
188 | int master, irq_bit; | ||
189 | u8 block, config; | ||
190 | |||
191 | block = pmirq / 8; | ||
192 | master = block / 8; | ||
193 | irq_bit = pmirq % 8; | ||
194 | |||
195 | config = chip->config[pmirq] | PM_IRQF_MASK_ALL | PM_IRQF_CLR; | ||
196 | pm8xxx_config_irq(chip, block, config); | ||
197 | } | ||
198 | |||
199 | static void pm8xxx_irq_unmask(struct irq_data *d) | ||
200 | { | ||
201 | struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d); | ||
202 | unsigned int pmirq = d->irq - chip->irq_base; | ||
203 | int master, irq_bit; | ||
204 | u8 block, config; | ||
205 | |||
206 | block = pmirq / 8; | ||
207 | master = block / 8; | ||
208 | irq_bit = pmirq % 8; | ||
209 | |||
210 | config = chip->config[pmirq]; | ||
211 | pm8xxx_config_irq(chip, block, config); | ||
212 | } | ||
213 | |||
214 | static int pm8xxx_irq_set_type(struct irq_data *d, unsigned int flow_type) | ||
215 | { | ||
216 | struct pm_irq_chip *chip = irq_data_get_irq_chip_data(d); | ||
217 | unsigned int pmirq = d->irq - chip->irq_base; | ||
218 | int master, irq_bit; | ||
219 | u8 block, config; | ||
220 | |||
221 | block = pmirq / 8; | ||
222 | master = block / 8; | ||
223 | irq_bit = pmirq % 8; | ||
224 | |||
225 | chip->config[pmirq] = (irq_bit << PM_IRQF_BITS_SHIFT) | ||
226 | | PM_IRQF_MASK_ALL; | ||
227 | if (flow_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { | ||
228 | if (flow_type & IRQF_TRIGGER_RISING) | ||
229 | chip->config[pmirq] &= ~PM_IRQF_MASK_RE; | ||
230 | if (flow_type & IRQF_TRIGGER_FALLING) | ||
231 | chip->config[pmirq] &= ~PM_IRQF_MASK_FE; | ||
232 | } else { | ||
233 | chip->config[pmirq] |= PM_IRQF_LVL_SEL; | ||
234 | |||
235 | if (flow_type & IRQF_TRIGGER_HIGH) | ||
236 | chip->config[pmirq] &= ~PM_IRQF_MASK_RE; | ||
237 | else | ||
238 | chip->config[pmirq] &= ~PM_IRQF_MASK_FE; | ||
239 | } | ||
240 | |||
241 | config = chip->config[pmirq] | PM_IRQF_CLR; | ||
242 | return pm8xxx_config_irq(chip, block, config); | ||
243 | } | ||
244 | |||
245 | static int pm8xxx_irq_set_wake(struct irq_data *d, unsigned int on) | ||
246 | { | ||
247 | return 0; | ||
248 | } | ||
249 | |||
250 | static struct irq_chip pm8xxx_irq_chip = { | ||
251 | .name = "pm8xxx", | ||
252 | .irq_mask_ack = pm8xxx_irq_mask_ack, | ||
253 | .irq_unmask = pm8xxx_irq_unmask, | ||
254 | .irq_set_type = pm8xxx_irq_set_type, | ||
255 | .irq_set_wake = pm8xxx_irq_set_wake, | ||
256 | .flags = IRQCHIP_MASK_ON_SUSPEND, | ||
257 | }; | ||
258 | |||
259 | /** | ||
260 | * pm8xxx_get_irq_stat - get the status of the irq line | ||
261 | * @chip: pointer to identify a pmic irq controller | ||
262 | * @irq: the irq number | ||
263 | * | ||
264 | * The pm8xxx gpio and mpp rely on the interrupt block to read | ||
265 | * the values on their pins. This function is to facilitate reading | ||
266 | * the status of a gpio or an mpp line. The caller has to convert the | ||
267 | * gpio number to irq number. | ||
268 | * | ||
269 | * RETURNS: | ||
270 | * an int indicating the value read on that line | ||
271 | */ | ||
272 | int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq) | ||
273 | { | ||
274 | int pmirq, rc; | ||
275 | u8 block, bits, bit; | ||
276 | unsigned long flags; | ||
277 | |||
278 | if (chip == NULL || irq < chip->irq_base || | ||
279 | irq >= chip->irq_base + chip->num_irqs) | ||
280 | return -EINVAL; | ||
281 | |||
282 | pmirq = irq - chip->irq_base; | ||
283 | |||
284 | block = pmirq / 8; | ||
285 | bit = pmirq % 8; | ||
286 | |||
287 | spin_lock_irqsave(&chip->pm_irq_lock, flags); | ||
288 | |||
289 | rc = pm8xxx_writeb(chip->dev, SSBI_REG_ADDR_IRQ_BLK_SEL, block); | ||
290 | if (rc) { | ||
291 | pr_err("Failed Selecting block irq=%d pmirq=%d blk=%d rc=%d\n", | ||
292 | irq, pmirq, block, rc); | ||
293 | goto bail_out; | ||
294 | } | ||
295 | |||
296 | rc = pm8xxx_readb(chip->dev, SSBI_REG_ADDR_IRQ_RT_STATUS, &bits); | ||
297 | if (rc) { | ||
298 | pr_err("Failed Configuring irq=%d pmirq=%d blk=%d rc=%d\n", | ||
299 | irq, pmirq, block, rc); | ||
300 | goto bail_out; | ||
301 | } | ||
302 | |||
303 | rc = (bits & (1 << bit)) ? 1 : 0; | ||
304 | |||
305 | bail_out: | ||
306 | spin_unlock_irqrestore(&chip->pm_irq_lock, flags); | ||
307 | |||
308 | return rc; | ||
309 | } | ||
310 | EXPORT_SYMBOL_GPL(pm8xxx_get_irq_stat); | ||
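/*
 * Illustrative consumer (a sketch, not code from this patch): a pm8xxx
 * gpio driver reading the level of a pin would translate the gpio offset
 * to its virtual irq number, as the comment above requires, and then ask
 * the interrupt block.  The structure "pm8xxx_gpio_chip" and its
 * "irq_base"/"irq_chip" members are placeholder names for illustration.
 *
 *	static int pm8xxx_gpio_get(struct pm8xxx_gpio_chip *pctrl, unsigned gpio)
 *	{
 *		int irq = pctrl->irq_base + gpio;
 *
 *		return pm8xxx_get_irq_stat(pctrl->irq_chip, irq);
 *	}
 */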
311 | |||
312 | struct pm_irq_chip * __devinit pm8xxx_irq_init(struct device *dev, | ||
313 | const struct pm8xxx_irq_platform_data *pdata) | ||
314 | { | ||
315 | struct pm_irq_chip *chip; | ||
316 | int devirq, rc; | ||
317 | unsigned int pmirq; | ||
318 | |||
319 | if (!pdata) { | ||
320 | pr_err("No platform data\n"); | ||
321 | return ERR_PTR(-EINVAL); | ||
322 | } | ||
323 | |||
324 | devirq = pdata->devirq; | ||
325 | if (devirq < 0) { | ||
326 | pr_err("missing devirq\n"); | ||
327 | rc = devirq; | ||
328 | return ERR_PTR(rc); | ||
329 | } | ||
330 | |||
331 | chip = kzalloc(sizeof(struct pm_irq_chip) | ||
332 | + sizeof(u8) * pdata->irq_cdata.nirqs, GFP_KERNEL); | ||
333 | if (!chip) { | ||
334 | pr_err("Cannot alloc pm_irq_chip struct\n"); | ||
335 | return ERR_PTR(-ENOMEM); | ||
336 | } | ||
337 | |||
338 | chip->dev = dev; | ||
339 | chip->devirq = devirq; | ||
340 | chip->irq_base = pdata->irq_base; | ||
341 | chip->num_irqs = pdata->irq_cdata.nirqs; | ||
342 | chip->num_blocks = DIV_ROUND_UP(chip->num_irqs, 8); | ||
343 | chip->num_masters = DIV_ROUND_UP(chip->num_blocks, 8); | ||
344 | spin_lock_init(&chip->pm_irq_lock); | ||
345 | |||
346 | for (pmirq = 0; pmirq < chip->num_irqs; pmirq++) { | ||
347 | irq_set_chip_and_handler(chip->irq_base + pmirq, | ||
348 | &pm8xxx_irq_chip, | ||
349 | handle_level_irq); | ||
350 | irq_set_chip_data(chip->irq_base + pmirq, chip); | ||
351 | #ifdef CONFIG_ARM | ||
352 | set_irq_flags(chip->irq_base + pmirq, IRQF_VALID); | ||
353 | #else | ||
354 | irq_set_noprobe(chip->irq_base + pmirq); | ||
355 | #endif | ||
356 | } | ||
357 | |||
358 | irq_set_irq_type(devirq, pdata->irq_trigger_flag); | ||
359 | irq_set_handler_data(devirq, chip); | ||
360 | irq_set_chained_handler(devirq, pm8xxx_irq_handler); | ||
361 | irq_set_irq_wake(devirq, 1); | ||
362 | |||
363 | return chip; | ||
364 | } | ||
365 | |||
366 | int __devexit pm8xxx_irq_exit(struct pm_irq_chip *chip) | ||
367 | { | ||
368 | irq_set_chained_handler(chip->devirq, NULL); | ||
369 | kfree(chip); | ||
370 | return 0; | ||
371 | } | ||
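/*
 * Board-side sketch (illustration only, not from this patch): the
 * platform data consumed by pm8xxx_irq_init() and pm8921_probe() is
 * normally supplied from a machine file along these lines.
 * PM8921_IRQ_BASE and MSM_GPIO_TO_INT(104) stand in for board-specific
 * values and are assumptions made for the example.
 *
 *	static struct pm8xxx_irq_platform_data pm8xxx_irq_pdata = {
 *		.irq_base		= PM8921_IRQ_BASE,
 *		.devirq			= MSM_GPIO_TO_INT(104),
 *		.irq_trigger_flag	= IRQF_TRIGGER_LOW,
 *	};
 *
 *	static struct pm8921_platform_data pm8921_pdata = {
 *		.irq_pdata		= &pm8xxx_irq_pdata,
 *	};
 */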
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c index 10dbe6374a89..809bd4a61089 100644 --- a/drivers/mfd/rdc321x-southbridge.c +++ b/drivers/mfd/rdc321x-southbridge.c | |||
@@ -61,12 +61,14 @@ static struct mfd_cell rdc321x_sb_cells[] = { | |||
61 | .name = "rdc321x-wdt", | 61 | .name = "rdc321x-wdt", |
62 | .resources = rdc321x_wdt_resource, | 62 | .resources = rdc321x_wdt_resource, |
63 | .num_resources = ARRAY_SIZE(rdc321x_wdt_resource), | 63 | .num_resources = ARRAY_SIZE(rdc321x_wdt_resource), |
64 | .mfd_data = &rdc321x_wdt_pdata, | 64 | .platform_data = &rdc321x_wdt_pdata, |
65 | .pdata_size = sizeof(rdc321x_wdt_pdata), | ||
65 | }, { | 66 | }, { |
66 | .name = "rdc321x-gpio", | 67 | .name = "rdc321x-gpio", |
67 | .resources = rdc321x_gpio_resources, | 68 | .resources = rdc321x_gpio_resources, |
68 | .num_resources = ARRAY_SIZE(rdc321x_gpio_resources), | 69 | .num_resources = ARRAY_SIZE(rdc321x_gpio_resources), |
69 | .mfd_data = &rdc321x_gpio_pdata, | 70 | .platform_data = &rdc321x_gpio_pdata, |
71 | .pdata_size = sizeof(rdc321x_gpio_pdata), | ||
70 | }, | 72 | }, |
71 | }; | 73 | }; |
72 | 74 | ||
diff --git a/drivers/mfd/t7l66xb.c b/drivers/mfd/t7l66xb.c index 42830e692964..91ad21ef7721 100644 --- a/drivers/mfd/t7l66xb.c +++ b/drivers/mfd/t7l66xb.c | |||
@@ -170,7 +170,8 @@ static struct mfd_cell t7l66xb_cells[] = { | |||
170 | .name = "tmio-mmc", | 170 | .name = "tmio-mmc", |
171 | .enable = t7l66xb_mmc_enable, | 171 | .enable = t7l66xb_mmc_enable, |
172 | .disable = t7l66xb_mmc_disable, | 172 | .disable = t7l66xb_mmc_disable, |
173 | .mfd_data = &t7166xb_mmc_data, | 173 | .platform_data = &t7166xb_mmc_data, |
174 | .pdata_size = sizeof(t7166xb_mmc_data), | ||
174 | .num_resources = ARRAY_SIZE(t7l66xb_mmc_resources), | 175 | .num_resources = ARRAY_SIZE(t7l66xb_mmc_resources), |
175 | .resources = t7l66xb_mmc_resources, | 176 | .resources = t7l66xb_mmc_resources, |
176 | }, | 177 | }, |
@@ -382,7 +383,8 @@ static int t7l66xb_probe(struct platform_device *dev) | |||
382 | 383 | ||
383 | t7l66xb_attach_irq(dev); | 384 | t7l66xb_attach_irq(dev); |
384 | 385 | ||
385 | t7l66xb_cells[T7L66XB_CELL_NAND].mfd_data = pdata->nand_data; | 386 | t7l66xb_cells[T7L66XB_CELL_NAND].platform_data = pdata->nand_data; |
387 | t7l66xb_cells[T7L66XB_CELL_NAND].pdata_size = sizeof(*pdata->nand_data); | ||
386 | 388 | ||
387 | ret = mfd_add_devices(&dev->dev, dev->id, | 389 | ret = mfd_add_devices(&dev->dev, dev->id, |
388 | t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells), | 390 | t7l66xb_cells, ARRAY_SIZE(t7l66xb_cells), |
diff --git a/drivers/mfd/tc6387xb.c b/drivers/mfd/tc6387xb.c index b006f7cee952..ad715bf49cac 100644 --- a/drivers/mfd/tc6387xb.c +++ b/drivers/mfd/tc6387xb.c | |||
@@ -131,7 +131,8 @@ static struct mfd_cell tc6387xb_cells[] = { | |||
131 | .name = "tmio-mmc", | 131 | .name = "tmio-mmc", |
132 | .enable = tc6387xb_mmc_enable, | 132 | .enable = tc6387xb_mmc_enable, |
133 | .disable = tc6387xb_mmc_disable, | 133 | .disable = tc6387xb_mmc_disable, |
134 | .mfd_data = &tc6387xb_mmc_data, | 134 | .platform_data = &tc6387xb_mmc_data, |
135 | .pdata_size = sizeof(tc6387xb_mmc_data), | ||
135 | .num_resources = ARRAY_SIZE(tc6387xb_mmc_resources), | 136 | .num_resources = ARRAY_SIZE(tc6387xb_mmc_resources), |
136 | .resources = tc6387xb_mmc_resources, | 137 | .resources = tc6387xb_mmc_resources, |
137 | }, | 138 | }, |
diff --git a/drivers/mfd/tc6393xb.c b/drivers/mfd/tc6393xb.c index fc53ce287601..9612264f0e6d 100644 --- a/drivers/mfd/tc6393xb.c +++ b/drivers/mfd/tc6393xb.c | |||
@@ -393,7 +393,8 @@ static struct mfd_cell __devinitdata tc6393xb_cells[] = { | |||
393 | .name = "tmio-mmc", | 393 | .name = "tmio-mmc", |
394 | .enable = tc6393xb_mmc_enable, | 394 | .enable = tc6393xb_mmc_enable, |
395 | .resume = tc6393xb_mmc_resume, | 395 | .resume = tc6393xb_mmc_resume, |
396 | .mfd_data = &tc6393xb_mmc_data, | 396 | .platform_data = &tc6393xb_mmc_data, |
397 | .pdata_size = sizeof(tc6393xb_mmc_data), | ||
397 | .num_resources = ARRAY_SIZE(tc6393xb_mmc_resources), | 398 | .num_resources = ARRAY_SIZE(tc6393xb_mmc_resources), |
398 | .resources = tc6393xb_mmc_resources, | 399 | .resources = tc6393xb_mmc_resources, |
399 | }, | 400 | }, |
@@ -692,8 +693,11 @@ static int __devinit tc6393xb_probe(struct platform_device *dev) | |||
692 | goto err_setup; | 693 | goto err_setup; |
693 | } | 694 | } |
694 | 695 | ||
695 | tc6393xb_cells[TC6393XB_CELL_NAND].mfd_data = tcpd->nand_data; | 696 | tc6393xb_cells[TC6393XB_CELL_NAND].platform_data = tcpd->nand_data; |
696 | tc6393xb_cells[TC6393XB_CELL_FB].mfd_data = tcpd->fb_data; | 697 | tc6393xb_cells[TC6393XB_CELL_NAND].pdata_size = |
698 | sizeof(*tcpd->nand_data); | ||
699 | tc6393xb_cells[TC6393XB_CELL_FB].platform_data = tcpd->fb_data; | ||
700 | tc6393xb_cells[TC6393XB_CELL_FB].pdata_size = sizeof(*tcpd->fb_data); | ||
697 | 701 | ||
698 | ret = mfd_add_devices(&dev->dev, dev->id, | 702 | ret = mfd_add_devices(&dev->dev, dev->id, |
699 | tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells), | 703 | tc6393xb_cells, ARRAY_SIZE(tc6393xb_cells), |
diff --git a/drivers/mfd/timberdale.c b/drivers/mfd/timberdale.c index 94c6c8afad12..69272e4e3459 100644 --- a/drivers/mfd/timberdale.c +++ b/drivers/mfd/timberdale.c | |||
@@ -384,7 +384,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = { | |||
384 | .name = "timb-dma", | 384 | .name = "timb-dma", |
385 | .num_resources = ARRAY_SIZE(timberdale_dma_resources), | 385 | .num_resources = ARRAY_SIZE(timberdale_dma_resources), |
386 | .resources = timberdale_dma_resources, | 386 | .resources = timberdale_dma_resources, |
387 | .mfd_data = &timb_dma_platform_data, | 387 | .platform_data = &timb_dma_platform_data, |
388 | .pdata_size = sizeof(timb_dma_platform_data), | ||
388 | }, | 389 | }, |
389 | { | 390 | { |
390 | .name = "timb-uart", | 391 | .name = "timb-uart", |
@@ -395,37 +396,43 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg0[] = { | |||
395 | .name = "xiic-i2c", | 396 | .name = "xiic-i2c", |
396 | .num_resources = ARRAY_SIZE(timberdale_xiic_resources), | 397 | .num_resources = ARRAY_SIZE(timberdale_xiic_resources), |
397 | .resources = timberdale_xiic_resources, | 398 | .resources = timberdale_xiic_resources, |
398 | .mfd_data = &timberdale_xiic_platform_data, | 399 | .platform_data = &timberdale_xiic_platform_data, |
400 | .pdata_size = sizeof(timberdale_xiic_platform_data), | ||
399 | }, | 401 | }, |
400 | { | 402 | { |
401 | .name = "timb-gpio", | 403 | .name = "timb-gpio", |
402 | .num_resources = ARRAY_SIZE(timberdale_gpio_resources), | 404 | .num_resources = ARRAY_SIZE(timberdale_gpio_resources), |
403 | .resources = timberdale_gpio_resources, | 405 | .resources = timberdale_gpio_resources, |
404 | .mfd_data = &timberdale_gpio_platform_data, | 406 | .platform_data = &timberdale_gpio_platform_data, |
407 | .pdata_size = sizeof(timberdale_gpio_platform_data), | ||
405 | }, | 408 | }, |
406 | { | 409 | { |
407 | .name = "timb-video", | 410 | .name = "timb-video", |
408 | .num_resources = ARRAY_SIZE(timberdale_video_resources), | 411 | .num_resources = ARRAY_SIZE(timberdale_video_resources), |
409 | .resources = timberdale_video_resources, | 412 | .resources = timberdale_video_resources, |
410 | .mfd_data = &timberdale_video_platform_data, | 413 | .platform_data = &timberdale_video_platform_data, |
414 | .pdata_size = sizeof(timberdale_video_platform_data), | ||
411 | }, | 415 | }, |
412 | { | 416 | { |
413 | .name = "timb-radio", | 417 | .name = "timb-radio", |
414 | .num_resources = ARRAY_SIZE(timberdale_radio_resources), | 418 | .num_resources = ARRAY_SIZE(timberdale_radio_resources), |
415 | .resources = timberdale_radio_resources, | 419 | .resources = timberdale_radio_resources, |
416 | .mfd_data = &timberdale_radio_platform_data, | 420 | .platform_data = &timberdale_radio_platform_data, |
421 | .pdata_size = sizeof(timberdale_radio_platform_data), | ||
417 | }, | 422 | }, |
418 | { | 423 | { |
419 | .name = "xilinx_spi", | 424 | .name = "xilinx_spi", |
420 | .num_resources = ARRAY_SIZE(timberdale_spi_resources), | 425 | .num_resources = ARRAY_SIZE(timberdale_spi_resources), |
421 | .resources = timberdale_spi_resources, | 426 | .resources = timberdale_spi_resources, |
422 | .mfd_data = &timberdale_xspi_platform_data, | 427 | .platform_data = &timberdale_xspi_platform_data, |
428 | .pdata_size = sizeof(timberdale_xspi_platform_data), | ||
423 | }, | 429 | }, |
424 | { | 430 | { |
425 | .name = "ks8842", | 431 | .name = "ks8842", |
426 | .num_resources = ARRAY_SIZE(timberdale_eth_resources), | 432 | .num_resources = ARRAY_SIZE(timberdale_eth_resources), |
427 | .resources = timberdale_eth_resources, | 433 | .resources = timberdale_eth_resources, |
428 | .mfd_data = &timberdale_ks8842_platform_data, | 434 | .platform_data = &timberdale_ks8842_platform_data, |
435 | .pdata_size = sizeof(timberdale_ks8842_platform_data), | ||
429 | }, | 436 | }, |
430 | }; | 437 | }; |
431 | 438 | ||
@@ -434,7 +441,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = { | |||
434 | .name = "timb-dma", | 441 | .name = "timb-dma", |
435 | .num_resources = ARRAY_SIZE(timberdale_dma_resources), | 442 | .num_resources = ARRAY_SIZE(timberdale_dma_resources), |
436 | .resources = timberdale_dma_resources, | 443 | .resources = timberdale_dma_resources, |
437 | .mfd_data = &timb_dma_platform_data, | 444 | .platform_data = &timb_dma_platform_data, |
445 | .pdata_size = sizeof(timb_dma_platform_data), | ||
438 | }, | 446 | }, |
439 | { | 447 | { |
440 | .name = "timb-uart", | 448 | .name = "timb-uart", |
@@ -450,13 +458,15 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = { | |||
450 | .name = "xiic-i2c", | 458 | .name = "xiic-i2c", |
451 | .num_resources = ARRAY_SIZE(timberdale_xiic_resources), | 459 | .num_resources = ARRAY_SIZE(timberdale_xiic_resources), |
452 | .resources = timberdale_xiic_resources, | 460 | .resources = timberdale_xiic_resources, |
453 | .mfd_data = &timberdale_xiic_platform_data, | 461 | .platform_data = &timberdale_xiic_platform_data, |
462 | .pdata_size = sizeof(timberdale_xiic_platform_data), | ||
454 | }, | 463 | }, |
455 | { | 464 | { |
456 | .name = "timb-gpio", | 465 | .name = "timb-gpio", |
457 | .num_resources = ARRAY_SIZE(timberdale_gpio_resources), | 466 | .num_resources = ARRAY_SIZE(timberdale_gpio_resources), |
458 | .resources = timberdale_gpio_resources, | 467 | .resources = timberdale_gpio_resources, |
459 | .mfd_data = &timberdale_gpio_platform_data, | 468 | .platform_data = &timberdale_gpio_platform_data, |
469 | .pdata_size = sizeof(timberdale_gpio_platform_data), | ||
460 | }, | 470 | }, |
461 | { | 471 | { |
462 | .name = "timb-mlogicore", | 472 | .name = "timb-mlogicore", |
@@ -467,25 +477,29 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg1[] = { | |||
467 | .name = "timb-video", | 477 | .name = "timb-video", |
468 | .num_resources = ARRAY_SIZE(timberdale_video_resources), | 478 | .num_resources = ARRAY_SIZE(timberdale_video_resources), |
469 | .resources = timberdale_video_resources, | 479 | .resources = timberdale_video_resources, |
470 | .mfd_data = &timberdale_video_platform_data, | 480 | .platform_data = &timberdale_video_platform_data, |
481 | .pdata_size = sizeof(timberdale_video_platform_data), | ||
471 | }, | 482 | }, |
472 | { | 483 | { |
473 | .name = "timb-radio", | 484 | .name = "timb-radio", |
474 | .num_resources = ARRAY_SIZE(timberdale_radio_resources), | 485 | .num_resources = ARRAY_SIZE(timberdale_radio_resources), |
475 | .resources = timberdale_radio_resources, | 486 | .resources = timberdale_radio_resources, |
476 | .mfd_data = &timberdale_radio_platform_data, | 487 | .platform_data = &timberdale_radio_platform_data, |
488 | .pdata_size = sizeof(timberdale_radio_platform_data), | ||
477 | }, | 489 | }, |
478 | { | 490 | { |
479 | .name = "xilinx_spi", | 491 | .name = "xilinx_spi", |
480 | .num_resources = ARRAY_SIZE(timberdale_spi_resources), | 492 | .num_resources = ARRAY_SIZE(timberdale_spi_resources), |
481 | .resources = timberdale_spi_resources, | 493 | .resources = timberdale_spi_resources, |
482 | .mfd_data = &timberdale_xspi_platform_data, | 494 | .platform_data = &timberdale_xspi_platform_data, |
495 | .pdata_size = sizeof(timberdale_xspi_platform_data), | ||
483 | }, | 496 | }, |
484 | { | 497 | { |
485 | .name = "ks8842", | 498 | .name = "ks8842", |
486 | .num_resources = ARRAY_SIZE(timberdale_eth_resources), | 499 | .num_resources = ARRAY_SIZE(timberdale_eth_resources), |
487 | .resources = timberdale_eth_resources, | 500 | .resources = timberdale_eth_resources, |
488 | .mfd_data = &timberdale_ks8842_platform_data, | 501 | .platform_data = &timberdale_ks8842_platform_data, |
502 | .pdata_size = sizeof(timberdale_ks8842_platform_data), | ||
489 | }, | 503 | }, |
490 | }; | 504 | }; |
491 | 505 | ||
@@ -494,7 +508,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = { | |||
494 | .name = "timb-dma", | 508 | .name = "timb-dma", |
495 | .num_resources = ARRAY_SIZE(timberdale_dma_resources), | 509 | .num_resources = ARRAY_SIZE(timberdale_dma_resources), |
496 | .resources = timberdale_dma_resources, | 510 | .resources = timberdale_dma_resources, |
497 | .mfd_data = &timb_dma_platform_data, | 511 | .platform_data = &timb_dma_platform_data, |
512 | .pdata_size = sizeof(timb_dma_platform_data), | ||
498 | }, | 513 | }, |
499 | { | 514 | { |
500 | .name = "timb-uart", | 515 | .name = "timb-uart", |
@@ -505,31 +520,36 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg2[] = { | |||
505 | .name = "xiic-i2c", | 520 | .name = "xiic-i2c", |
506 | .num_resources = ARRAY_SIZE(timberdale_xiic_resources), | 521 | .num_resources = ARRAY_SIZE(timberdale_xiic_resources), |
507 | .resources = timberdale_xiic_resources, | 522 | .resources = timberdale_xiic_resources, |
508 | .mfd_data = &timberdale_xiic_platform_data, | 523 | .platform_data = &timberdale_xiic_platform_data, |
524 | .pdata_size = sizeof(timberdale_xiic_platform_data), | ||
509 | }, | 525 | }, |
510 | { | 526 | { |
511 | .name = "timb-gpio", | 527 | .name = "timb-gpio", |
512 | .num_resources = ARRAY_SIZE(timberdale_gpio_resources), | 528 | .num_resources = ARRAY_SIZE(timberdale_gpio_resources), |
513 | .resources = timberdale_gpio_resources, | 529 | .resources = timberdale_gpio_resources, |
514 | .mfd_data = &timberdale_gpio_platform_data, | 530 | .platform_data = &timberdale_gpio_platform_data, |
531 | .pdata_size = sizeof(timberdale_gpio_platform_data), | ||
515 | }, | 532 | }, |
516 | { | 533 | { |
517 | .name = "timb-video", | 534 | .name = "timb-video", |
518 | .num_resources = ARRAY_SIZE(timberdale_video_resources), | 535 | .num_resources = ARRAY_SIZE(timberdale_video_resources), |
519 | .resources = timberdale_video_resources, | 536 | .resources = timberdale_video_resources, |
520 | .mfd_data = &timberdale_video_platform_data, | 537 | .platform_data = &timberdale_video_platform_data, |
538 | .pdata_size = sizeof(timberdale_video_platform_data), | ||
521 | }, | 539 | }, |
522 | { | 540 | { |
523 | .name = "timb-radio", | 541 | .name = "timb-radio", |
524 | .num_resources = ARRAY_SIZE(timberdale_radio_resources), | 542 | .num_resources = ARRAY_SIZE(timberdale_radio_resources), |
525 | .resources = timberdale_radio_resources, | 543 | .resources = timberdale_radio_resources, |
526 | .mfd_data = &timberdale_radio_platform_data, | 544 | .platform_data = &timberdale_radio_platform_data, |
545 | .pdata_size = sizeof(timberdale_radio_platform_data), | ||
527 | }, | 546 | }, |
528 | { | 547 | { |
529 | .name = "xilinx_spi", | 548 | .name = "xilinx_spi", |
530 | .num_resources = ARRAY_SIZE(timberdale_spi_resources), | 549 | .num_resources = ARRAY_SIZE(timberdale_spi_resources), |
531 | .resources = timberdale_spi_resources, | 550 | .resources = timberdale_spi_resources, |
532 | .mfd_data = &timberdale_xspi_platform_data, | 551 | .platform_data = &timberdale_xspi_platform_data, |
552 | .pdata_size = sizeof(timberdale_xspi_platform_data), | ||
533 | }, | 553 | }, |
534 | }; | 554 | }; |
535 | 555 | ||
@@ -538,7 +558,8 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = { | |||
538 | .name = "timb-dma", | 558 | .name = "timb-dma", |
539 | .num_resources = ARRAY_SIZE(timberdale_dma_resources), | 559 | .num_resources = ARRAY_SIZE(timberdale_dma_resources), |
540 | .resources = timberdale_dma_resources, | 560 | .resources = timberdale_dma_resources, |
541 | .mfd_data = &timb_dma_platform_data, | 561 | .platform_data = &timb_dma_platform_data, |
562 | .pdata_size = sizeof(timb_dma_platform_data), | ||
542 | }, | 563 | }, |
543 | { | 564 | { |
544 | .name = "timb-uart", | 565 | .name = "timb-uart", |
@@ -549,37 +570,43 @@ static __devinitdata struct mfd_cell timberdale_cells_bar0_cfg3[] = { | |||
549 | .name = "ocores-i2c", | 570 | .name = "ocores-i2c", |
550 | .num_resources = ARRAY_SIZE(timberdale_ocores_resources), | 571 | .num_resources = ARRAY_SIZE(timberdale_ocores_resources), |
551 | .resources = timberdale_ocores_resources, | 572 | .resources = timberdale_ocores_resources, |
552 | .mfd_data = &timberdale_ocores_platform_data, | 573 | .platform_data = &timberdale_ocores_platform_data, |
574 | .pdata_size = sizeof(timberdale_ocores_platform_data), | ||
553 | }, | 575 | }, |
554 | { | 576 | { |
555 | .name = "timb-gpio", | 577 | .name = "timb-gpio", |
556 | .num_resources = ARRAY_SIZE(timberdale_gpio_resources), | 578 | .num_resources = ARRAY_SIZE(timberdale_gpio_resources), |
557 | .resources = timberdale_gpio_resources, | 579 | .resources = timberdale_gpio_resources, |
558 | .mfd_data = &timberdale_gpio_platform_data, | 580 | .platform_data = &timberdale_gpio_platform_data, |
581 | .pdata_size = sizeof(timberdale_gpio_platform_data), | ||
559 | }, | 582 | }, |
560 | { | 583 | { |
561 | .name = "timb-video", | 584 | .name = "timb-video", |
562 | .num_resources = ARRAY_SIZE(timberdale_video_resources), | 585 | .num_resources = ARRAY_SIZE(timberdale_video_resources), |
563 | .resources = timberdale_video_resources, | 586 | .resources = timberdale_video_resources, |
564 | .mfd_data = &timberdale_video_platform_data, | 587 | .platform_data = &timberdale_video_platform_data, |
588 | .pdata_size = sizeof(timberdale_video_platform_data), | ||
565 | }, | 589 | }, |
566 | { | 590 | { |
567 | .name = "timb-radio", | 591 | .name = "timb-radio", |
568 | .num_resources = ARRAY_SIZE(timberdale_radio_resources), | 592 | .num_resources = ARRAY_SIZE(timberdale_radio_resources), |
569 | .resources = timberdale_radio_resources, | 593 | .resources = timberdale_radio_resources, |
570 | .mfd_data = &timberdale_radio_platform_data, | 594 | .platform_data = &timberdale_radio_platform_data, |
595 | .pdata_size = sizeof(timberdale_radio_platform_data), | ||
571 | }, | 596 | }, |
572 | { | 597 | { |
573 | .name = "xilinx_spi", | 598 | .name = "xilinx_spi", |
574 | .num_resources = ARRAY_SIZE(timberdale_spi_resources), | 599 | .num_resources = ARRAY_SIZE(timberdale_spi_resources), |
575 | .resources = timberdale_spi_resources, | 600 | .resources = timberdale_spi_resources, |
576 | .mfd_data = &timberdale_xspi_platform_data, | 601 | .platform_data = &timberdale_xspi_platform_data, |
602 | .pdata_size = sizeof(timberdale_xspi_platform_data), | ||
577 | }, | 603 | }, |
578 | { | 604 | { |
579 | .name = "ks8842", | 605 | .name = "ks8842", |
580 | .num_resources = ARRAY_SIZE(timberdale_eth_resources), | 606 | .num_resources = ARRAY_SIZE(timberdale_eth_resources), |
581 | .resources = timberdale_eth_resources, | 607 | .resources = timberdale_eth_resources, |
582 | .mfd_data = &timberdale_ks8842_platform_data, | 608 | .platform_data = &timberdale_ks8842_platform_data, |
609 | .pdata_size = sizeof(timberdale_ks8842_platform_data), | ||
583 | }, | 610 | }, |
584 | }; | 611 | }; |
585 | 612 | ||
diff --git a/drivers/mfd/tps6105x.c b/drivers/mfd/tps6105x.c index 46d8205646b6..a293b978e27c 100644 --- a/drivers/mfd/tps6105x.c +++ b/drivers/mfd/tps6105x.c | |||
@@ -183,7 +183,8 @@ static int __devinit tps6105x_probe(struct i2c_client *client, | |||
183 | /* Set up and register the platform devices. */ | 183 | /* Set up and register the platform devices. */ |
184 | for (i = 0; i < ARRAY_SIZE(tps6105x_cells); i++) { | 184 | for (i = 0; i < ARRAY_SIZE(tps6105x_cells); i++) { |
185 | /* One state holder for all drivers, this is simple */ | 185 | /* One state holder for all drivers, this is simple */ |
186 | tps6105x_cells[i].mfd_data = tps6105x; | 186 | tps6105x_cells[i].platform_data = tps6105x; |
187 | tps6105x_cells[i].pdata_size = sizeof(*tps6105x); | ||
187 | } | 188 | } |
188 | 189 | ||
189 | ret = mfd_add_devices(&client->dev, 0, tps6105x_cells, | 190 | ret = mfd_add_devices(&client->dev, 0, tps6105x_cells, |
diff --git a/drivers/mfd/tps6586x.c b/drivers/mfd/tps6586x.c index b600808690c1..bba26d96c240 100644 --- a/drivers/mfd/tps6586x.c +++ b/drivers/mfd/tps6586x.c | |||
@@ -270,8 +270,8 @@ static void tps6586x_gpio_set(struct gpio_chip *chip, unsigned offset, | |||
270 | { | 270 | { |
271 | struct tps6586x *tps6586x = container_of(chip, struct tps6586x, gpio); | 271 | struct tps6586x *tps6586x = container_of(chip, struct tps6586x, gpio); |
272 | 272 | ||
273 | __tps6586x_write(tps6586x->client, TPS6586X_GPIOSET2, | 273 | tps6586x_update(tps6586x->dev, TPS6586X_GPIOSET2, |
274 | value << offset); | 274 | value << offset, 1 << offset); |
275 | } | 275 | } |
276 | 276 | ||
277 | static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset, | 277 | static int tps6586x_gpio_output(struct gpio_chip *gc, unsigned offset, |
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c new file mode 100644 index 000000000000..2bfad5c86cc7 --- /dev/null +++ b/drivers/mfd/tps65910-irq.c | |||
@@ -0,0 +1,218 @@ | |||
1 | /* | ||
2 | * tps65910-irq.c -- TI TPS6591x | ||
3 | * | ||
4 | * Copyright 2010 Texas Instruments Inc. | ||
5 | * | ||
6 | * Author: Graeme Gregory <gg@slimlogic.co.uk> | ||
7 | * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the | ||
11 | * Free Software Foundation; either version 2 of the License, or (at your | ||
12 | * option) any later version. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/bug.h> | ||
20 | #include <linux/device.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/irq.h> | ||
23 | #include <linux/gpio.h> | ||
24 | #include <linux/mfd/tps65910.h> | ||
25 | |||
26 | static inline int irq_to_tps65910_irq(struct tps65910 *tps65910, | ||
27 | int irq) | ||
28 | { | ||
29 | return (irq - tps65910->irq_base); | ||
30 | } | ||
31 | |||
32 | /* | ||
33 | * This is a threaded IRQ handler so can access I2C/SPI. Since all | ||
34 | * interrupts are clear on read the IRQ line will be reasserted and | ||
35 | * the physical IRQ will be handled again if another interrupt is | ||
36 | * asserted while we run - in the normal course of events this is a | ||
37 | * rare occurrence so we save I2C/SPI reads. We're also assuming that | ||
38 | * it's rare to get lots of interrupts firing simultaneously so try to | ||
39 | * minimise I/O. | ||
40 | */ | ||
41 | static irqreturn_t tps65910_irq(int irq, void *irq_data) | ||
42 | { | ||
43 | struct tps65910 *tps65910 = irq_data; | ||
44 | u32 irq_sts; | ||
45 | u32 irq_mask; | ||
46 | u8 reg; | ||
47 | int i; | ||
48 | |||
49 | tps65910->read(tps65910, TPS65910_INT_STS, 1, &reg); | ||
50 | irq_sts = reg; | ||
51 | tps65910->read(tps65910, TPS65910_INT_STS2, 1, &reg); | ||
52 | irq_sts |= reg << 8; | ||
53 | switch (tps65910_chip_id(tps65910)) { | ||
54 | case TPS65911: | ||
55 | tps65910->read(tps65910, TPS65910_INT_STS3, 1, &reg); | ||
56 | irq_sts |= reg << 16; | ||
57 | } | ||
58 | |||
59 | tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg); | ||
60 | irq_mask = reg; | ||
61 | tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg); | ||
62 | irq_mask |= reg << 8; | ||
63 | switch (tps65910_chip_id(tps65910)) { | ||
64 | case TPS65911: | ||
65 | tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg); | ||
66 | irq_mask |= reg << 16; | ||
67 | } | ||
68 | |||
69 | irq_sts &= ~irq_mask; | ||
70 | |||
71 | if (!irq_sts) | ||
72 | return IRQ_NONE; | ||
73 | |||
74 | for (i = 0; i < tps65910->irq_num; i++) { | ||
75 | |||
76 | if (!(irq_sts & (1 << i))) | ||
77 | continue; | ||
78 | |||
79 | handle_nested_irq(tps65910->irq_base + i); | ||
80 | } | ||
81 | |||
82 | /* Write the STS register back to clear IRQs we handled */ | ||
83 | reg = irq_sts & 0xFF; | ||
84 | irq_sts >>= 8; | ||
85 | tps65910->write(tps65910, TPS65910_INT_STS, 1, &reg); | ||
86 | reg = irq_sts & 0xFF; | ||
87 | tps65910->write(tps65910, TPS65910_INT_STS2, 1, &reg); | ||
88 | switch (tps65910_chip_id(tps65910)) { | ||
89 | case TPS65911: | ||
90 | reg = irq_sts >> 8; | ||
91 | tps65910->write(tps65910, TPS65910_INT_STS3, 1, &reg); | ||
92 | } | ||
93 | |||
94 | return IRQ_HANDLED; | ||
95 | } | ||
96 | |||
97 | static void tps65910_irq_lock(struct irq_data *data) | ||
98 | { | ||
99 | struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data); | ||
100 | |||
101 | mutex_lock(&tps65910->irq_lock); | ||
102 | } | ||
103 | |||
104 | static void tps65910_irq_sync_unlock(struct irq_data *data) | ||
105 | { | ||
106 | struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data); | ||
107 | u32 reg_mask; | ||
108 | u8 reg; | ||
109 | |||
110 | tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg); | ||
111 | reg_mask = reg; | ||
112 | tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg); | ||
113 | reg_mask |= reg << 8; | ||
114 | switch (tps65910_chip_id(tps65910)) { | ||
115 | case TPS65911: | ||
116 | tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg); | ||
117 | reg_mask |= reg << 16; | ||
118 | } | ||
119 | |||
120 | if (tps65910->irq_mask != reg_mask) { | ||
121 | reg = tps65910->irq_mask & 0xFF; | ||
122 | tps65910->write(tps65910, TPS65910_INT_MSK, 1, &reg); | ||
123 | reg = tps65910->irq_mask >> 8 & 0xFF; | ||
124 | tps65910->write(tps65910, TPS65910_INT_MSK2, 1, &reg); | ||
125 | switch (tps65910_chip_id(tps65910)) { | ||
126 | case TPS65911: | ||
127 | reg = tps65910->irq_mask >> 16; | ||
128 | tps65910->write(tps65910, TPS65910_INT_MSK3, 1, &reg); | ||
129 | } | ||
130 | } | ||
131 | mutex_unlock(&tps65910->irq_lock); | ||
132 | } | ||
133 | |||
134 | static void tps65910_irq_enable(struct irq_data *data) | ||
135 | { | ||
136 | struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data); | ||
137 | |||
138 | tps65910->irq_mask &= ~(1 << irq_to_tps65910_irq(tps65910, data->irq)); | ||
139 | } | ||
140 | |||
141 | static void tps65910_irq_disable(struct irq_data *data) | ||
142 | { | ||
143 | struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data); | ||
144 | |||
145 | tps65910->irq_mask |= (1 << irq_to_tps65910_irq(tps65910, data->irq)); | ||
146 | } | ||
147 | |||
148 | static struct irq_chip tps65910_irq_chip = { | ||
149 | .name = "tps65910", | ||
150 | .irq_bus_lock = tps65910_irq_lock, | ||
151 | .irq_bus_sync_unlock = tps65910_irq_sync_unlock, | ||
152 | .irq_disable = tps65910_irq_disable, | ||
153 | .irq_enable = tps65910_irq_enable, | ||
154 | }; | ||
155 | |||
156 | int tps65910_irq_init(struct tps65910 *tps65910, int irq, | ||
157 | struct tps65910_platform_data *pdata) | ||
158 | { | ||
159 | int ret, cur_irq; | ||
160 | int flags = IRQF_ONESHOT; | ||
161 | |||
162 | if (!irq) { | ||
163 | dev_warn(tps65910->dev, "No interrupt support, no core IRQ\n"); | ||
164 | return -EINVAL; | ||
165 | } | ||
166 | |||
167 | if (!pdata || !pdata->irq_base) { | ||
168 | dev_warn(tps65910->dev, "No interrupt support, no IRQ base\n"); | ||
169 | return -EINVAL; | ||
170 | } | ||
171 | |||
172 | tps65910->irq_mask = 0xFFFFFF; | ||
173 | |||
174 | mutex_init(&tps65910->irq_lock); | ||
175 | tps65910->chip_irq = irq; | ||
176 | tps65910->irq_base = pdata->irq_base; | ||
177 | |||
178 | switch (tps65910_chip_id(tps65910)) { | ||
179 | case TPS65910: | ||
180 | tps65910->irq_num = TPS65910_NUM_IRQ; break; | ||
181 | case TPS65911: | ||
182 | tps65910->irq_num = TPS65911_NUM_IRQ; break; | ||
183 | } | ||
184 | |||
185 | /* Register with genirq */ | ||
186 | for (cur_irq = tps65910->irq_base; | ||
187 | cur_irq < tps65910->irq_num + tps65910->irq_base; | ||
188 | cur_irq++) { | ||
189 | irq_set_chip_data(cur_irq, tps65910); | ||
190 | irq_set_chip_and_handler(cur_irq, &tps65910_irq_chip, | ||
191 | handle_edge_irq); | ||
192 | irq_set_nested_thread(cur_irq, 1); | ||
193 | |||
194 | /* ARM needs us to explicitly flag the IRQ as valid | ||
195 | * and will set them noprobe when we do so. */ | ||
196 | #ifdef CONFIG_ARM | ||
197 | set_irq_flags(cur_irq, IRQF_VALID); | ||
198 | #else | ||
199 | irq_set_noprobe(cur_irq); | ||
200 | #endif | ||
201 | } | ||
202 | |||
203 | ret = request_threaded_irq(irq, NULL, tps65910_irq, flags, | ||
204 | "tps65910", tps65910); | ||
205 | |||
206 | irq_set_irq_type(irq, IRQ_TYPE_LEVEL_LOW); | ||
207 | |||
208 | if (ret != 0) | ||
209 | dev_err(tps65910->dev, "Failed to request IRQ: %d\n", ret); | ||
210 | |||
211 | return ret; | ||
212 | } | ||
213 | |||
214 | int tps65910_irq_exit(struct tps65910 *tps65910) | ||
215 | { | ||
216 | free_irq(tps65910->chip_irq, tps65910); | ||
217 | return 0; | ||
218 | } | ||
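/*
 * Consumer sketch (illustration only, not from this patch): the irqs set
 * up above are nested threaded interrupts, so a sub-device driver (the
 * rtc cell, say) would claim one with a threaded handler and no primary
 * handler.  TPS65910_IRQ_RTC_ALARM and rtc_alarm_thread_fn are
 * placeholder names for the example.
 *
 *	ret = request_threaded_irq(pdata->irq_base + TPS65910_IRQ_RTC_ALARM,
 *				   NULL, rtc_alarm_thread_fn, IRQF_ONESHOT,
 *				   "tps65910-rtc", rtc);
 */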
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c new file mode 100644 index 000000000000..2229e66d80db --- /dev/null +++ b/drivers/mfd/tps65910.c | |||
@@ -0,0 +1,229 @@ | |||
1 | /* | ||
2 | * tps65910.c -- TI TPS6591x | ||
3 | * | ||
4 | * Copyright 2010 Texas Instruments Inc. | ||
5 | * | ||
6 | * Author: Graeme Gregory <gg@slimlogic.co.uk> | ||
7 | * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the | ||
11 | * Free Software Foundation; either version 2 of the License, or (at your | ||
12 | * option) any later version. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/moduleparam.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/slab.h> | ||
20 | #include <linux/i2c.h> | ||
21 | #include <linux/gpio.h> | ||
22 | #include <linux/mfd/core.h> | ||
23 | #include <linux/mfd/tps65910.h> | ||
24 | |||
25 | static struct mfd_cell tps65910s[] = { | ||
26 | { | ||
27 | .name = "tps65910-pmic", | ||
28 | }, | ||
29 | { | ||
30 | .name = "tps65910-rtc", | ||
31 | }, | ||
32 | { | ||
33 | .name = "tps65910-power", | ||
34 | }, | ||
35 | }; | ||
36 | |||
37 | |||
38 | static int tps65910_i2c_read(struct tps65910 *tps65910, u8 reg, | ||
39 | int bytes, void *dest) | ||
40 | { | ||
41 | struct i2c_client *i2c = tps65910->i2c_client; | ||
42 | struct i2c_msg xfer[2]; | ||
43 | int ret; | ||
44 | |||
45 | /* Write register */ | ||
46 | xfer[0].addr = i2c->addr; | ||
47 | xfer[0].flags = 0; | ||
48 | xfer[0].len = 1; | ||
49 | xfer[0].buf = &reg; | ||
50 | |||
51 | /* Read data */ | ||
52 | xfer[1].addr = i2c->addr; | ||
53 | xfer[1].flags = I2C_M_RD; | ||
54 | xfer[1].len = bytes; | ||
55 | xfer[1].buf = dest; | ||
56 | |||
57 | ret = i2c_transfer(i2c->adapter, xfer, 2); | ||
58 | if (ret == 2) | ||
59 | ret = 0; | ||
60 | else if (ret >= 0) | ||
61 | ret = -EIO; | ||
62 | |||
63 | return ret; | ||
64 | } | ||
65 | |||
66 | static int tps65910_i2c_write(struct tps65910 *tps65910, u8 reg, | ||
67 | int bytes, void *src) | ||
68 | { | ||
69 | struct i2c_client *i2c = tps65910->i2c_client; | ||
70 | /* we add 1 byte for device register */ | ||
71 | u8 msg[TPS65910_MAX_REGISTER + 1]; | ||
72 | int ret; | ||
73 | |||
74 | if (bytes > TPS65910_MAX_REGISTER) | ||
75 | return -EINVAL; | ||
76 | |||
77 | msg[0] = reg; | ||
78 | memcpy(&msg[1], src, bytes); | ||
79 | |||
80 | ret = i2c_master_send(i2c, msg, bytes + 1); | ||
81 | if (ret < 0) | ||
82 | return ret; | ||
83 | if (ret != bytes + 1) | ||
84 | return -EIO; | ||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask) | ||
89 | { | ||
90 | u8 data; | ||
91 | int err; | ||
92 | |||
93 | mutex_lock(&tps65910->io_mutex); | ||
94 | err = tps65910_i2c_read(tps65910, reg, 1, &data); | ||
95 | if (err) { | ||
96 | dev_err(tps65910->dev, "read from reg %x failed\n", reg); | ||
97 | goto out; | ||
98 | } | ||
99 | |||
100 | data |= mask; | ||
101 | err = tps65910_i2c_write(tps65910, reg, 1, &data); | ||
102 | if (err) | ||
103 | dev_err(tps65910->dev, "write to reg %x failed\n", reg); | ||
104 | |||
105 | out: | ||
106 | mutex_unlock(&tps65910->io_mutex); | ||
107 | return err; | ||
108 | } | ||
109 | EXPORT_SYMBOL_GPL(tps65910_set_bits); | ||
110 | |||
111 | int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask) | ||
112 | { | ||
113 | u8 data; | ||
114 | int err; | ||
115 | |||
116 | mutex_lock(&tps65910->io_mutex); | ||
117 | err = tps65910_i2c_read(tps65910, reg, 1, &data); | ||
118 | if (err) { | ||
119 | dev_err(tps65910->dev, "read from reg %x failed\n", reg); | ||
120 | goto out; | ||
121 | } | ||
122 | |||
123 | data &= ~mask; | ||
124 | err = tps65910_i2c_write(tps65910, reg, 1, &data); | ||
125 | if (err) | ||
126 | dev_err(tps65910->dev, "write to reg %x failed\n", reg); | ||
127 | |||
128 | out: | ||
129 | mutex_unlock(&tps65910->io_mutex); | ||
130 | return err; | ||
131 | } | ||
132 | EXPORT_SYMBOL_GPL(tps65910_clear_bits); | ||
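/*
 * Usage sketch (illustration only, not from this patch): a sub-device
 * such as the regulator cell can flip a control bit through the two
 * helpers above.  The register and mask names below are placeholders
 * chosen for the example.
 *
 *	err = tps65910_set_bits(tps65910, TPS65910_DEVCTRL, DEVCTRL_DEV_SLP_MASK);
 *	if (err)
 *		return err;
 *	...
 *	err = tps65910_clear_bits(tps65910, TPS65910_DEVCTRL, DEVCTRL_DEV_SLP_MASK);
 */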
133 | |||
134 | static int tps65910_i2c_probe(struct i2c_client *i2c, | ||
135 | const struct i2c_device_id *id) | ||
136 | { | ||
137 | struct tps65910 *tps65910; | ||
138 | struct tps65910_board *pmic_plat_data; | ||
139 | struct tps65910_platform_data *init_data; | ||
140 | int ret = 0; | ||
141 | |||
142 | pmic_plat_data = dev_get_platdata(&i2c->dev); | ||
143 | if (!pmic_plat_data) | ||
144 | return -EINVAL; | ||
145 | |||
146 | init_data = kzalloc(sizeof(struct tps65910_platform_data), GFP_KERNEL); | ||
147 | if (init_data == NULL) | ||
148 | return -ENOMEM; | ||
149 | |||
150 | init_data->irq = pmic_plat_data->irq; | ||
151 | init_data->irq_base = pmic_plat_data->irq_base; | ||
152 | |||
153 | tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL); | ||
154 | if (tps65910 == NULL) | ||
155 | return -ENOMEM; | ||
156 | |||
157 | i2c_set_clientdata(i2c, tps65910); | ||
158 | tps65910->dev = &i2c->dev; | ||
159 | tps65910->i2c_client = i2c; | ||
160 | tps65910->id = id->driver_data; | ||
161 | tps65910->read = tps65910_i2c_read; | ||
162 | tps65910->write = tps65910_i2c_write; | ||
163 | mutex_init(&tps65910->io_mutex); | ||
164 | |||
165 | ret = mfd_add_devices(tps65910->dev, -1, | ||
166 | tps65910s, ARRAY_SIZE(tps65910s), | ||
167 | NULL, 0); | ||
168 | if (ret < 0) | ||
169 | goto err; | ||
170 | |||
171 | tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base); | ||
172 | |||
173 | ret = tps65910_irq_init(tps65910, init_data->irq, init_data); | ||
174 | if (ret < 0) | ||
175 | goto err; | ||
176 | |||
177 | return ret; | ||
178 | |||
179 | err: | ||
180 | mfd_remove_devices(tps65910->dev); | ||
181 | kfree(tps65910); | ||
182 | return ret; | ||
183 | } | ||
184 | |||
185 | static int tps65910_i2c_remove(struct i2c_client *i2c) | ||
186 | { | ||
187 | struct tps65910 *tps65910 = i2c_get_clientdata(i2c); | ||
188 | |||
189 | mfd_remove_devices(tps65910->dev); | ||
190 | kfree(tps65910); | ||
191 | |||
192 | return 0; | ||
193 | } | ||
194 | |||
195 | static const struct i2c_device_id tps65910_i2c_id[] = { | ||
196 | { "tps65910", TPS65910 }, | ||
197 | { "tps65911", TPS65911 }, | ||
198 | { } | ||
199 | }; | ||
200 | MODULE_DEVICE_TABLE(i2c, tps65910_i2c_id); | ||
201 | |||
202 | |||
203 | static struct i2c_driver tps65910_i2c_driver = { | ||
204 | .driver = { | ||
205 | .name = "tps65910", | ||
206 | .owner = THIS_MODULE, | ||
207 | }, | ||
208 | .probe = tps65910_i2c_probe, | ||
209 | .remove = tps65910_i2c_remove, | ||
210 | .id_table = tps65910_i2c_id, | ||
211 | }; | ||
212 | |||
213 | static int __init tps65910_i2c_init(void) | ||
214 | { | ||
215 | return i2c_add_driver(&tps65910_i2c_driver); | ||
216 | } | ||
217 | /* init early so consumer devices can complete system boot */ | ||
218 | subsys_initcall(tps65910_i2c_init); | ||
219 | |||
220 | static void __exit tps65910_i2c_exit(void) | ||
221 | { | ||
222 | i2c_del_driver(&tps65910_i2c_driver); | ||
223 | } | ||
224 | module_exit(tps65910_i2c_exit); | ||
225 | |||
226 | MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>"); | ||
227 | MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>"); | ||
228 | MODULE_DESCRIPTION("TPS6591x chip family multi-function driver"); | ||
229 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/mfd/tps65911-comparator.c b/drivers/mfd/tps65911-comparator.c new file mode 100644 index 000000000000..3d2dc56a3d40 --- /dev/null +++ b/drivers/mfd/tps65911-comparator.c | |||
@@ -0,0 +1,188 @@ | |||
1 | /* | ||
2 | * tps65910.c -- TI TPS6591x | ||
3 | * | ||
4 | * Copyright 2010 Texas Instruments Inc. | ||
5 | * | ||
6 | * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk> | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify it | ||
9 | * under the terms of the GNU General Public License as published by the | ||
10 | * Free Software Foundation; either version 2 of the License, or (at your | ||
11 | * option) any later version. | ||
12 | * | ||
13 | */ | ||
14 | |||
15 | #include <linux/kernel.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/init.h> | ||
18 | #include <linux/slab.h> | ||
19 | #include <linux/err.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/debugfs.h> | ||
22 | #include <linux/gpio.h> | ||
23 | #include <linux/mfd/tps65910.h> | ||
24 | |||
25 | #define COMP 0 | ||
26 | #define COMP1 1 | ||
27 | #define COMP2 2 | ||
28 | |||
29 | /* Comparator 1 voltage selection table in millivolts */ | ||
30 | static const u16 COMP_VSEL_TABLE[] = { | ||
31 | 0, 2500, 2500, 2500, 2500, 2550, 2600, 2650, | ||
32 | 2700, 2750, 2800, 2850, 2900, 2950, 3000, 3050, | ||
33 | 3100, 3150, 3200, 3250, 3300, 3350, 3400, 3450, | ||
34 | 3500, | ||
35 | }; | ||
36 | |||
37 | struct comparator { | ||
38 | const char *name; | ||
39 | int reg; | ||
40 | int uV_max; | ||
41 | const u16 *vsel_table; | ||
42 | }; | ||
43 | |||
44 | static struct comparator tps_comparators[] = { | ||
45 | { | ||
46 | .name = "COMP1", | ||
47 | .reg = TPS65911_VMBCH, | ||
48 | .uV_max = 3500, | ||
49 | .vsel_table = COMP_VSEL_TABLE, | ||
50 | }, | ||
51 | { | ||
52 | .name = "COMP2", | ||
53 | .reg = TPS65911_VMBCH2, | ||
54 | .uV_max = 3500, | ||
55 | .vsel_table = COMP_VSEL_TABLE, | ||
56 | }, | ||
57 | }; | ||
58 | |||
59 | static int comp_threshold_set(struct tps65910 *tps65910, int id, int voltage) | ||
60 | { | ||
61 | struct comparator tps_comp = tps_comparators[id]; | ||
62 | int curr_voltage = 0; | ||
63 | int ret; | ||
64 | u8 index = 0, val; | ||
65 | |||
66 | if (id == COMP) | ||
67 | return 0; | ||
68 | |||
69 | while (curr_voltage < tps_comp.uV_max) { | ||
70 | curr_voltage = tps_comp.vsel_table[index]; | ||
71 | if (curr_voltage >= voltage) | ||
72 | break; | ||
73 | else if (curr_voltage < voltage) | ||
74 | index++; | ||
75 | } | ||
76 | |||
77 | if (curr_voltage > tps_comp.uV_max) | ||
78 | return -EINVAL; | ||
79 | |||
80 | val = index << 1; | ||
81 | ret = tps65910->write(tps65910, tps_comp.reg, 1, &val); | ||
82 | |||
83 | return ret; | ||
84 | } | ||
85 | |||
86 | static int comp_threshold_get(struct tps65910 *tps65910, int id) | ||
87 | { | ||
88 | struct comparator tps_comp = tps_comparators[id]; | ||
89 | int ret; | ||
90 | u8 val; | ||
91 | |||
92 | if (id == COMP) | ||
93 | return 0; | ||
94 | |||
95 | ret = tps65910->read(tps65910, tps_comp.reg, 1, &val); | ||
96 | if (ret < 0) | ||
97 | return ret; | ||
98 | |||
99 | val >>= 1; | ||
100 | return tps_comp.vsel_table[val]; | ||
101 | } | ||
102 | |||
103 | static ssize_t comp_threshold_show(struct device *dev, | ||
104 | struct device_attribute *attr, char *buf) | ||
105 | { | ||
106 | struct tps65910 *tps65910 = dev_get_drvdata(dev->parent); | ||
107 | struct attribute comp_attr = attr->attr; | ||
108 | int id, uVolt; | ||
109 | |||
110 | if (!strcmp(comp_attr.name, "comp1_threshold")) | ||
111 | id = COMP1; | ||
112 | else if (!strcmp(comp_attr.name, "comp2_threshold")) | ||
113 | id = COMP2; | ||
114 | else | ||
115 | return -EINVAL; | ||
116 | |||
117 | uVolt = comp_threshold_get(tps65910, id); | ||
118 | |||
119 | return sprintf(buf, "%d\n", uVolt); | ||
120 | } | ||
121 | |||
122 | static DEVICE_ATTR(comp1_threshold, S_IRUGO, comp_threshold_show, NULL); | ||
123 | static DEVICE_ATTR(comp2_threshold, S_IRUGO, comp_threshold_show, NULL); | ||
124 | |||
125 | static __devinit int tps65911_comparator_probe(struct platform_device *pdev) | ||
126 | { | ||
127 | struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent); | ||
128 | struct tps65910_platform_data *pdata = dev_get_platdata(tps65910->dev); | ||
129 | int ret; | ||
130 | |||
131 | ret = comp_threshold_set(tps65910, COMP1, pdata->vmbch_threshold); | ||
132 | if (ret < 0) { | ||
133 | dev_err(&pdev->dev, "cannot set COMP1 threshold\n"); | ||
134 | return ret; | ||
135 | } | ||
136 | |||
137 | ret = comp_threshold_set(tps65910, COMP2, pdata->vmbch2_threshold); | ||
138 | if (ret < 0) { | ||
139 | dev_err(&pdev->dev, "cannot set COMP2 threshold\n"); | ||
140 | return ret; | ||
141 | } | ||
142 | |||
143 | /* Create sysfs entry */ | ||
144 | ret = device_create_file(&pdev->dev, &dev_attr_comp1_threshold); | ||
145 | if (ret < 0) | ||
146 | dev_err(&pdev->dev, "failed to add COMP1 sysfs file\n"); | ||
147 | |||
148 | ret = device_create_file(&pdev->dev, &dev_attr_comp2_threshold); | ||
149 | if (ret < 0) | ||
150 | dev_err(&pdev->dev, "failed to add COMP2 sysfs file\n"); | ||
151 | |||
152 | return ret; | ||
153 | } | ||
154 | |||
155 | static __devexit int tps65911_comparator_remove(struct platform_device *pdev) | ||
156 | { | ||
157 | struct tps65910 *tps65910; | ||
158 | |||
159 | tps65910 = dev_get_drvdata(pdev->dev.parent); | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | static struct platform_driver tps65911_comparator_driver = { | ||
165 | .driver = { | ||
166 | .name = "tps65911-comparator", | ||
167 | .owner = THIS_MODULE, | ||
168 | }, | ||
169 | .probe = tps65911_comparator_probe, | ||
170 | .remove = __devexit_p(tps65911_comparator_remove), | ||
171 | }; | ||
172 | |||
173 | static int __init tps65911_comparator_init(void) | ||
174 | { | ||
175 | return platform_driver_register(&tps65911_comparator_driver); | ||
176 | } | ||
177 | subsys_initcall(tps65911_comparator_init); | ||
178 | |||
179 | static void __exit tps65911_comparator_exit(void) | ||
180 | { | ||
181 | platform_driver_unregister(&tps65911_comparator_driver); | ||
182 | } | ||
183 | module_exit(tps65911_comparator_exit); | ||
184 | |||
185 | MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>"); | ||
186 | MODULE_DESCRIPTION("TPS65911 comparator driver"); | ||
187 | MODULE_LICENSE("GPL v2"); | ||
188 | MODULE_ALIAS("platform:tps65911-comparator"); | ||
diff --git a/drivers/mfd/twl-core.c b/drivers/mfd/twl-core.c index 960b5bed7f52..b8f2a4e7f6e7 100644 --- a/drivers/mfd/twl-core.c +++ b/drivers/mfd/twl-core.c | |||
@@ -198,6 +198,7 @@ | |||
198 | #define TWL6030_BASEADD_GASGAUGE 0x00C0 | 198 | #define TWL6030_BASEADD_GASGAUGE 0x00C0 |
199 | #define TWL6030_BASEADD_PIH 0x00D0 | 199 | #define TWL6030_BASEADD_PIH 0x00D0 |
200 | #define TWL6030_BASEADD_CHARGER 0x00E0 | 200 | #define TWL6030_BASEADD_CHARGER 0x00E0 |
201 | #define TWL6025_BASEADD_CHARGER 0x00DA | ||
201 | 202 | ||
202 | /* subchip/slave 2 0x4A - DFT */ | 203 | /* subchip/slave 2 0x4A - DFT */ |
203 | #define TWL6030_BASEADD_DIEID 0x00C0 | 204 | #define TWL6030_BASEADD_DIEID 0x00C0 |
@@ -229,6 +230,9 @@ | |||
229 | /* is driver active, bound to a chip? */ | 230 | /* is driver active, bound to a chip? */ |
230 | static bool inuse; | 231 | static bool inuse; |
231 | 232 | ||
233 | /* TWL IDCODE Register value */ | ||
234 | static u32 twl_idcode; | ||
235 | |||
232 | static unsigned int twl_id; | 236 | static unsigned int twl_id; |
233 | unsigned int twl_rev(void) | 237 | unsigned int twl_rev(void) |
234 | { | 238 | { |
@@ -328,6 +332,7 @@ static struct twl_mapping twl6030_map[] = { | |||
328 | 332 | ||
329 | { SUB_CHIP_ID0, TWL6030_BASEADD_RTC }, | 333 | { SUB_CHIP_ID0, TWL6030_BASEADD_RTC }, |
330 | { SUB_CHIP_ID0, TWL6030_BASEADD_MEM }, | 334 | { SUB_CHIP_ID0, TWL6030_BASEADD_MEM }, |
335 | { SUB_CHIP_ID1, TWL6025_BASEADD_CHARGER }, | ||
331 | }; | 336 | }; |
332 | 337 | ||
333 | /*----------------------------------------------------------------------*/ | 338 | /*----------------------------------------------------------------------*/ |
@@ -487,6 +492,58 @@ EXPORT_SYMBOL(twl_i2c_read_u8); | |||
487 | 492 | ||
488 | /*----------------------------------------------------------------------*/ | 493 | /*----------------------------------------------------------------------*/ |
489 | 494 | ||
495 | /** | ||
496 | * twl_read_idcode_register - API to read the IDCODE register. | ||
497 | * | ||
498 | * Unlocks the IDCODE register and reads the 32-bit value. | ||
499 | */ | ||
500 | static int twl_read_idcode_register(void) | ||
501 | { | ||
502 | int err; | ||
503 | |||
504 | err = twl_i2c_write_u8(TWL4030_MODULE_INTBR, TWL_EEPROM_R_UNLOCK, | ||
505 | REG_UNLOCK_TEST_REG); | ||
506 | if (err) { | ||
507 | pr_err("TWL4030 Unable to unlock IDCODE registers -%d\n", err); | ||
508 | goto fail; | ||
509 | } | ||
510 | |||
511 | err = twl_i2c_read(TWL4030_MODULE_INTBR, (u8 *)(&twl_idcode), | ||
512 | REG_IDCODE_7_0, 4); | ||
513 | if (err) { | ||
514 | pr_err("TWL4030: unable to read IDCODE -%d\n", err); | ||
515 | goto fail; | ||
516 | } | ||
517 | |||
518 | err = twl_i2c_write_u8(TWL4030_MODULE_INTBR, 0x0, REG_UNLOCK_TEST_REG); | ||
519 | if (err) | ||
520 | pr_err("TWL4030 Unable to relock IDCODE registers -%d\n", err); | ||
521 | fail: | ||
522 | return err; | ||
523 | } | ||
524 | |||
525 | /** | ||
526 | * twl_get_type - API to get TWL Si type. | ||
527 | * | ||
528 | * API to get the TWL Si type from the IDCODE value. | ||
529 | */ | ||
530 | int twl_get_type(void) | ||
531 | { | ||
532 | return TWL_SIL_TYPE(twl_idcode); | ||
533 | } | ||
534 | EXPORT_SYMBOL_GPL(twl_get_type); | ||
535 | |||
536 | /** | ||
537 | * twl_get_version - API to get TWL Si version. | ||
538 | * | ||
539 | * API to get the TWL Si version from the IDCODE value. | ||
540 | */ | ||
541 | int twl_get_version(void) | ||
542 | { | ||
543 | return TWL_SIL_REV(twl_idcode); | ||
544 | } | ||
545 | EXPORT_SYMBOL_GPL(twl_get_version); | ||
546 | |||
490 | static struct device * | 547 | static struct device * |
491 | add_numbered_child(unsigned chip, const char *name, int num, | 548 | add_numbered_child(unsigned chip, const char *name, int num, |
492 | void *pdata, unsigned pdata_len, | 549 | void *pdata, unsigned pdata_len, |
@@ -549,7 +606,7 @@ static inline struct device *add_child(unsigned chip, const char *name, | |||
549 | static struct device * | 606 | static struct device * |
550 | add_regulator_linked(int num, struct regulator_init_data *pdata, | 607 | add_regulator_linked(int num, struct regulator_init_data *pdata, |
551 | struct regulator_consumer_supply *consumers, | 608 | struct regulator_consumer_supply *consumers, |
552 | unsigned num_consumers) | 609 | unsigned num_consumers, unsigned long features) |
553 | { | 610 | { |
554 | unsigned sub_chip_id; | 611 | unsigned sub_chip_id; |
555 | /* regulator framework demands init_data ... */ | 612 | /* regulator framework demands init_data ... */ |
@@ -561,6 +618,8 @@ add_regulator_linked(int num, struct regulator_init_data *pdata, | |||
561 | pdata->num_consumer_supplies = num_consumers; | 618 | pdata->num_consumer_supplies = num_consumers; |
562 | } | 619 | } |
563 | 620 | ||
621 | pdata->driver_data = (void *)features; | ||
622 | |||
564 | /* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */ | 623 | /* NOTE: we currently ignore regulator IRQs, e.g. for short circuits */ |
565 | sub_chip_id = twl_map[TWL_MODULE_PM_MASTER].sid; | 624 | sub_chip_id = twl_map[TWL_MODULE_PM_MASTER].sid; |
566 | return add_numbered_child(sub_chip_id, "twl_reg", num, | 625 | return add_numbered_child(sub_chip_id, "twl_reg", num, |
@@ -568,9 +627,10 @@ add_regulator_linked(int num, struct regulator_init_data *pdata, | |||
568 | } | 627 | } |
569 | 628 | ||
570 | static struct device * | 629 | static struct device * |
571 | add_regulator(int num, struct regulator_init_data *pdata) | 630 | add_regulator(int num, struct regulator_init_data *pdata, |
631 | unsigned long features) | ||
572 | { | 632 | { |
573 | return add_regulator_linked(num, pdata, NULL, 0); | 633 | return add_regulator_linked(num, pdata, NULL, 0, features); |
574 | } | 634 | } |
575 | 635 | ||
576 | /* | 636 | /* |
@@ -650,17 +710,20 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) | |||
650 | }; | 710 | }; |
651 | 711 | ||
652 | child = add_regulator_linked(TWL4030_REG_VUSB1V5, | 712 | child = add_regulator_linked(TWL4030_REG_VUSB1V5, |
653 | &usb_fixed, &usb1v5, 1); | 713 | &usb_fixed, &usb1v5, 1, |
714 | features); | ||
654 | if (IS_ERR(child)) | 715 | if (IS_ERR(child)) |
655 | return PTR_ERR(child); | 716 | return PTR_ERR(child); |
656 | 717 | ||
657 | child = add_regulator_linked(TWL4030_REG_VUSB1V8, | 718 | child = add_regulator_linked(TWL4030_REG_VUSB1V8, |
658 | &usb_fixed, &usb1v8, 1); | 719 | &usb_fixed, &usb1v8, 1, |
720 | features); | ||
659 | if (IS_ERR(child)) | 721 | if (IS_ERR(child)) |
660 | return PTR_ERR(child); | 722 | return PTR_ERR(child); |
661 | 723 | ||
662 | child = add_regulator_linked(TWL4030_REG_VUSB3V1, | 724 | child = add_regulator_linked(TWL4030_REG_VUSB3V1, |
663 | &usb_fixed, &usb3v1, 1); | 725 | &usb_fixed, &usb3v1, 1, |
726 | features); | ||
664 | if (IS_ERR(child)) | 727 | if (IS_ERR(child)) |
665 | return PTR_ERR(child); | 728 | return PTR_ERR(child); |
666 | 729 | ||
@@ -685,9 +748,8 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) | |||
685 | } | 748 | } |
686 | if (twl_has_usb() && pdata->usb && twl_class_is_6030()) { | 749 | if (twl_has_usb() && pdata->usb && twl_class_is_6030()) { |
687 | 750 | ||
688 | static struct regulator_consumer_supply usb3v3 = { | 751 | static struct regulator_consumer_supply usb3v3; |
689 | .supply = "vusb", | 752 | int regulator; |
690 | }; | ||
691 | 753 | ||
692 | if (twl_has_regulator()) { | 754 | if (twl_has_regulator()) { |
693 | /* this is a template that gets copied */ | 755 | /* this is a template that gets copied */ |
@@ -700,12 +762,22 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) | |||
700 | | REGULATOR_CHANGE_STATUS, | 762 | | REGULATOR_CHANGE_STATUS, |
701 | }; | 763 | }; |
702 | 764 | ||
703 | child = add_regulator_linked(TWL6030_REG_VUSB, | 765 | if (features & TWL6025_SUBCLASS) { |
704 | &usb_fixed, &usb3v3, 1); | 766 | usb3v3.supply = "ldousb"; |
767 | regulator = TWL6025_REG_LDOUSB; | ||
768 | } else { | ||
769 | usb3v3.supply = "vusb"; | ||
770 | regulator = TWL6030_REG_VUSB; | ||
771 | } | ||
772 | child = add_regulator_linked(regulator, &usb_fixed, | ||
773 | &usb3v3, 1, | ||
774 | features); | ||
705 | if (IS_ERR(child)) | 775 | if (IS_ERR(child)) |
706 | return PTR_ERR(child); | 776 | return PTR_ERR(child); |
707 | } | 777 | } |
708 | 778 | ||
779 | pdata->usb->features = features; | ||
780 | |||
709 | child = add_child(0, "twl6030_usb", | 781 | child = add_child(0, "twl6030_usb", |
710 | pdata->usb, sizeof(*pdata->usb), | 782 | pdata->usb, sizeof(*pdata->usb), |
711 | true, | 783 | true, |
@@ -718,7 +790,16 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) | |||
718 | /* we need to connect regulators to this transceiver */ | 790 | /* we need to connect regulators to this transceiver */ |
719 | if (twl_has_regulator() && child) | 791 | if (twl_has_regulator() && child) |
720 | usb3v3.dev = child; | 792 | usb3v3.dev = child; |
793 | } else if (twl_has_regulator() && twl_class_is_6030()) { | ||
794 | if (features & TWL6025_SUBCLASS) | ||
795 | child = add_regulator(TWL6025_REG_LDOUSB, | ||
796 | pdata->ldousb, features); | ||
797 | else | ||
798 | child = add_regulator(TWL6030_REG_VUSB, | ||
799 | pdata->vusb, features); | ||
721 | 800 | ||
801 | if (IS_ERR(child)) | ||
802 | return PTR_ERR(child); | ||
722 | } | 803 | } |
723 | 804 | ||
724 | if (twl_has_watchdog() && twl_class_is_4030()) { | 805 | if (twl_has_watchdog() && twl_class_is_4030()) { |
@@ -755,46 +836,55 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) | |||
755 | 836 | ||
756 | /* twl4030 regulators */ | 837 | /* twl4030 regulators */ |
757 | if (twl_has_regulator() && twl_class_is_4030()) { | 838 | if (twl_has_regulator() && twl_class_is_4030()) { |
758 | child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1); | 839 | child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1, |
840 | features); | ||
759 | if (IS_ERR(child)) | 841 | if (IS_ERR(child)) |
760 | return PTR_ERR(child); | 842 | return PTR_ERR(child); |
761 | 843 | ||
762 | child = add_regulator(TWL4030_REG_VIO, pdata->vio); | 844 | child = add_regulator(TWL4030_REG_VIO, pdata->vio, |
845 | features); | ||
763 | if (IS_ERR(child)) | 846 | if (IS_ERR(child)) |
764 | return PTR_ERR(child); | 847 | return PTR_ERR(child); |
765 | 848 | ||
766 | child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1); | 849 | child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1, |
850 | features); | ||
767 | if (IS_ERR(child)) | 851 | if (IS_ERR(child)) |
768 | return PTR_ERR(child); | 852 | return PTR_ERR(child); |
769 | 853 | ||
770 | child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2); | 854 | child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2, |
855 | features); | ||
771 | if (IS_ERR(child)) | 856 | if (IS_ERR(child)) |
772 | return PTR_ERR(child); | 857 | return PTR_ERR(child); |
773 | 858 | ||
774 | child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1); | 859 | child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1, |
860 | features); | ||
775 | if (IS_ERR(child)) | 861 | if (IS_ERR(child)) |
776 | return PTR_ERR(child); | 862 | return PTR_ERR(child); |
777 | 863 | ||
778 | child = add_regulator(TWL4030_REG_VDAC, pdata->vdac); | 864 | child = add_regulator(TWL4030_REG_VDAC, pdata->vdac, |
865 | features); | ||
779 | if (IS_ERR(child)) | 866 | if (IS_ERR(child)) |
780 | return PTR_ERR(child); | 867 | return PTR_ERR(child); |
781 | 868 | ||
782 | child = add_regulator((features & TWL4030_VAUX2) | 869 | child = add_regulator((features & TWL4030_VAUX2) |
783 | ? TWL4030_REG_VAUX2_4030 | 870 | ? TWL4030_REG_VAUX2_4030 |
784 | : TWL4030_REG_VAUX2, | 871 | : TWL4030_REG_VAUX2, |
785 | pdata->vaux2); | 872 | pdata->vaux2, features); |
786 | if (IS_ERR(child)) | 873 | if (IS_ERR(child)) |
787 | return PTR_ERR(child); | 874 | return PTR_ERR(child); |
788 | 875 | ||
789 | child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1); | 876 | child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1, |
877 | features); | ||
790 | if (IS_ERR(child)) | 878 | if (IS_ERR(child)) |
791 | return PTR_ERR(child); | 879 | return PTR_ERR(child); |
792 | 880 | ||
793 | child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2); | 881 | child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2, |
882 | features); | ||
794 | if (IS_ERR(child)) | 883 | if (IS_ERR(child)) |
795 | return PTR_ERR(child); | 884 | return PTR_ERR(child); |
796 | 885 | ||
797 | child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig); | 886 | child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig, |
887 | features); | ||
798 | if (IS_ERR(child)) | 888 | if (IS_ERR(child)) |
799 | return PTR_ERR(child); | 889 | return PTR_ERR(child); |
800 | } | 890 | } |
@@ -802,72 +892,152 @@ add_children(struct twl4030_platform_data *pdata, unsigned long features) | |||
802 | /* maybe add LDOs that are omitted on cost-reduced parts */ | 892 | /* maybe add LDOs that are omitted on cost-reduced parts */ |
803 | if (twl_has_regulator() && !(features & TPS_SUBSET) | 893 | if (twl_has_regulator() && !(features & TPS_SUBSET) |
804 | && twl_class_is_4030()) { | 894 | && twl_class_is_4030()) { |
805 | child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2); | 895 | child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2, |
896 | features); | ||
806 | if (IS_ERR(child)) | 897 | if (IS_ERR(child)) |
807 | return PTR_ERR(child); | 898 | return PTR_ERR(child); |
808 | 899 | ||
809 | child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2); | 900 | child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2, |
901 | features); | ||
810 | if (IS_ERR(child)) | 902 | if (IS_ERR(child)) |
811 | return PTR_ERR(child); | 903 | return PTR_ERR(child); |
812 | 904 | ||
813 | child = add_regulator(TWL4030_REG_VSIM, pdata->vsim); | 905 | child = add_regulator(TWL4030_REG_VSIM, pdata->vsim, |
906 | features); | ||
814 | if (IS_ERR(child)) | 907 | if (IS_ERR(child)) |
815 | return PTR_ERR(child); | 908 | return PTR_ERR(child); |
816 | 909 | ||
817 | child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1); | 910 | child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1, |
911 | features); | ||
818 | if (IS_ERR(child)) | 912 | if (IS_ERR(child)) |
819 | return PTR_ERR(child); | 913 | return PTR_ERR(child); |
820 | 914 | ||
821 | child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3); | 915 | child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3, |
916 | features); | ||
822 | if (IS_ERR(child)) | 917 | if (IS_ERR(child)) |
823 | return PTR_ERR(child); | 918 | return PTR_ERR(child); |
824 | 919 | ||
825 | child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4); | 920 | child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4, |
921 | features); | ||
826 | if (IS_ERR(child)) | 922 | if (IS_ERR(child)) |
827 | return PTR_ERR(child); | 923 | return PTR_ERR(child); |
828 | } | 924 | } |
829 | 925 | ||
830 | /* twl6030 regulators */ | 926 | /* twl6030 regulators */ |
927 | if (twl_has_regulator() && twl_class_is_6030() && | ||
928 | !(features & TWL6025_SUBCLASS)) { | ||
929 | child = add_regulator(TWL6030_REG_VMMC, pdata->vmmc, | ||
930 | features); | ||
931 | if (IS_ERR(child)) | ||
932 | return PTR_ERR(child); | ||
933 | |||
934 | child = add_regulator(TWL6030_REG_VPP, pdata->vpp, | ||
935 | features); | ||
936 | if (IS_ERR(child)) | ||
937 | return PTR_ERR(child); | ||
938 | |||
939 | child = add_regulator(TWL6030_REG_VUSIM, pdata->vusim, | ||
940 | features); | ||
941 | if (IS_ERR(child)) | ||
942 | return PTR_ERR(child); | ||
943 | |||
944 | child = add_regulator(TWL6030_REG_VCXIO, pdata->vcxio, | ||
945 | features); | ||
946 | if (IS_ERR(child)) | ||
947 | return PTR_ERR(child); | ||
948 | |||
949 | child = add_regulator(TWL6030_REG_VDAC, pdata->vdac, | ||
950 | features); | ||
951 | if (IS_ERR(child)) | ||
952 | return PTR_ERR(child); | ||
953 | |||
954 | child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1, | ||
955 | features); | ||
956 | if (IS_ERR(child)) | ||
957 | return PTR_ERR(child); | ||
958 | |||
959 | child = add_regulator(TWL6030_REG_VAUX2_6030, pdata->vaux2, | ||
960 | features); | ||
961 | if (IS_ERR(child)) | ||
962 | return PTR_ERR(child); | ||
963 | |||
964 | child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3, | ||
965 | features); | ||
966 | if (IS_ERR(child)) | ||
967 | return PTR_ERR(child); | ||
968 | |||
969 | child = add_regulator(TWL6030_REG_CLK32KG, pdata->clk32kg, | ||
970 | features); | ||
971 | if (IS_ERR(child)) | ||
972 | return PTR_ERR(child); | ||
973 | } | ||
974 | |||
975 | /* 6030 and 6025 share this regulator */ | ||
831 | if (twl_has_regulator() && twl_class_is_6030()) { | 976 | if (twl_has_regulator() && twl_class_is_6030()) { |
832 | child = add_regulator(TWL6030_REG_VMMC, pdata->vmmc); | 977 | child = add_regulator(TWL6030_REG_VANA, pdata->vana, |
978 | features); | ||
833 | if (IS_ERR(child)) | 979 | if (IS_ERR(child)) |
834 | return PTR_ERR(child); | 980 | return PTR_ERR(child); |
981 | } | ||
835 | 982 | ||
836 | child = add_regulator(TWL6030_REG_VPP, pdata->vpp); | 983 | /* twl6025 regulators */ |
984 | if (twl_has_regulator() && twl_class_is_6030() && | ||
985 | (features & TWL6025_SUBCLASS)) { | ||
986 | child = add_regulator(TWL6025_REG_LDO5, pdata->ldo5, | ||
987 | features); | ||
837 | if (IS_ERR(child)) | 988 | if (IS_ERR(child)) |
838 | return PTR_ERR(child); | 989 | return PTR_ERR(child); |
839 | 990 | ||
840 | child = add_regulator(TWL6030_REG_VUSIM, pdata->vusim); | 991 | child = add_regulator(TWL6025_REG_LDO1, pdata->ldo1, |
992 | features); | ||
841 | if (IS_ERR(child)) | 993 | if (IS_ERR(child)) |
842 | return PTR_ERR(child); | 994 | return PTR_ERR(child); |
843 | 995 | ||
844 | child = add_regulator(TWL6030_REG_VANA, pdata->vana); | 996 | child = add_regulator(TWL6025_REG_LDO7, pdata->ldo7, |
997 | features); | ||
845 | if (IS_ERR(child)) | 998 | if (IS_ERR(child)) |
846 | return PTR_ERR(child); | 999 | return PTR_ERR(child); |
847 | 1000 | ||
848 | child = add_regulator(TWL6030_REG_VCXIO, pdata->vcxio); | 1001 | child = add_regulator(TWL6025_REG_LDO6, pdata->ldo6, |
1002 | features); | ||
849 | if (IS_ERR(child)) | 1003 | if (IS_ERR(child)) |
850 | return PTR_ERR(child); | 1004 | return PTR_ERR(child); |
851 | 1005 | ||
852 | child = add_regulator(TWL6030_REG_VDAC, pdata->vdac); | 1006 | child = add_regulator(TWL6025_REG_LDOLN, pdata->ldoln, |
1007 | features); | ||
853 | if (IS_ERR(child)) | 1008 | if (IS_ERR(child)) |
854 | return PTR_ERR(child); | 1009 | return PTR_ERR(child); |
855 | 1010 | ||
856 | child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1); | 1011 | child = add_regulator(TWL6025_REG_LDO2, pdata->ldo2, |
1012 | features); | ||
857 | if (IS_ERR(child)) | 1013 | if (IS_ERR(child)) |
858 | return PTR_ERR(child); | 1014 | return PTR_ERR(child); |
859 | 1015 | ||
860 | child = add_regulator(TWL6030_REG_VAUX2_6030, pdata->vaux2); | 1016 | child = add_regulator(TWL6025_REG_LDO4, pdata->ldo4, |
1017 | features); | ||
861 | if (IS_ERR(child)) | 1018 | if (IS_ERR(child)) |
862 | return PTR_ERR(child); | 1019 | return PTR_ERR(child); |
863 | 1020 | ||
864 | child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3); | 1021 | child = add_regulator(TWL6025_REG_LDO3, pdata->ldo3, |
1022 | features); | ||
865 | if (IS_ERR(child)) | 1023 | if (IS_ERR(child)) |
866 | return PTR_ERR(child); | 1024 | return PTR_ERR(child); |
867 | 1025 | ||
868 | child = add_regulator(TWL6030_REG_CLK32KG, pdata->clk32kg); | 1026 | child = add_regulator(TWL6025_REG_SMPS3, pdata->smps3, |
1027 | features); | ||
869 | if (IS_ERR(child)) | 1028 | if (IS_ERR(child)) |
870 | return PTR_ERR(child); | 1029 | return PTR_ERR(child); |
1030 | |||
1031 | child = add_regulator(TWL6025_REG_SMPS4, pdata->smps4, | ||
1032 | features); | ||
1033 | if (IS_ERR(child)) | ||
1034 | return PTR_ERR(child); | ||
1035 | |||
1036 | child = add_regulator(TWL6025_REG_VIO, pdata->vio6025, | ||
1037 | features); | ||
1038 | if (IS_ERR(child)) | ||
1039 | return PTR_ERR(child); | ||
1040 | |||
871 | } | 1041 | } |
872 | 1042 | ||
873 | if (twl_has_bci() && pdata->bci && | 1043 | if (twl_has_bci() && pdata->bci && |
@@ -1014,6 +1184,7 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
1014 | unsigned i; | 1184 | unsigned i; |
1015 | struct twl4030_platform_data *pdata = client->dev.platform_data; | 1185 | struct twl4030_platform_data *pdata = client->dev.platform_data; |
1016 | u8 temp; | 1186 | u8 temp; |
1187 | int ret = 0; | ||
1017 | 1188 | ||
1018 | if (!pdata) { | 1189 | if (!pdata) { |
1019 | dev_dbg(&client->dev, "no platform data?\n"); | 1190 | dev_dbg(&client->dev, "no platform data?\n"); |
@@ -1060,6 +1231,12 @@ twl_probe(struct i2c_client *client, const struct i2c_device_id *id) | |||
1060 | /* setup clock framework */ | 1231 | /* setup clock framework */ |
1061 | clocks_init(&client->dev, pdata->clock); | 1232 | clocks_init(&client->dev, pdata->clock); |
1062 | 1233 | ||
1234 | /* read TWL IDCODE Register */ | ||
1235 | if (twl_id == TWL4030_CLASS_ID) { | ||
1236 | ret = twl_read_idcode_register(); | ||
1237 | WARN(ret < 0, "Error: reading twl_idcode register value\n"); | ||
1238 | } | ||
1239 | |||
1063 | /* load power event scripts */ | 1240 | /* load power event scripts */ |
1064 | if (twl_has_power() && pdata->power) | 1241 | if (twl_has_power() && pdata->power) |
1065 | twl4030_power_init(pdata->power); | 1242 | twl4030_power_init(pdata->power); |
@@ -1108,6 +1285,7 @@ static const struct i2c_device_id twl_ids[] = { | |||
1108 | { "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */ | 1285 | { "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */ |
1109 | { "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */ | 1286 | { "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */ |
1110 | { "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */ | 1287 | { "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */ |
1288 | { "twl6025", TWL6030_CLASS | TWL6025_SUBCLASS }, /* "Phoenix lite" */ | ||
1111 | { /* end of list */ }, | 1289 | { /* end of list */ }, |
1112 | }; | 1290 | }; |
1113 | MODULE_DEVICE_TABLE(i2c, twl_ids); | 1291 | MODULE_DEVICE_TABLE(i2c, twl_ids); |
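The twl_get_type()/twl_get_version() helpers added above simply decode the IDCODE value cached at probe time via TWL_SIL_TYPE()/TWL_SIL_REV(); a sketch of a consumer follows. The silicon constants (TWL_SIL_5030, TWL5030_REV_1_1) are assumed to come from the matching <linux/i2c/twl.h> update and are not part of this hunk.

#include <linux/i2c/twl.h>

/* Sketch: keying a chip-revision workaround off the new helpers.
 * TWL_SIL_5030 and TWL5030_REV_1_1 are assumed from the companion
 * header change; they do not appear in this diff.
 */
static void example_apply_revision_quirk(void)
{
	if (twl_get_type() == TWL_SIL_5030 &&
	    twl_get_version() == TWL5030_REV_1_1) {
		/* apply the TWL5030 ES1.1-specific workaround here */
	}
}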
diff --git a/drivers/mfd/twl4030-codec.c b/drivers/mfd/twl4030-codec.c index c02fded316c9..2bf4136464c1 100644 --- a/drivers/mfd/twl4030-codec.c +++ b/drivers/mfd/twl4030-codec.c | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * MFD driver for twl4030 codec submodule | 2 | * MFD driver for twl4030 codec submodule |
3 | * | 3 | * |
4 | * Author: Peter Ujfalusi <peter.ujfalusi@nokia.com> | 4 | * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> |
5 | * | 5 | * |
6 | * Copyright: (C) 2009 Nokia Corporation | 6 | * Copyright: (C) 2009 Nokia Corporation |
7 | * | 7 | * |
@@ -208,13 +208,15 @@ static int __devinit twl4030_codec_probe(struct platform_device *pdev) | |||
208 | if (pdata->audio) { | 208 | if (pdata->audio) { |
209 | cell = &codec->cells[childs]; | 209 | cell = &codec->cells[childs]; |
210 | cell->name = "twl4030-codec"; | 210 | cell->name = "twl4030-codec"; |
211 | cell->mfd_data = pdata->audio; | 211 | cell->platform_data = pdata->audio; |
212 | cell->pdata_size = sizeof(*pdata->audio); | ||
212 | childs++; | 213 | childs++; |
213 | } | 214 | } |
214 | if (pdata->vibra) { | 215 | if (pdata->vibra) { |
215 | cell = &codec->cells[childs]; | 216 | cell = &codec->cells[childs]; |
216 | cell->name = "twl4030-vibra"; | 217 | cell->name = "twl4030-vibra"; |
217 | cell->mfd_data = pdata->vibra; | 218 | cell->platform_data = pdata->vibra; |
219 | cell->pdata_size = sizeof(*pdata->vibra); | ||
218 | childs++; | 220 | childs++; |
219 | } | 221 | } |
220 | 222 | ||
@@ -270,6 +272,6 @@ static void __devexit twl4030_codec_exit(void) | |||
270 | } | 272 | } |
271 | module_exit(twl4030_codec_exit); | 273 | module_exit(twl4030_codec_exit); |
272 | 274 | ||
273 | MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@nokia.com>"); | 275 | MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>"); |
274 | MODULE_LICENSE("GPL"); | 276 | MODULE_LICENSE("GPL"); |
275 | 277 | ||
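The conversion above from cell->mfd_data to cell->platform_data plus cell->pdata_size means the MFD core now hands the payload to the child as ordinary platform data, so the child reads it from pdev->dev.platform_data instead of calling mfd_get_data() (the same pattern the tmio_mmc, tmio_nand, janz-ican3 and ks8842 hunks below switch to). A minimal sketch of the child side, with the audio pdata struct name assumed for illustration:

#include <linux/platform_device.h>
#include <linux/mfd/twl4030-codec.h>

/* Sketch of the child-driver side after the mfd_data -> platform_data
 * conversion; the struct name is assumed from the twl4030 codec
 * headers and is not shown in this hunk.
 */
static int __devinit twl4030_audio_child_probe(struct platform_device *pdev)
{
	struct twl4030_codec_audio_data *pdata = pdev->dev.platform_data;

	if (!pdata)
		return -EINVAL;

	/* use pdata->... exactly as before; no mfd_get_data() needed */
	return 0;
}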
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c index 2c0d4d16491a..a764676f0922 100644 --- a/drivers/mfd/twl4030-power.c +++ b/drivers/mfd/twl4030-power.c | |||
@@ -120,7 +120,7 @@ static u8 res_config_addrs[] = { | |||
120 | [RES_HFCLKOUT] = 0x8b, | 120 | [RES_HFCLKOUT] = 0x8b, |
121 | [RES_32KCLKOUT] = 0x8e, | 121 | [RES_32KCLKOUT] = 0x8e, |
122 | [RES_RESET] = 0x91, | 122 | [RES_RESET] = 0x91, |
123 | [RES_Main_Ref] = 0x94, | 123 | [RES_MAIN_REF] = 0x94, |
124 | }; | 124 | }; |
125 | 125 | ||
126 | static int __init twl4030_write_script_byte(u8 address, u8 byte) | 126 | static int __init twl4030_write_script_byte(u8 address, u8 byte) |
@@ -448,7 +448,7 @@ static int __init load_twl4030_script(struct twl4030_script *tscript, | |||
448 | goto out; | 448 | goto out; |
449 | } | 449 | } |
450 | if (tscript->flags & TWL4030_SLEEP_SCRIPT) { | 450 | if (tscript->flags & TWL4030_SLEEP_SCRIPT) { |
451 | if (order) | 451 | if (!order) |
452 | pr_warning("TWL4030: Bad order of scripts (sleep "\ | 452 | pr_warning("TWL4030: Bad order of scripts (sleep "\ |
453 | "script before wakeup) Leads to boot"\ | 453 | "script before wakeup) Leads to boot"\ |
454 | "failure on some boards\n"); | 454 | "failure on some boards\n"); |
@@ -485,9 +485,9 @@ int twl4030_remove_script(u8 flags) | |||
485 | return err; | 485 | return err; |
486 | } | 486 | } |
487 | if (flags & TWL4030_WAKEUP12_SCRIPT) { | 487 | if (flags & TWL4030_WAKEUP12_SCRIPT) { |
488 | if (err) | ||
489 | err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT, | 488 | err = twl_i2c_write_u8(TWL4030_MODULE_PM_MASTER, END_OF_SCRIPT, |
490 | R_SEQ_ADD_S2A12); | 489 | R_SEQ_ADD_S2A12); |
490 | if (err) | ||
491 | return err; | 491 | return err; |
492 | } | 492 | } |
493 | if (flags & TWL4030_WAKEUP3_SCRIPT) { | 493 | if (flags & TWL4030_WAKEUP3_SCRIPT) { |
diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c index dfbae34e1804..eb3b5f88e566 100644 --- a/drivers/mfd/twl6030-irq.c +++ b/drivers/mfd/twl6030-irq.c | |||
@@ -76,8 +76,8 @@ static int twl6030_interrupt_mapping[24] = { | |||
76 | USBOTG_INTR_OFFSET, /* Bit 18 ID */ | 76 | USBOTG_INTR_OFFSET, /* Bit 18 ID */ |
77 | USB_PRES_INTR_OFFSET, /* Bit 19 VBUS */ | 77 | USB_PRES_INTR_OFFSET, /* Bit 19 VBUS */ |
78 | CHARGER_INTR_OFFSET, /* Bit 20 CHRG_CTRL */ | 78 | CHARGER_INTR_OFFSET, /* Bit 20 CHRG_CTRL */ |
79 | CHARGER_INTR_OFFSET, /* Bit 21 EXT_CHRG */ | 79 | CHARGERFAULT_INTR_OFFSET, /* Bit 21 EXT_CHRG */ |
80 | CHARGER_INTR_OFFSET, /* Bit 22 INT_CHRG */ | 80 | CHARGERFAULT_INTR_OFFSET, /* Bit 22 INT_CHRG */ |
81 | RSV_INTR_OFFSET, /* Bit 23 Reserved */ | 81 | RSV_INTR_OFFSET, /* Bit 23 Reserved */ |
82 | }; | 82 | }; |
83 | /*----------------------------------------------------------------------*/ | 83 | /*----------------------------------------------------------------------*/ |
diff --git a/drivers/mfd/wl1273-core.c b/drivers/mfd/wl1273-core.c index 04914f2836c0..d97a86945174 100644 --- a/drivers/mfd/wl1273-core.c +++ b/drivers/mfd/wl1273-core.c | |||
@@ -153,7 +153,6 @@ out: | |||
153 | */ | 153 | */ |
154 | static int wl1273_fm_set_volume(struct wl1273_core *core, unsigned int volume) | 154 | static int wl1273_fm_set_volume(struct wl1273_core *core, unsigned int volume) |
155 | { | 155 | { |
156 | u16 val; | ||
157 | int r; | 156 | int r; |
158 | 157 | ||
159 | if (volume > WL1273_MAX_VOLUME) | 158 | if (volume > WL1273_MAX_VOLUME) |
@@ -217,7 +216,8 @@ static int __devinit wl1273_core_probe(struct i2c_client *client, | |||
217 | 216 | ||
218 | cell = &core->cells[children]; | 217 | cell = &core->cells[children]; |
219 | cell->name = "wl1273_fm_radio"; | 218 | cell->name = "wl1273_fm_radio"; |
220 | cell->mfd_data = &core; | 219 | cell->platform_data = &core; |
220 | cell->pdata_size = sizeof(core); | ||
221 | children++; | 221 | children++; |
222 | 222 | ||
223 | core->read = wl1273_fm_read_reg; | 223 | core->read = wl1273_fm_read_reg; |
@@ -231,7 +231,8 @@ static int __devinit wl1273_core_probe(struct i2c_client *client, | |||
231 | 231 | ||
232 | dev_dbg(&client->dev, "%s: Have codec.\n", __func__); | 232 | dev_dbg(&client->dev, "%s: Have codec.\n", __func__); |
233 | cell->name = "wl1273-codec"; | 233 | cell->name = "wl1273-codec"; |
234 | cell->mfd_data = &core; | 234 | cell->platform_data = &core; |
235 | cell->pdata_size = sizeof(core); | ||
235 | children++; | 236 | children++; |
236 | } | 237 | } |
237 | 238 | ||
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c index 3fe9a58fe6c7..265f75fc6a25 100644 --- a/drivers/mfd/wm831x-core.c +++ b/drivers/mfd/wm831x-core.c | |||
@@ -1442,7 +1442,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) | |||
1442 | struct wm831x_pdata *pdata = wm831x->dev->platform_data; | 1442 | struct wm831x_pdata *pdata = wm831x->dev->platform_data; |
1443 | int rev; | 1443 | int rev; |
1444 | enum wm831x_parent parent; | 1444 | enum wm831x_parent parent; |
1445 | int ret; | 1445 | int ret, i; |
1446 | 1446 | ||
1447 | mutex_init(&wm831x->io_lock); | 1447 | mutex_init(&wm831x->io_lock); |
1448 | mutex_init(&wm831x->key_lock); | 1448 | mutex_init(&wm831x->key_lock); |
@@ -1581,6 +1581,17 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq) | |||
1581 | } | 1581 | } |
1582 | } | 1582 | } |
1583 | 1583 | ||
1584 | if (pdata) { | ||
1585 | for (i = 0; i < ARRAY_SIZE(pdata->gpio_defaults); i++) { | ||
1586 | if (!pdata->gpio_defaults[i]) | ||
1587 | continue; | ||
1588 | |||
1589 | wm831x_reg_write(wm831x, | ||
1590 | WM831X_GPIO1_CONTROL + i, | ||
1591 | pdata->gpio_defaults[i] & 0xffff); | ||
1592 | } | ||
1593 | } | ||
1594 | |||
1584 | ret = wm831x_irq_init(wm831x, irq); | 1595 | ret = wm831x_irq_init(wm831x, irq); |
1585 | if (ret != 0) | 1596 | if (ret != 0) |
1586 | goto err; | 1597 | goto err; |
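The gpio_defaults loop added above writes every non-zero pdata->gpio_defaults[i] into WM831X_GPIO1_CONTROL + i during init, masking with 0xffff. A board-data sketch follows; because 0 means "leave the register alone" and only the low 16 bits are written, the usual convention of encoding an all-zero register value as 0x10000 is assumed here, since it is not visible in this hunk.

#include <linux/mfd/wm831x/pdata.h>

/* Sketch: board platform data feeding the new gpio_defaults loop.
 * Register bit values are illustrative; the 0x10000-encodes-zero
 * convention is an assumption (the loop skips 0 and masks 0xffff).
 */
static struct wm831x_pdata board_wm831x_pdata = {
	.gpio_defaults = {
		[0] = 0x8000,	/* GPIO1_CONTROL: example setting */
		[3] = 0x10000,	/* GPIO4_CONTROL: write 0 */
	},
};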
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c index 23e66af89dea..42b928ec891e 100644 --- a/drivers/mfd/wm831x-irq.c +++ b/drivers/mfd/wm831x-irq.c | |||
@@ -515,12 +515,6 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq) | |||
515 | 0xffff); | 515 | 0xffff); |
516 | } | 516 | } |
517 | 517 | ||
518 | if (!irq) { | ||
519 | dev_warn(wm831x->dev, | ||
520 | "No interrupt specified - functionality limited\n"); | ||
521 | return 0; | ||
522 | } | ||
523 | |||
524 | if (!pdata || !pdata->irq_base) { | 518 | if (!pdata || !pdata->irq_base) { |
525 | dev_err(wm831x->dev, | 519 | dev_err(wm831x->dev, |
526 | "No interrupt base specified, no interrupts\n"); | 520 | "No interrupt base specified, no interrupts\n"); |
@@ -567,15 +561,22 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq) | |||
567 | #endif | 561 | #endif |
568 | } | 562 | } |
569 | 563 | ||
570 | ret = request_threaded_irq(irq, NULL, wm831x_irq_thread, | 564 | if (irq) { |
571 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, | 565 | ret = request_threaded_irq(irq, NULL, wm831x_irq_thread, |
572 | "wm831x", wm831x); | 566 | IRQF_TRIGGER_LOW | IRQF_ONESHOT, |
573 | if (ret != 0) { | 567 | "wm831x", wm831x); |
574 | dev_err(wm831x->dev, "Failed to request IRQ %d: %d\n", | 568 | if (ret != 0) { |
575 | irq, ret); | 569 | dev_err(wm831x->dev, "Failed to request IRQ %d: %d\n", |
576 | return ret; | 570 | irq, ret); |
571 | return ret; | ||
572 | } | ||
573 | } else { | ||
574 | dev_warn(wm831x->dev, | ||
575 | "No interrupt specified - functionality limited\n"); | ||
577 | } | 576 | } |
578 | 577 | ||
578 | |||
579 | |||
579 | /* Enable top level interrupts, we mask at secondary level */ | 580 | /* Enable top level interrupts, we mask at secondary level */ |
580 | wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0); | 581 | wm831x_reg_write(wm831x, WM831X_SYSTEM_INTERRUPTS_MASK, 0); |
581 | 582 | ||
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c index 3a6e78cb0384..597f82edacaa 100644 --- a/drivers/mfd/wm8400-core.c +++ b/drivers/mfd/wm8400-core.c | |||
@@ -245,7 +245,8 @@ static int wm8400_register_codec(struct wm8400 *wm8400) | |||
245 | { | 245 | { |
246 | struct mfd_cell cell = { | 246 | struct mfd_cell cell = { |
247 | .name = "wm8400-codec", | 247 | .name = "wm8400-codec", |
248 | .mfd_data = wm8400, | 248 | .platform_data = wm8400, |
249 | .pdata_size = sizeof(*wm8400), | ||
249 | }; | 250 | }; |
250 | 251 | ||
251 | return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0); | 252 | return mfd_add_devices(wm8400->dev, -1, &cell, 1, NULL, 0); |
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c index 74f16f167b8e..b0c56313dbbb 100644 --- a/drivers/misc/kgdbts.c +++ b/drivers/misc/kgdbts.c | |||
@@ -285,33 +285,26 @@ static void hw_break_val_write(void) | |||
285 | static int check_and_rewind_pc(char *put_str, char *arg) | 285 | static int check_and_rewind_pc(char *put_str, char *arg) |
286 | { | 286 | { |
287 | unsigned long addr = lookup_addr(arg); | 287 | unsigned long addr = lookup_addr(arg); |
288 | unsigned long ip; | ||
288 | int offset = 0; | 289 | int offset = 0; |
289 | 290 | ||
290 | kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, | 291 | kgdb_hex2mem(&put_str[1], (char *)kgdbts_gdb_regs, |
291 | NUMREGBYTES); | 292 | NUMREGBYTES); |
292 | gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); | 293 | gdb_regs_to_pt_regs(kgdbts_gdb_regs, &kgdbts_regs); |
293 | v2printk("Stopped at IP: %lx\n", instruction_pointer(&kgdbts_regs)); | 294 | ip = instruction_pointer(&kgdbts_regs); |
294 | #ifdef CONFIG_X86 | 295 | v2printk("Stopped at IP: %lx\n", ip); |
295 | /* On x86 a breakpoint stop requires it to be decremented */ | 296 | #ifdef GDB_ADJUSTS_BREAK_OFFSET |
296 | if (addr + 1 == kgdbts_regs.ip) | 297 | /* On some arches, a breakpoint stop requires it to be decremented */ |
297 | offset = -1; | 298 | if (addr + BREAK_INSTR_SIZE == ip) |
298 | #elif defined(CONFIG_SUPERH) | 299 | offset = -BREAK_INSTR_SIZE; |
299 | /* On SUPERH a breakpoint stop requires it to be decremented */ | ||
300 | if (addr + 2 == kgdbts_regs.pc) | ||
301 | offset = -2; | ||
302 | #endif | 300 | #endif |
303 | if (strcmp(arg, "silent") && | 301 | if (strcmp(arg, "silent") && ip + offset != addr) { |
304 | instruction_pointer(&kgdbts_regs) + offset != addr) { | ||
305 | eprintk("kgdbts: BP mismatch %lx expected %lx\n", | 302 | eprintk("kgdbts: BP mismatch %lx expected %lx\n", |
306 | instruction_pointer(&kgdbts_regs) + offset, addr); | 303 | ip + offset, addr); |
307 | return 1; | 304 | return 1; |
308 | } | 305 | } |
309 | #ifdef CONFIG_X86 | 306 | /* Readjust the instruction pointer if needed */ |
310 | /* On x86 adjust the instruction pointer if needed */ | 307 | instruction_pointer_set(&kgdbts_regs, ip + offset); |
311 | kgdbts_regs.ip += offset; | ||
312 | #elif defined(CONFIG_SUPERH) | ||
313 | kgdbts_regs.pc += offset; | ||
314 | #endif | ||
315 | return 0; | 308 | return 0; |
316 | } | 309 | } |
317 | 310 | ||
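The rewritten check_and_rewind_pc() above replaces the per-architecture #ifdef CONFIG_X86/CONFIG_SUPERH blocks with a single GDB_ADJUSTS_BREAK_OFFSET opt-in plus the generic instruction_pointer_set() helper. A sketch of the per-arch contract it now relies on is below; the real definitions come from the companion arch patches, not from this hunk.

/* Sketch of what an opting-in architecture is assumed to provide
 * (x86-flavoured; names and values illustrate the contract, they are
 * not copied from this patch).
 */

/* <asm/kgdb.h> */
#define BREAK_INSTR_SIZE	1	/* int3 is a single byte */
#define GDB_ADJUSTS_BREAK_OFFSET	/* PC is reported past the break */

/* <asm-generic/ptrace.h>-style helper used by kgdbts above */
static inline void instruction_pointer_set(struct pt_regs *regs,
					   unsigned long val)
{
	regs->ip = val;		/* arches using ->pc assign that instead */
}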
diff --git a/drivers/mmc/host/tmio_mmc.c b/drivers/mmc/host/tmio_mmc.c index 14479f9ef53f..8d185de90d20 100644 --- a/drivers/mmc/host/tmio_mmc.c +++ b/drivers/mmc/host/tmio_mmc.c | |||
@@ -69,7 +69,7 @@ static int __devinit tmio_mmc_probe(struct platform_device *pdev) | |||
69 | if (pdev->num_resources != 2) | 69 | if (pdev->num_resources != 2) |
70 | goto out; | 70 | goto out; |
71 | 71 | ||
72 | pdata = mfd_get_data(pdev); | 72 | pdata = pdev->dev.platform_data; |
73 | if (!pdata || !pdata->hclk) | 73 | if (!pdata || !pdata->hclk) |
74 | goto out; | 74 | goto out; |
75 | 75 | ||
diff --git a/drivers/mtd/nand/tmio_nand.c b/drivers/mtd/nand/tmio_nand.c index 14c578707824..c004e474631b 100644 --- a/drivers/mtd/nand/tmio_nand.c +++ b/drivers/mtd/nand/tmio_nand.c | |||
@@ -372,7 +372,7 @@ static void tmio_hw_stop(struct platform_device *dev, struct tmio_nand *tmio) | |||
372 | 372 | ||
373 | static int tmio_probe(struct platform_device *dev) | 373 | static int tmio_probe(struct platform_device *dev) |
374 | { | 374 | { |
375 | struct tmio_nand_data *data = mfd_get_data(dev); | 375 | struct tmio_nand_data *data = dev->dev.platform_data; |
376 | struct resource *fcr = platform_get_resource(dev, | 376 | struct resource *fcr = platform_get_resource(dev, |
377 | IORESOURCE_MEM, 0); | 377 | IORESOURCE_MEM, 0); |
378 | struct resource *ccr = platform_get_resource(dev, | 378 | struct resource *ccr = platform_get_resource(dev, |
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index 6141667c5fb7..17b4dd94da90 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c | |||
@@ -113,9 +113,11 @@ MODULE_PARM_DESC(max_bonds, "Max number of bonded devices"); | |||
113 | module_param(tx_queues, int, 0); | 113 | module_param(tx_queues, int, 0); |
114 | MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)"); | 114 | MODULE_PARM_DESC(tx_queues, "Max number of transmit queues (default = 16)"); |
115 | module_param_named(num_grat_arp, num_peer_notif, int, 0644); | 115 | module_param_named(num_grat_arp, num_peer_notif, int, 0644); |
116 | MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on failover event (alias of num_unsol_na)"); | 116 | MODULE_PARM_DESC(num_grat_arp, "Number of peer notifications to send on " |
117 | "failover event (alias of num_unsol_na)"); | ||
117 | module_param_named(num_unsol_na, num_peer_notif, int, 0644); | 118 | module_param_named(num_unsol_na, num_peer_notif, int, 0644); |
118 | MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on failover event (alias of num_grat_arp)"); | 119 | MODULE_PARM_DESC(num_unsol_na, "Number of peer notifications to send on " |
120 | "failover event (alias of num_grat_arp)"); | ||
119 | module_param(miimon, int, 0); | 121 | module_param(miimon, int, 0); |
120 | MODULE_PARM_DESC(miimon, "Link check interval in milliseconds"); | 122 | MODULE_PARM_DESC(miimon, "Link check interval in milliseconds"); |
121 | module_param(updelay, int, 0); | 123 | module_param(updelay, int, 0); |
@@ -127,7 +129,7 @@ module_param(use_carrier, int, 0); | |||
127 | MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; " | 129 | MODULE_PARM_DESC(use_carrier, "Use netif_carrier_ok (vs MII ioctls) in miimon; " |
128 | "0 for off, 1 for on (default)"); | 130 | "0 for off, 1 for on (default)"); |
129 | module_param(mode, charp, 0); | 131 | module_param(mode, charp, 0); |
130 | MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, " | 132 | MODULE_PARM_DESC(mode, "Mode of operation; 0 for balance-rr, " |
131 | "1 for active-backup, 2 for balance-xor, " | 133 | "1 for active-backup, 2 for balance-xor, " |
132 | "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, " | 134 | "3 for broadcast, 4 for 802.3ad, 5 for balance-tlb, " |
133 | "6 for balance-alb"); | 135 | "6 for balance-alb"); |
@@ -142,27 +144,35 @@ MODULE_PARM_DESC(primary_reselect, "Reselect primary slave " | |||
142 | "2 for only on active slave " | 144 | "2 for only on active slave " |
143 | "failure"); | 145 | "failure"); |
144 | module_param(lacp_rate, charp, 0); | 146 | module_param(lacp_rate, charp, 0); |
145 | MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner " | 147 | MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner; " |
146 | "(slow/fast)"); | 148 | "0 for slow, 1 for fast"); |
147 | module_param(ad_select, charp, 0); | 149 | module_param(ad_select, charp, 0); |
148 | MODULE_PARM_DESC(ad_select, "803.ad aggregation selection logic: stable (0, default), bandwidth (1), count (2)"); | 150 | MODULE_PARM_DESC(ad_select, "802.3ad aggregation selection logic; " |
151 | "0 for stable (default), 1 for bandwidth, " | ||
152 | "2 for count"); | ||
149 | module_param(xmit_hash_policy, charp, 0); | 153 | module_param(xmit_hash_policy, charp, 0); |
150 | MODULE_PARM_DESC(xmit_hash_policy, "XOR hashing method: 0 for layer 2 (default)" | 154 | MODULE_PARM_DESC(xmit_hash_policy, "balance-xor and 802.3ad hashing method; " |
151 | ", 1 for layer 3+4"); | 155 | "0 for layer 2 (default), 1 for layer 3+4, " |
156 | "2 for layer 2+3"); | ||
152 | module_param(arp_interval, int, 0); | 157 | module_param(arp_interval, int, 0); |
153 | MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds"); | 158 | MODULE_PARM_DESC(arp_interval, "arp interval in milliseconds"); |
154 | module_param_array(arp_ip_target, charp, NULL, 0); | 159 | module_param_array(arp_ip_target, charp, NULL, 0); |
155 | MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form"); | 160 | MODULE_PARM_DESC(arp_ip_target, "arp targets in n.n.n.n form"); |
156 | module_param(arp_validate, charp, 0); | 161 | module_param(arp_validate, charp, 0); |
157 | MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes: none (default), active, backup or all"); | 162 | MODULE_PARM_DESC(arp_validate, "validate src/dst of ARP probes; " |
163 | "0 for none (default), 1 for active, " | ||
164 | "2 for backup, 3 for all"); | ||
158 | module_param(fail_over_mac, charp, 0); | 165 | module_param(fail_over_mac, charp, 0); |
159 | MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the same MAC. none (default), active or follow"); | 166 | MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to " |
167 | "the same MAC; 0 for none (default), " | ||
168 | "1 for active, 2 for follow"); | ||
160 | module_param(all_slaves_active, int, 0); | 169 | module_param(all_slaves_active, int, 0); |
161 | MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" | 170 | MODULE_PARM_DESC(all_slaves_active, "Keep all frames received on an interface" |
162 | "by setting active flag for all slaves. " | 171 | "by setting active flag for all slaves; " |
163 | "0 for never (default), 1 for always."); | 172 | "0 for never (default), 1 for always."); |
164 | module_param(resend_igmp, int, 0); | 173 | module_param(resend_igmp, int, 0); |
165 | MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on link failure"); | 174 | MODULE_PARM_DESC(resend_igmp, "Number of IGMP membership reports to send on " |
175 | "link failure"); | ||
166 | 176 | ||
167 | /*----------------------------- Global variables ----------------------------*/ | 177 | /*----------------------------- Global variables ----------------------------*/ |
168 | 178 | ||
diff --git a/drivers/net/can/janz-ican3.c b/drivers/net/can/janz-ican3.c index 587fba48cdd9..f1942cab35f6 100644 --- a/drivers/net/can/janz-ican3.c +++ b/drivers/net/can/janz-ican3.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/interrupt.h> | 15 | #include <linux/interrupt.h> |
16 | #include <linux/delay.h> | 16 | #include <linux/delay.h> |
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/mfd/core.h> | ||
19 | 18 | ||
20 | #include <linux/netdevice.h> | 19 | #include <linux/netdevice.h> |
21 | #include <linux/can.h> | 20 | #include <linux/can.h> |
@@ -1644,7 +1643,7 @@ static int __devinit ican3_probe(struct platform_device *pdev) | |||
1644 | struct device *dev; | 1643 | struct device *dev; |
1645 | int ret; | 1644 | int ret; |
1646 | 1645 | ||
1647 | pdata = mfd_get_data(pdev); | 1646 | pdata = pdev->dev.platform_data; |
1648 | if (!pdata) | 1647 | if (!pdata) |
1649 | return -ENXIO; | 1648 | return -ENXIO; |
1650 | 1649 | ||
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c index 807b6bb200eb..29a4f06fbfcf 100644 --- a/drivers/net/davinci_emac.c +++ b/drivers/net/davinci_emac.c | |||
@@ -1772,7 +1772,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) | |||
1772 | /* obtain emac clock from kernel */ | 1772 | /* obtain emac clock from kernel */ |
1773 | emac_clk = clk_get(&pdev->dev, NULL); | 1773 | emac_clk = clk_get(&pdev->dev, NULL); |
1774 | if (IS_ERR(emac_clk)) { | 1774 | if (IS_ERR(emac_clk)) { |
1775 | printk(KERN_ERR "DaVinci EMAC: Failed to get EMAC clock\n"); | 1775 | dev_err(&pdev->dev, "failed to get EMAC clock\n"); |
1776 | return -EBUSY; | 1776 | return -EBUSY; |
1777 | } | 1777 | } |
1778 | emac_bus_frequency = clk_get_rate(emac_clk); | 1778 | emac_bus_frequency = clk_get_rate(emac_clk); |
@@ -1780,7 +1780,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) | |||
1780 | 1780 | ||
1781 | ndev = alloc_etherdev(sizeof(struct emac_priv)); | 1781 | ndev = alloc_etherdev(sizeof(struct emac_priv)); |
1782 | if (!ndev) { | 1782 | if (!ndev) { |
1783 | printk(KERN_ERR "DaVinci EMAC: Error allocating net_device\n"); | 1783 | dev_err(&pdev->dev, "error allocating net_device\n"); |
1784 | clk_put(emac_clk); | 1784 | clk_put(emac_clk); |
1785 | return -ENOMEM; | 1785 | return -ENOMEM; |
1786 | } | 1786 | } |
@@ -1795,7 +1795,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) | |||
1795 | 1795 | ||
1796 | pdata = pdev->dev.platform_data; | 1796 | pdata = pdev->dev.platform_data; |
1797 | if (!pdata) { | 1797 | if (!pdata) { |
1798 | printk(KERN_ERR "DaVinci EMAC: No platform data\n"); | 1798 | dev_err(&pdev->dev, "no platform data\n"); |
1799 | return -ENODEV; | 1799 | return -ENODEV; |
1800 | } | 1800 | } |
1801 | 1801 | ||
@@ -1814,7 +1814,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) | |||
1814 | /* Get EMAC platform data */ | 1814 | /* Get EMAC platform data */ |
1815 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 1815 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
1816 | if (!res) { | 1816 | if (!res) { |
1817 | dev_err(emac_dev, "DaVinci EMAC: Error getting res\n"); | 1817 | dev_err(&pdev->dev, "error getting res\n"); | |
1818 | rc = -ENOENT; | 1818 | rc = -ENOENT; |
1819 | goto probe_quit; | 1819 | goto probe_quit; |
1820 | } | 1820 | } |
@@ -1822,14 +1822,14 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) | |||
1822 | priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; | 1822 | priv->emac_base_phys = res->start + pdata->ctrl_reg_offset; |
1823 | size = res->end - res->start + 1; | 1823 | size = res->end - res->start + 1; |
1824 | if (!request_mem_region(res->start, size, ndev->name)) { | 1824 | if (!request_mem_region(res->start, size, ndev->name)) { |
1825 | dev_err(emac_dev, "DaVinci EMAC: failed request_mem_region() for regs\n"); | 1825 | dev_err(&pdev->dev, "failed request_mem_region() for regs\n"); |
1826 | rc = -ENXIO; | 1826 | rc = -ENXIO; |
1827 | goto probe_quit; | 1827 | goto probe_quit; |
1828 | } | 1828 | } |
1829 | 1829 | ||
1830 | priv->remap_addr = ioremap(res->start, size); | 1830 | priv->remap_addr = ioremap(res->start, size); |
1831 | if (!priv->remap_addr) { | 1831 | if (!priv->remap_addr) { |
1832 | dev_err(emac_dev, "Unable to map IO\n"); | 1832 | dev_err(&pdev->dev, "unable to map IO\n"); |
1833 | rc = -ENOMEM; | 1833 | rc = -ENOMEM; |
1834 | release_mem_region(res->start, size); | 1834 | release_mem_region(res->start, size); |
1835 | goto probe_quit; | 1835 | goto probe_quit; |
@@ -1863,7 +1863,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) | |||
1863 | 1863 | ||
1864 | priv->dma = cpdma_ctlr_create(&dma_params); | 1864 | priv->dma = cpdma_ctlr_create(&dma_params); |
1865 | if (!priv->dma) { | 1865 | if (!priv->dma) { |
1866 | dev_err(emac_dev, "DaVinci EMAC: Error initializing DMA\n"); | 1866 | dev_err(&pdev->dev, "error initializing DMA\n"); |
1867 | rc = -ENOMEM; | 1867 | rc = -ENOMEM; |
1868 | goto no_dma; | 1868 | goto no_dma; |
1869 | } | 1869 | } |
@@ -1879,7 +1879,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) | |||
1879 | 1879 | ||
1880 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 1880 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
1881 | if (!res) { | 1881 | if (!res) { |
1882 | dev_err(emac_dev, "DaVinci EMAC: Error getting irq res\n"); | 1882 | dev_err(&pdev->dev, "error getting irq res\n"); |
1883 | rc = -ENOENT; | 1883 | rc = -ENOENT; |
1884 | goto no_irq_res; | 1884 | goto no_irq_res; |
1885 | } | 1885 | } |
@@ -1888,8 +1888,8 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) | |||
1888 | if (!is_valid_ether_addr(priv->mac_addr)) { | 1888 | if (!is_valid_ether_addr(priv->mac_addr)) { |
1889 | /* Use random MAC if none passed */ | 1889 | /* Use random MAC if none passed */ |
1890 | random_ether_addr(priv->mac_addr); | 1890 | random_ether_addr(priv->mac_addr); |
1891 | printk(KERN_WARNING "%s: using random MAC addr: %pM\n", | 1891 | dev_warn(&pdev->dev, "using random MAC addr: %pM\n", |
1892 | __func__, priv->mac_addr); | 1892 | priv->mac_addr); |
1893 | } | 1893 | } |
1894 | 1894 | ||
1895 | ndev->netdev_ops = &emac_netdev_ops; | 1895 | ndev->netdev_ops = &emac_netdev_ops; |
@@ -1902,7 +1902,7 @@ static int __devinit davinci_emac_probe(struct platform_device *pdev) | |||
1902 | SET_NETDEV_DEV(ndev, &pdev->dev); | 1902 | SET_NETDEV_DEV(ndev, &pdev->dev); |
1903 | rc = register_netdev(ndev); | 1903 | rc = register_netdev(ndev); |
1904 | if (rc) { | 1904 | if (rc) { |
1905 | dev_err(emac_dev, "DaVinci EMAC: Error in register_netdev\n"); | 1905 | dev_err(&pdev->dev, "error in register_netdev\n"); |
1906 | rc = -ENODEV; | 1906 | rc = -ENODEV; |
1907 | goto netdev_reg_err; | 1907 | goto netdev_reg_err; |
1908 | } | 1908 | } |
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c index a3c0dc9d8b98..9537aaa50c2f 100644 --- a/drivers/net/hamradio/baycom_epp.c +++ b/drivers/net/hamradio/baycom_epp.c | |||
@@ -69,7 +69,7 @@ static const char paranoia_str[] = KERN_ERR | |||
69 | 69 | ||
70 | static const char bc_drvname[] = "baycom_epp"; | 70 | static const char bc_drvname[] = "baycom_epp"; |
71 | static const char bc_drvinfo[] = KERN_INFO "baycom_epp: (C) 1998-2000 Thomas Sailer, HB9JNX/AE4WA\n" | 71 | static const char bc_drvinfo[] = KERN_INFO "baycom_epp: (C) 1998-2000 Thomas Sailer, HB9JNX/AE4WA\n" |
72 | "baycom_epp: version 0.7 compiled " __TIME__ " " __DATE__ "\n"; | 72 | "baycom_epp: version 0.7\n"; |
73 | 73 | ||
74 | /* --------------------------------------------------------------------- */ | 74 | /* --------------------------------------------------------------------- */ |
75 | 75 | ||
diff --git a/drivers/net/hamradio/baycom_par.c b/drivers/net/hamradio/baycom_par.c index 5f5af9a606f8..279d2296290a 100644 --- a/drivers/net/hamradio/baycom_par.c +++ b/drivers/net/hamradio/baycom_par.c | |||
@@ -102,7 +102,7 @@ | |||
102 | 102 | ||
103 | static const char bc_drvname[] = "baycom_par"; | 103 | static const char bc_drvname[] = "baycom_par"; |
104 | static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" | 104 | static const char bc_drvinfo[] = KERN_INFO "baycom_par: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" |
105 | "baycom_par: version 0.9 compiled " __TIME__ " " __DATE__ "\n"; | 105 | "baycom_par: version 0.9\n"; |
106 | 106 | ||
107 | /* --------------------------------------------------------------------- */ | 107 | /* --------------------------------------------------------------------- */ |
108 | 108 | ||
diff --git a/drivers/net/hamradio/baycom_ser_fdx.c b/drivers/net/hamradio/baycom_ser_fdx.c index 3e25f10cabd6..99cdce33df8b 100644 --- a/drivers/net/hamradio/baycom_ser_fdx.c +++ b/drivers/net/hamradio/baycom_ser_fdx.c | |||
@@ -92,7 +92,7 @@ | |||
92 | 92 | ||
93 | static const char bc_drvname[] = "baycom_ser_fdx"; | 93 | static const char bc_drvname[] = "baycom_ser_fdx"; |
94 | static const char bc_drvinfo[] = KERN_INFO "baycom_ser_fdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" | 94 | static const char bc_drvinfo[] = KERN_INFO "baycom_ser_fdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" |
95 | "baycom_ser_fdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n"; | 95 | "baycom_ser_fdx: version 0.10\n"; |
96 | 96 | ||
97 | /* --------------------------------------------------------------------- */ | 97 | /* --------------------------------------------------------------------- */ |
98 | 98 | ||
diff --git a/drivers/net/hamradio/baycom_ser_hdx.c b/drivers/net/hamradio/baycom_ser_hdx.c index 1686f6dcbbce..d92fe6ca788f 100644 --- a/drivers/net/hamradio/baycom_ser_hdx.c +++ b/drivers/net/hamradio/baycom_ser_hdx.c | |||
@@ -80,7 +80,7 @@ | |||
80 | 80 | ||
81 | static const char bc_drvname[] = "baycom_ser_hdx"; | 81 | static const char bc_drvname[] = "baycom_ser_hdx"; |
82 | static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" | 82 | static const char bc_drvinfo[] = KERN_INFO "baycom_ser_hdx: (C) 1996-2000 Thomas Sailer, HB9JNX/AE4WA\n" |
83 | "baycom_ser_hdx: version 0.10 compiled " __TIME__ " " __DATE__ "\n"; | 83 | "baycom_ser_hdx: version 0.10\n"; |
84 | 84 | ||
85 | /* --------------------------------------------------------------------- */ | 85 | /* --------------------------------------------------------------------- */ |
86 | 86 | ||
diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c index 5b37579e84b7..a4a3516b6bbf 100644 --- a/drivers/net/hamradio/hdlcdrv.c +++ b/drivers/net/hamradio/hdlcdrv.c | |||
@@ -749,7 +749,7 @@ EXPORT_SYMBOL(hdlcdrv_unregister); | |||
749 | static int __init hdlcdrv_init_driver(void) | 749 | static int __init hdlcdrv_init_driver(void) |
750 | { | 750 | { |
751 | printk(KERN_INFO "hdlcdrv: (C) 1996-2000 Thomas Sailer HB9JNX/AE4WA\n"); | 751 | printk(KERN_INFO "hdlcdrv: (C) 1996-2000 Thomas Sailer HB9JNX/AE4WA\n"); |
752 | printk(KERN_INFO "hdlcdrv: version 0.8 compiled " __TIME__ " " __DATE__ "\n"); | 752 | printk(KERN_INFO "hdlcdrv: version 0.8\n"); |
753 | return 0; | 753 | return 0; |
754 | } | 754 | } |
755 | 755 | ||
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c index f0d8346d0fa5..4d40626b3bfa 100644 --- a/drivers/net/ks8842.c +++ b/drivers/net/ks8842.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/kernel.h> | 26 | #include <linux/kernel.h> |
27 | #include <linux/module.h> | 27 | #include <linux/module.h> |
28 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
29 | #include <linux/mfd/core.h> | ||
30 | #include <linux/netdevice.h> | 29 | #include <linux/netdevice.h> |
31 | #include <linux/etherdevice.h> | 30 | #include <linux/etherdevice.h> |
32 | #include <linux/ethtool.h> | 31 | #include <linux/ethtool.h> |
@@ -1146,7 +1145,7 @@ static int __devinit ks8842_probe(struct platform_device *pdev) | |||
1146 | struct resource *iomem; | 1145 | struct resource *iomem; |
1147 | struct net_device *netdev; | 1146 | struct net_device *netdev; |
1148 | struct ks8842_adapter *adapter; | 1147 | struct ks8842_adapter *adapter; |
1149 | struct ks8842_platform_data *pdata = mfd_get_data(pdev); | 1148 | struct ks8842_platform_data *pdata = pdev->dev.platform_data; |
1150 | u16 id; | 1149 | u16 id; |
1151 | unsigned i; | 1150 | unsigned i; |
1152 | 1151 | ||
diff --git a/drivers/net/wan/pc300_drv.c b/drivers/net/wan/pc300_drv.c index 737b59f1a8dc..9617d3d0ee39 100644 --- a/drivers/net/wan/pc300_drv.c +++ b/drivers/net/wan/pc300_drv.c | |||
@@ -3242,8 +3242,7 @@ static inline void show_version(void) | |||
3242 | rcsdate++; | 3242 | rcsdate++; |
3243 | tmp = strrchr(rcsdate, ' '); | 3243 | tmp = strrchr(rcsdate, ' '); |
3244 | *tmp = '\0'; | 3244 | *tmp = '\0'; |
3245 | printk(KERN_INFO "Cyclades-PC300 driver %s %s (built %s %s)\n", | 3245 | printk(KERN_INFO "Cyclades-PC300 driver %s %s\n", rcsvers, rcsdate); |
3246 | rcsvers, rcsdate, __DATE__, __TIME__); | ||
3247 | } /* show_version */ | 3246 | } /* show_version */ |
3248 | 3247 | ||
3249 | static const struct net_device_ops cpc_netdev_ops = { | 3248 | static const struct net_device_ops cpc_netdev_ops = { |
diff --git a/drivers/parport/parport_ip32.c b/drivers/parport/parport_ip32.c index d3d7809af8bf..0dc34f12f92e 100644 --- a/drivers/parport/parport_ip32.c +++ b/drivers/parport/parport_ip32.c | |||
@@ -2203,7 +2203,6 @@ static __exit void parport_ip32_unregister_port(struct parport *p) | |||
2203 | static int __init parport_ip32_init(void) | 2203 | static int __init parport_ip32_init(void) |
2204 | { | 2204 | { |
2205 | pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n"); | 2205 | pr_info(PPIP32 "SGI IP32 built-in parallel port driver v0.6\n"); |
2206 | pr_debug1(PPIP32 "Compiled on %s, %s\n", __DATE__, __TIME__); | ||
2207 | this_port = parport_ip32_probe_port(); | 2206 | this_port = parport_ip32_probe_port(); |
2208 | return IS_ERR(this_port) ? PTR_ERR(this_port) : 0; | 2207 | return IS_ERR(this_port) ? PTR_ERR(this_port) : 0; |
2209 | } | 2208 | } |
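
The hunks above (baycom_ser_hdx, hdlcdrv, pc300_drv, parport_ip32) all drop __DATE__ and __TIME__ from their version banners, so rebuilding unchanged source no longer produces a different binary. A sketch of the pattern they converge on, with a made-up driver name:

#include <linux/init.h>
#include <linux/kernel.h>

/* Illustrative only: a version banner without embedded build timestamps. */
static int __init exampledrv_init(void)
{
	printk(KERN_INFO "exampledrv: version 1.0\n");
	return 0;
}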
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig index 52a462fc6b84..e57b50b38565 100644 --- a/drivers/power/Kconfig +++ b/drivers/power/Kconfig | |||
@@ -68,6 +68,13 @@ config BATTERY_DS2760 | |||
68 | help | 68 | help |
69 | Say Y here to enable support for batteries with ds2760 chip. | 69 | Say Y here to enable support for batteries with ds2760 chip. |
70 | 70 | ||
71 | config BATTERY_DS2780 | ||
72 | tristate "DS2780 battery driver" | ||
73 | select W1 | ||
74 | select W1_SLAVE_DS2780 | ||
75 | help | ||
76 | Say Y here to enable support for batteries with ds2780 chip. | ||
77 | |||
71 | config BATTERY_DS2782 | 78 | config BATTERY_DS2782 |
72 | tristate "DS2782/DS2786 standalone gas-gauge" | 79 | tristate "DS2782/DS2786 standalone gas-gauge" |
73 | depends on I2C | 80 | depends on I2C |
@@ -203,6 +210,15 @@ config CHARGER_ISP1704 | |||
203 | Say Y to enable support for USB Charger Detection with | 210 | Say Y to enable support for USB Charger Detection with |
204 | ISP1707/ISP1704 USB transceivers. | 211 | ISP1707/ISP1704 USB transceivers. |
205 | 212 | ||
213 | config CHARGER_MAX8903 | ||
214 | tristate "MAX8903 Battery DC-DC Charger for USB and Adapter Power" | ||
215 | depends on GENERIC_HARDIRQS | ||
216 | help | ||
217 | Say Y to enable support for the MAX8903 DC-DC charger and sysfs. | ||
218 | The driver supports controlling charger-enable and current-limit | ||
219 | pins based on the status of charger connections with interrupt | ||
220 | handlers. | ||
221 | |||
206 | config CHARGER_TWL4030 | 222 | config CHARGER_TWL4030 |
207 | tristate "OMAP TWL4030 BCI charger driver" | 223 | tristate "OMAP TWL4030 BCI charger driver" |
208 | depends on TWL4030_CORE | 224 | depends on TWL4030_CORE |
diff --git a/drivers/power/Makefile b/drivers/power/Makefile index 8385bfae8728..009a90fa8ac9 100644 --- a/drivers/power/Makefile +++ b/drivers/power/Makefile | |||
@@ -15,6 +15,7 @@ obj-$(CONFIG_WM8350_POWER) += wm8350_power.o | |||
15 | obj-$(CONFIG_TEST_POWER) += test_power.o | 15 | obj-$(CONFIG_TEST_POWER) += test_power.o |
16 | 16 | ||
17 | obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o | 17 | obj-$(CONFIG_BATTERY_DS2760) += ds2760_battery.o |
18 | obj-$(CONFIG_BATTERY_DS2780) += ds2780_battery.o | ||
18 | obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o | 19 | obj-$(CONFIG_BATTERY_DS2782) += ds2782_battery.o |
19 | obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o | 20 | obj-$(CONFIG_BATTERY_PMU) += pmu_battery.o |
20 | obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o | 21 | obj-$(CONFIG_BATTERY_OLPC) += olpc_battery.o |
@@ -32,5 +33,6 @@ obj-$(CONFIG_CHARGER_PCF50633) += pcf50633-charger.o | |||
32 | obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o | 33 | obj-$(CONFIG_BATTERY_JZ4740) += jz4740-battery.o |
33 | obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o | 34 | obj-$(CONFIG_BATTERY_INTEL_MID) += intel_mid_battery.o |
34 | obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o | 35 | obj-$(CONFIG_CHARGER_ISP1704) += isp1704_charger.o |
36 | obj-$(CONFIG_CHARGER_MAX8903) += max8903_charger.o | ||
35 | obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o | 37 | obj-$(CONFIG_CHARGER_TWL4030) += twl4030_charger.o |
36 | obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o | 38 | obj-$(CONFIG_CHARGER_GPIO) += gpio-charger.o |
diff --git a/drivers/power/bq27x00_battery.c b/drivers/power/bq27x00_battery.c index 59e68dbd028b..bb16f5b7e167 100644 --- a/drivers/power/bq27x00_battery.c +++ b/drivers/power/bq27x00_battery.c | |||
@@ -4,6 +4,7 @@ | |||
4 | * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it> | 4 | * Copyright (C) 2008 Rodolfo Giometti <giometti@linux.it> |
5 | * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it> | 5 | * Copyright (C) 2008 Eurotech S.p.A. <info@eurotech.it> |
6 | * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de> | 6 | * Copyright (C) 2010-2011 Lars-Peter Clausen <lars@metafoo.de> |
7 | * Copyright (C) 2011 Pali Rohár <pali.rohar@gmail.com> | ||
7 | * | 8 | * |
8 | * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc. | 9 | * Based on a previous work by Copyright (C) 2008 Texas Instruments, Inc. |
9 | * | 10 | * |
@@ -76,7 +77,7 @@ struct bq27x00_reg_cache { | |||
76 | int time_to_empty_avg; | 77 | int time_to_empty_avg; |
77 | int time_to_full; | 78 | int time_to_full; |
78 | int charge_full; | 79 | int charge_full; |
79 | int charge_counter; | 80 | int cycle_count; |
80 | int capacity; | 81 | int capacity; |
81 | int flags; | 82 | int flags; |
82 | 83 | ||
@@ -115,7 +116,7 @@ static enum power_supply_property bq27x00_battery_props[] = { | |||
115 | POWER_SUPPLY_PROP_CHARGE_FULL, | 116 | POWER_SUPPLY_PROP_CHARGE_FULL, |
116 | POWER_SUPPLY_PROP_CHARGE_NOW, | 117 | POWER_SUPPLY_PROP_CHARGE_NOW, |
117 | POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, | 118 | POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, |
118 | POWER_SUPPLY_PROP_CHARGE_COUNTER, | 119 | POWER_SUPPLY_PROP_CYCLE_COUNT, |
119 | POWER_SUPPLY_PROP_ENERGY_NOW, | 120 | POWER_SUPPLY_PROP_ENERGY_NOW, |
120 | }; | 121 | }; |
121 | 122 | ||
@@ -267,7 +268,7 @@ static void bq27x00_update(struct bq27x00_device_info *di) | |||
267 | cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP); | 268 | cache.time_to_empty_avg = bq27x00_battery_read_time(di, BQ27x00_REG_TTECP); |
268 | cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF); | 269 | cache.time_to_full = bq27x00_battery_read_time(di, BQ27x00_REG_TTF); |
269 | cache.charge_full = bq27x00_battery_read_lmd(di); | 270 | cache.charge_full = bq27x00_battery_read_lmd(di); |
270 | cache.charge_counter = bq27x00_battery_read_cyct(di); | 271 | cache.cycle_count = bq27x00_battery_read_cyct(di); |
271 | 272 | ||
272 | if (!is_bq27500) | 273 | if (!is_bq27500) |
273 | cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false); | 274 | cache.current_now = bq27x00_read(di, BQ27x00_REG_AI, false); |
@@ -496,8 +497,8 @@ static int bq27x00_battery_get_property(struct power_supply *psy, | |||
496 | case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: | 497 | case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: |
497 | ret = bq27x00_simple_value(di->charge_design_full, val); | 498 | ret = bq27x00_simple_value(di->charge_design_full, val); |
498 | break; | 499 | break; |
499 | case POWER_SUPPLY_PROP_CHARGE_COUNTER: | 500 | case POWER_SUPPLY_PROP_CYCLE_COUNT: |
500 | ret = bq27x00_simple_value(di->cache.charge_counter, val); | 501 | ret = bq27x00_simple_value(di->cache.cycle_count, val); |
501 | break; | 502 | break; |
502 | case POWER_SUPPLY_PROP_ENERGY_NOW: | 503 | case POWER_SUPPLY_PROP_ENERGY_NOW: |
503 | ret = bq27x00_battery_energy(di, val); | 504 | ret = bq27x00_battery_energy(di, val); |
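
The bq27x00 hunk renames the cached CYCT register value from charge_counter to cycle_count and reports it as POWER_SUPPLY_PROP_CYCLE_COUNT, which matches what the register holds (completed charge cycles rather than accumulated charge). A hedged paraphrase of the helper the new case feeds through; the driver's actual bq27x00_simple_value() may differ in detail:

#include <linux/power_supply.h>

/* A negative cached value means the earlier register read failed and the
 * error is propagated; anything else is handed to the power_supply core. */
static int example_simple_value(int value, union power_supply_propval *val)
{
	if (value < 0)
		return value;

	val->intval = value;
	return 0;
}

With a helper of that shape, the POWER_SUPPLY_PROP_CYCLE_COUNT case above reduces to passing di->cache.cycle_count straight through.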
diff --git a/drivers/power/ds2760_battery.c b/drivers/power/ds2760_battery.c index e534290f3256..f2c9cc33c0f9 100644 --- a/drivers/power/ds2760_battery.c +++ b/drivers/power/ds2760_battery.c | |||
@@ -86,7 +86,11 @@ static int rated_capacities[] = { | |||
86 | 920, /* NEC */ | 86 | 920, /* NEC */ |
87 | 1440, /* Samsung */ | 87 | 1440, /* Samsung */ |
88 | 1440, /* BYD */ | 88 | 1440, /* BYD */ |
89 | #ifdef CONFIG_MACH_H4700 | ||
90 | 1800, /* HP iPAQ hx4700 3.7V 1800mAh (359113-001) */ | ||
91 | #else | ||
89 | 1440, /* Lishen */ | 92 | 1440, /* Lishen */ |
93 | #endif | ||
90 | 1440, /* NEC */ | 94 | 1440, /* NEC */ |
91 | 2880, /* Samsung */ | 95 | 2880, /* Samsung */ |
92 | 2880, /* BYD */ | 96 | 2880, /* BYD */ |
@@ -186,7 +190,7 @@ static int ds2760_battery_read_status(struct ds2760_device_info *di) | |||
186 | 190 | ||
187 | scale[0] = di->full_active_uAh; | 191 | scale[0] = di->full_active_uAh; |
188 | for (i = 1; i < 5; i++) | 192 | for (i = 1; i < 5; i++) |
189 | scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 2 + i]; | 193 | scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 1 + i]; |
190 | 194 | ||
191 | di->full_active_uAh = battery_interpolate(scale, di->temp_C / 10); | 195 | di->full_active_uAh = battery_interpolate(scale, di->temp_C / 10); |
192 | di->full_active_uAh *= 1000; /* convert to µAh */ | 196 | di->full_active_uAh *= 1000; /* convert to µAh */ |
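
The ds2760 change is an off-by-one fix in the table handed to battery_interpolate(): with the old + 2 + i index the loop skipped the first temperature-delta byte after the full-capacity word and read one byte beyond the intended range (the CONFIG_MACH_H4700 hunk merely substitutes the hx4700's 1800 mAh pack for the Lishen entry in the rated-capacities table). A sketch of the corrected indexing, with the EEPROM layout inferred from the hunk rather than from the datasheet:

/* raw[DS2760_ACTIVE_FULL] and raw[DS2760_ACTIVE_FULL + 1] are assumed to
 * hold the 16-bit active-full capacity, and the four bytes that follow
 * hold per-temperature-step deltas; i = 1..4 now lands exactly on them. */
scale[0] = di->full_active_uAh;
for (i = 1; i < 5; i++)
	scale[i] = scale[i - 1] + di->raw[DS2760_ACTIVE_FULL + 1 + i];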
diff --git a/drivers/power/ds2780_battery.c b/drivers/power/ds2780_battery.c new file mode 100644 index 000000000000..1fefe82e12e3 --- /dev/null +++ b/drivers/power/ds2780_battery.c | |||
@@ -0,0 +1,853 @@ | |||
1 | /* | ||
2 | * 1-wire client/driver for the Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC | ||
3 | * | ||
4 | * Copyright (C) 2010 Indesign, LLC | ||
5 | * | ||
6 | * Author: Clifton Barnes <cabarnes@indesign-llc.com> | ||
7 | * | ||
8 | * Based on ds2760_battery and ds2782_battery drivers | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/module.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/param.h> | ||
19 | #include <linux/pm.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/power_supply.h> | ||
22 | #include <linux/idr.h> | ||
23 | |||
24 | #include "../w1/w1.h" | ||
25 | #include "../w1/slaves/w1_ds2780.h" | ||
26 | |||
27 | /* Current unit measurement in uA for a 1 milli-ohm sense resistor */ | ||
28 | #define DS2780_CURRENT_UNITS 1563 | ||
29 | /* Charge unit measurement in uAh for a 1 milli-ohm sense resistor */ | ||
30 | #define DS2780_CHARGE_UNITS 6250 | ||
31 | /* Number of bytes in user EEPROM space */ | ||
32 | #define DS2780_USER_EEPROM_SIZE (DS2780_EEPROM_BLOCK0_END - \ | ||
33 | DS2780_EEPROM_BLOCK0_START + 1) | ||
34 | /* Number of bytes in parameter EEPROM space */ | ||
35 | #define DS2780_PARAM_EEPROM_SIZE (DS2780_EEPROM_BLOCK1_END - \ | ||
36 | DS2780_EEPROM_BLOCK1_START + 1) | ||
37 | |||
38 | struct ds2780_device_info { | ||
39 | struct device *dev; | ||
40 | struct power_supply bat; | ||
41 | struct device *w1_dev; | ||
42 | }; | ||
43 | |||
44 | enum current_types { | ||
45 | CURRENT_NOW, | ||
46 | CURRENT_AVG, | ||
47 | }; | ||
48 | |||
49 | static const char model[] = "DS2780"; | ||
50 | static const char manufacturer[] = "Maxim/Dallas"; | ||
51 | |||
52 | static inline struct ds2780_device_info *to_ds2780_device_info( | ||
53 | struct power_supply *psy) | ||
54 | { | ||
55 | return container_of(psy, struct ds2780_device_info, bat); | ||
56 | } | ||
57 | |||
58 | static inline struct power_supply *to_power_supply(struct device *dev) | ||
59 | { | ||
60 | return dev_get_drvdata(dev); | ||
61 | } | ||
62 | |||
63 | static inline int ds2780_read8(struct device *dev, u8 *val, int addr) | ||
64 | { | ||
65 | return w1_ds2780_io(dev, val, addr, sizeof(u8), 0); | ||
66 | } | ||
67 | |||
68 | static int ds2780_read16(struct device *dev, s16 *val, int addr) | ||
69 | { | ||
70 | int ret; | ||
71 | u8 raw[2]; | ||
72 | |||
73 | ret = w1_ds2780_io(dev, raw, addr, sizeof(u8) * 2, 0); | ||
74 | if (ret < 0) | ||
75 | return ret; | ||
76 | |||
77 | *val = (raw[0] << 8) | raw[1]; | ||
78 | |||
79 | return 0; | ||
80 | } | ||
81 | |||
82 | static inline int ds2780_read_block(struct device *dev, u8 *val, int addr, | ||
83 | size_t count) | ||
84 | { | ||
85 | return w1_ds2780_io(dev, val, addr, count, 0); | ||
86 | } | ||
87 | |||
88 | static inline int ds2780_write(struct device *dev, u8 *val, int addr, | ||
89 | size_t count) | ||
90 | { | ||
91 | return w1_ds2780_io(dev, val, addr, count, 1); | ||
92 | } | ||
93 | |||
94 | static inline int ds2780_store_eeprom(struct device *dev, int addr) | ||
95 | { | ||
96 | return w1_ds2780_eeprom_cmd(dev, addr, W1_DS2780_COPY_DATA); | ||
97 | } | ||
98 | |||
99 | static inline int ds2780_recall_eeprom(struct device *dev, int addr) | ||
100 | { | ||
101 | return w1_ds2780_eeprom_cmd(dev, addr, W1_DS2780_RECALL_DATA); | ||
102 | } | ||
103 | |||
104 | static int ds2780_save_eeprom(struct ds2780_device_info *dev_info, int reg) | ||
105 | { | ||
106 | int ret; | ||
107 | |||
108 | ret = ds2780_store_eeprom(dev_info->w1_dev, reg); | ||
109 | if (ret < 0) | ||
110 | return ret; | ||
111 | |||
112 | ret = ds2780_recall_eeprom(dev_info->w1_dev, reg); | ||
113 | if (ret < 0) | ||
114 | return ret; | ||
115 | |||
116 | return 0; | ||
117 | } | ||
118 | |||
119 | /* Set sense resistor value in mhos */ | ||
120 | static int ds2780_set_sense_register(struct ds2780_device_info *dev_info, | ||
121 | u8 conductance) | ||
122 | { | ||
123 | int ret; | ||
124 | |||
125 | ret = ds2780_write(dev_info->w1_dev, &conductance, | ||
126 | DS2780_RSNSP_REG, sizeof(u8)); | ||
127 | if (ret < 0) | ||
128 | return ret; | ||
129 | |||
130 | return ds2780_save_eeprom(dev_info, DS2780_RSNSP_REG); | ||
131 | } | ||
132 | |||
133 | /* Get RSGAIN value from 0 to 1.999 in steps of 0.001 */ | ||
134 | static int ds2780_get_rsgain_register(struct ds2780_device_info *dev_info, | ||
135 | u16 *rsgain) | ||
136 | { | ||
137 | return ds2780_read16(dev_info->w1_dev, rsgain, DS2780_RSGAIN_MSB_REG); | ||
138 | } | ||
139 | |||
140 | /* Set RSGAIN value from 0 to 1.999 in steps of 0.001 */ | ||
141 | static int ds2780_set_rsgain_register(struct ds2780_device_info *dev_info, | ||
142 | u16 rsgain) | ||
143 | { | ||
144 | int ret; | ||
145 | u8 raw[] = {rsgain >> 8, rsgain & 0xFF}; | ||
146 | |||
147 | ret = ds2780_write(dev_info->w1_dev, raw, | ||
148 | DS2780_RSGAIN_MSB_REG, sizeof(u8) * 2); | ||
149 | if (ret < 0) | ||
150 | return ret; | ||
151 | |||
152 | return ds2780_save_eeprom(dev_info, DS2780_RSGAIN_MSB_REG); | ||
153 | } | ||
154 | |||
155 | static int ds2780_get_voltage(struct ds2780_device_info *dev_info, | ||
156 | int *voltage_uV) | ||
157 | { | ||
158 | int ret; | ||
159 | s16 voltage_raw; | ||
160 | |||
161 | /* | ||
162 | * The voltage value is located in 10 bits across the voltage MSB | ||
163 | * and LSB registers in two's complement form | ||
164 | * Sign bit of the voltage value is in bit 7 of the voltage MSB register | ||
165 | * Bits 9 - 3 of the voltage value are in bits 6 - 0 of the | ||
166 | * voltage MSB register | ||
167 | * Bits 2 - 0 of the voltage value are in bits 7 - 5 of the | ||
168 | * voltage LSB register | ||
169 | */ | ||
170 | ret = ds2780_read16(dev_info->w1_dev, &voltage_raw, | ||
171 | DS2780_VOLT_MSB_REG); | ||
172 | if (ret < 0) | ||
173 | return ret; | ||
174 | |||
175 | /* | ||
176 | * DS2780 reports voltage in units of 4.88mV, but the battery class | ||
177 | * reports in units of uV, so convert by multiplying by 4880. | ||
178 | */ | ||
179 | *voltage_uV = (voltage_raw / 32) * 4880; | ||
180 | return 0; | ||
181 | } | ||
182 | |||
183 | static int ds2780_get_temperature(struct ds2780_device_info *dev_info, | ||
184 | int *temperature) | ||
185 | { | ||
186 | int ret; | ||
187 | s16 temperature_raw; | ||
188 | |||
189 | /* | ||
190 | * The temperature value is located in 10 bits across the temperature | ||
191 | * MSB and LSB registers in two's complement form | ||
192 | * Sign bit of the temperature value is in bit 7 of the temperature | ||
193 | * MSB register | ||
194 | * Bits 9 - 3 of the temperature value are in bits 6 - 0 of the | ||
195 | * temperature MSB register | ||
196 | * Bits 2 - 0 of the temperature value are in bits 7 - 5 of the | ||
197 | * temperature LSB register | ||
198 | */ | ||
199 | ret = ds2780_read16(dev_info->w1_dev, &temperature_raw, | ||
200 | DS2780_TEMP_MSB_REG); | ||
201 | if (ret < 0) | ||
202 | return ret; | ||
203 | |||
204 | /* | ||
205 | * Temperature is measured in units of 0.125 degrees celsius, the | ||
206 | * power_supply class measures temperature in tenths of degrees | ||
207 | * celsius. The temperature value is stored as a 10 bit number, plus | ||
208 | * sign in the upper bits of a 16 bit register. | ||
209 | */ | ||
210 | *temperature = ((temperature_raw / 32) * 125) / 100; | ||
211 | return 0; | ||
212 | } | ||
213 | |||
214 | static int ds2780_get_current(struct ds2780_device_info *dev_info, | ||
215 | enum current_types type, int *current_uA) | ||
216 | { | ||
217 | int ret, sense_res; | ||
218 | s16 current_raw; | ||
219 | u8 sense_res_raw, reg_msb; | ||
220 | |||
221 | /* | ||
222 | * The units of measurement for current are dependent on the value of | ||
223 | * the sense resistor. | ||
224 | */ | ||
225 | ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG); | ||
226 | if (ret < 0) | ||
227 | return ret; | ||
228 | |||
229 | if (sense_res_raw == 0) { | ||
230 | dev_err(dev_info->dev, "sense resistor value is 0\n"); | ||
231 | return -ENXIO; | ||
232 | } | ||
233 | sense_res = 1000 / sense_res_raw; | ||
234 | |||
235 | if (type == CURRENT_NOW) | ||
236 | reg_msb = DS2780_CURRENT_MSB_REG; | ||
237 | else if (type == CURRENT_AVG) | ||
238 | reg_msb = DS2780_IAVG_MSB_REG; | ||
239 | else | ||
240 | return -EINVAL; | ||
241 | |||
242 | /* | ||
243 | * The current value is located in 16 bits across the current MSB | ||
244 | * and LSB registers in two's complement form | ||
245 | * Sign bit of the current value is in bit 7 of the current MSB register | ||
246 | * Bits 14 - 8 of the current value are in bits 6 - 0 of the current | ||
247 | * MSB register | ||
248 | * Bits 7 - 0 of the current value are in bits 7 - 0 of the current | ||
249 | * LSB register | ||
250 | */ | ||
251 | ret = ds2780_read16(dev_info->w1_dev, ¤t_raw, reg_msb); | ||
252 | if (ret < 0) | ||
253 | return ret; | ||
254 | |||
255 | *current_uA = current_raw * (DS2780_CURRENT_UNITS / sense_res); | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static int ds2780_get_accumulated_current(struct ds2780_device_info *dev_info, | ||
260 | int *accumulated_current) | ||
261 | { | ||
262 | int ret, sense_res; | ||
263 | s16 current_raw; | ||
264 | u8 sense_res_raw; | ||
265 | |||
266 | /* | ||
267 | * The units of measurement for accumulated current are dependent on | ||
268 | * the value of the sense resistor. | ||
269 | */ | ||
270 | ret = ds2780_read8(dev_info->w1_dev, &sense_res_raw, DS2780_RSNSP_REG); | ||
271 | if (ret < 0) | ||
272 | return ret; | ||
273 | |||
274 | if (sense_res_raw == 0) { | ||
275 | dev_err(dev_info->dev, "sense resistor value is 0\n"); | ||
276 | return -ENXIO; | ||
277 | } | ||
278 | sense_res = 1000 / sense_res_raw; | ||
279 | |||
280 | /* | ||
281 | * The ACR value is located in 16 bits across the ACR MSB and | ||
282 | * LSB registers | ||
283 | * Bits 15 - 8 of the ACR value are in bits 7 - 0 of the ACR | ||
284 | * MSB register | ||
285 | * Bits 7 - 0 of the ACR value are in bits 7 - 0 of the ACR | ||
286 | * LSB register | ||
287 | */ | ||
288 | ret = ds2780_read16(dev_info->w1_dev, ¤t_raw, DS2780_ACR_MSB_REG); | ||
289 | if (ret < 0) | ||
290 | return ret; | ||
291 | |||
292 | *accumulated_current = current_raw * (DS2780_CHARGE_UNITS / sense_res); | ||
293 | return 0; | ||
294 | } | ||
295 | |||
296 | static int ds2780_get_capacity(struct ds2780_device_info *dev_info, | ||
297 | int *capacity) | ||
298 | { | ||
299 | int ret; | ||
300 | u8 raw; | ||
301 | |||
302 | ret = ds2780_read8(dev_info->w1_dev, &raw, DS2780_RARC_REG); | ||
303 | if (ret < 0) | ||
304 | return ret; | ||
305 | |||
306 | *capacity = raw; | ||
307 | return raw; | ||
308 | } | ||
309 | |||
310 | static int ds2780_get_status(struct ds2780_device_info *dev_info, int *status) | ||
311 | { | ||
312 | int ret, current_uA, capacity; | ||
313 | |||
314 | ret = ds2780_get_current(dev_info, CURRENT_NOW, ¤t_uA); | ||
315 | if (ret < 0) | ||
316 | return ret; | ||
317 | |||
318 | ret = ds2780_get_capacity(dev_info, &capacity); | ||
319 | if (ret < 0) | ||
320 | return ret; | ||
321 | |||
322 | if (capacity == 100) | ||
323 | *status = POWER_SUPPLY_STATUS_FULL; | ||
324 | else if (current_uA == 0) | ||
325 | *status = POWER_SUPPLY_STATUS_NOT_CHARGING; | ||
326 | else if (current_uA < 0) | ||
327 | *status = POWER_SUPPLY_STATUS_DISCHARGING; | ||
328 | else | ||
329 | *status = POWER_SUPPLY_STATUS_CHARGING; | ||
330 | |||
331 | return 0; | ||
332 | } | ||
333 | |||
334 | static int ds2780_get_charge_now(struct ds2780_device_info *dev_info, | ||
335 | int *charge_now) | ||
336 | { | ||
337 | int ret; | ||
338 | u16 charge_raw; | ||
339 | |||
340 | /* | ||
341 | * The RAAC value is located in 16 bits across the RAAC MSB and | ||
342 | * LSB registers | ||
343 | * Bits 15 - 8 of the RAAC value are in bits 7 - 0 of the RAAC | ||
344 | * MSB register | ||
345 | * Bits 7 - 0 of the RAAC value are in bits 7 - 0 of the RAAC | ||
346 | * LSB register | ||
347 | */ | ||
348 | ret = ds2780_read16(dev_info->w1_dev, &charge_raw, DS2780_RAAC_MSB_REG); | ||
349 | if (ret < 0) | ||
350 | return ret; | ||
351 | |||
352 | *charge_now = charge_raw * 1600; | ||
353 | return 0; | ||
354 | } | ||
355 | |||
356 | static int ds2780_get_control_register(struct ds2780_device_info *dev_info, | ||
357 | u8 *control_reg) | ||
358 | { | ||
359 | return ds2780_read8(dev_info->w1_dev, control_reg, DS2780_CONTROL_REG); | ||
360 | } | ||
361 | |||
362 | static int ds2780_set_control_register(struct ds2780_device_info *dev_info, | ||
363 | u8 control_reg) | ||
364 | { | ||
365 | int ret; | ||
366 | |||
367 | ret = ds2780_write(dev_info->w1_dev, &control_reg, | ||
368 | DS2780_CONTROL_REG, sizeof(u8)); | ||
369 | if (ret < 0) | ||
370 | return ret; | ||
371 | |||
372 | return ds2780_save_eeprom(dev_info, DS2780_CONTROL_REG); | ||
373 | } | ||
374 | |||
375 | static int ds2780_battery_get_property(struct power_supply *psy, | ||
376 | enum power_supply_property psp, | ||
377 | union power_supply_propval *val) | ||
378 | { | ||
379 | int ret = 0; | ||
380 | struct ds2780_device_info *dev_info = to_ds2780_device_info(psy); | ||
381 | |||
382 | switch (psp) { | ||
383 | case POWER_SUPPLY_PROP_VOLTAGE_NOW: | ||
384 | ret = ds2780_get_voltage(dev_info, &val->intval); | ||
385 | break; | ||
386 | |||
387 | case POWER_SUPPLY_PROP_TEMP: | ||
388 | ret = ds2780_get_temperature(dev_info, &val->intval); | ||
389 | break; | ||
390 | |||
391 | case POWER_SUPPLY_PROP_MODEL_NAME: | ||
392 | val->strval = model; | ||
393 | break; | ||
394 | |||
395 | case POWER_SUPPLY_PROP_MANUFACTURER: | ||
396 | val->strval = manufacturer; | ||
397 | break; | ||
398 | |||
399 | case POWER_SUPPLY_PROP_CURRENT_NOW: | ||
400 | ret = ds2780_get_current(dev_info, CURRENT_NOW, &val->intval); | ||
401 | break; | ||
402 | |||
403 | case POWER_SUPPLY_PROP_CURRENT_AVG: | ||
404 | ret = ds2780_get_current(dev_info, CURRENT_AVG, &val->intval); | ||
405 | break; | ||
406 | |||
407 | case POWER_SUPPLY_PROP_STATUS: | ||
408 | ret = ds2780_get_status(dev_info, &val->intval); | ||
409 | break; | ||
410 | |||
411 | case POWER_SUPPLY_PROP_CAPACITY: | ||
412 | ret = ds2780_get_capacity(dev_info, &val->intval); | ||
413 | break; | ||
414 | |||
415 | case POWER_SUPPLY_PROP_CHARGE_COUNTER: | ||
416 | ret = ds2780_get_accumulated_current(dev_info, &val->intval); | ||
417 | break; | ||
418 | |||
419 | case POWER_SUPPLY_PROP_CHARGE_NOW: | ||
420 | ret = ds2780_get_charge_now(dev_info, &val->intval); | ||
421 | break; | ||
422 | |||
423 | default: | ||
424 | ret = -EINVAL; | ||
425 | } | ||
426 | |||
427 | return ret; | ||
428 | } | ||
429 | |||
430 | static enum power_supply_property ds2780_battery_props[] = { | ||
431 | POWER_SUPPLY_PROP_STATUS, | ||
432 | POWER_SUPPLY_PROP_VOLTAGE_NOW, | ||
433 | POWER_SUPPLY_PROP_TEMP, | ||
434 | POWER_SUPPLY_PROP_MODEL_NAME, | ||
435 | POWER_SUPPLY_PROP_MANUFACTURER, | ||
436 | POWER_SUPPLY_PROP_CURRENT_NOW, | ||
437 | POWER_SUPPLY_PROP_CURRENT_AVG, | ||
438 | POWER_SUPPLY_PROP_CAPACITY, | ||
439 | POWER_SUPPLY_PROP_CHARGE_COUNTER, | ||
440 | POWER_SUPPLY_PROP_CHARGE_NOW, | ||
441 | }; | ||
442 | |||
443 | static ssize_t ds2780_get_pmod_enabled(struct device *dev, | ||
444 | struct device_attribute *attr, | ||
445 | char *buf) | ||
446 | { | ||
447 | int ret; | ||
448 | u8 control_reg; | ||
449 | struct power_supply *psy = to_power_supply(dev); | ||
450 | struct ds2780_device_info *dev_info = to_ds2780_device_info(psy); | ||
451 | |||
452 | /* Get power mode */ | ||
453 | ret = ds2780_get_control_register(dev_info, &control_reg); | ||
454 | if (ret < 0) | ||
455 | return ret; | ||
456 | |||
457 | return sprintf(buf, "%d\n", | ||
458 | !!(control_reg & DS2780_CONTROL_REG_PMOD)); | ||
459 | } | ||
460 | |||
461 | static ssize_t ds2780_set_pmod_enabled(struct device *dev, | ||
462 | struct device_attribute *attr, | ||
463 | const char *buf, | ||
464 | size_t count) | ||
465 | { | ||
466 | int ret; | ||
467 | u8 control_reg, new_setting; | ||
468 | struct power_supply *psy = to_power_supply(dev); | ||
469 | struct ds2780_device_info *dev_info = to_ds2780_device_info(psy); | ||
470 | |||
471 | /* Set power mode */ | ||
472 | ret = ds2780_get_control_register(dev_info, &control_reg); | ||
473 | if (ret < 0) | ||
474 | return ret; | ||
475 | |||
476 | ret = kstrtou8(buf, 0, &new_setting); | ||
477 | if (ret < 0) | ||
478 | return ret; | ||
479 | |||
480 | if ((new_setting != 0) && (new_setting != 1)) { | ||
481 | dev_err(dev_info->dev, "Invalid pmod setting (0 or 1)\n"); | ||
482 | return -EINVAL; | ||
483 | } | ||
484 | |||
485 | if (new_setting) | ||
486 | control_reg |= DS2780_CONTROL_REG_PMOD; | ||
487 | else | ||
488 | control_reg &= ~DS2780_CONTROL_REG_PMOD; | ||
489 | |||
490 | ret = ds2780_set_control_register(dev_info, control_reg); | ||
491 | if (ret < 0) | ||
492 | return ret; | ||
493 | |||
494 | return count; | ||
495 | } | ||
496 | |||
497 | static ssize_t ds2780_get_sense_resistor_value(struct device *dev, | ||
498 | struct device_attribute *attr, | ||
499 | char *buf) | ||
500 | { | ||
501 | int ret; | ||
502 | u8 sense_resistor; | ||
503 | struct power_supply *psy = to_power_supply(dev); | ||
504 | struct ds2780_device_info *dev_info = to_ds2780_device_info(psy); | ||
505 | |||
506 | ret = ds2780_read8(dev_info->w1_dev, &sense_resistor, DS2780_RSNSP_REG); | ||
507 | if (ret < 0) | ||
508 | return ret; | ||
509 | |||
510 | ret = sprintf(buf, "%d\n", sense_resistor); | ||
511 | return ret; | ||
512 | } | ||
513 | |||
514 | static ssize_t ds2780_set_sense_resistor_value(struct device *dev, | ||
515 | struct device_attribute *attr, | ||
516 | const char *buf, | ||
517 | size_t count) | ||
518 | { | ||
519 | int ret; | ||
520 | u8 new_setting; | ||
521 | struct power_supply *psy = to_power_supply(dev); | ||
522 | struct ds2780_device_info *dev_info = to_ds2780_device_info(psy); | ||
523 | |||
524 | ret = kstrtou8(buf, 0, &new_setting); | ||
525 | if (ret < 0) | ||
526 | return ret; | ||
527 | |||
528 | ret = ds2780_set_sense_register(dev_info, new_setting); | ||
529 | if (ret < 0) | ||
530 | return ret; | ||
531 | |||
532 | return count; | ||
533 | } | ||
534 | |||
535 | static ssize_t ds2780_get_rsgain_setting(struct device *dev, | ||
536 | struct device_attribute *attr, | ||
537 | char *buf) | ||
538 | { | ||
539 | int ret; | ||
540 | u16 rsgain; | ||
541 | struct power_supply *psy = to_power_supply(dev); | ||
542 | struct ds2780_device_info *dev_info = to_ds2780_device_info(psy); | ||
543 | |||
544 | ret = ds2780_get_rsgain_register(dev_info, &rsgain); | ||
545 | if (ret < 0) | ||
546 | return ret; | ||
547 | |||
548 | return sprintf(buf, "%d\n", rsgain); | ||
549 | } | ||
550 | |||
551 | static ssize_t ds2780_set_rsgain_setting(struct device *dev, | ||
552 | struct device_attribute *attr, | ||
553 | const char *buf, | ||
554 | size_t count) | ||
555 | { | ||
556 | int ret; | ||
557 | u16 new_setting; | ||
558 | struct power_supply *psy = to_power_supply(dev); | ||
559 | struct ds2780_device_info *dev_info = to_ds2780_device_info(psy); | ||
560 | |||
561 | ret = kstrtou16(buf, 0, &new_setting); | ||
562 | if (ret < 0) | ||
563 | return ret; | ||
564 | |||
565 | /* Gain can only be from 0 to 1.999 in steps of .001 */ | ||
566 | if (new_setting > 1999) { | ||
567 | dev_err(dev_info->dev, "Invalid rsgain setting (0 - 1999)\n"); | ||
568 | return -EINVAL; | ||
569 | } | ||
570 | |||
571 | ret = ds2780_set_rsgain_register(dev_info, new_setting); | ||
572 | if (ret < 0) | ||
573 | return ret; | ||
574 | |||
575 | return count; | ||
576 | } | ||
577 | |||
578 | static ssize_t ds2780_get_pio_pin(struct device *dev, | ||
579 | struct device_attribute *attr, | ||
580 | char *buf) | ||
581 | { | ||
582 | int ret; | ||
583 | u8 sfr; | ||
584 | struct power_supply *psy = to_power_supply(dev); | ||
585 | struct ds2780_device_info *dev_info = to_ds2780_device_info(psy); | ||
586 | |||
587 | ret = ds2780_read8(dev_info->w1_dev, &sfr, DS2780_SFR_REG); | ||
588 | if (ret < 0) | ||
589 | return ret; | ||
590 | |||
591 | ret = sprintf(buf, "%d\n", sfr & DS2780_SFR_REG_PIOSC); | ||
592 | return ret; | ||
593 | } | ||
594 | |||
595 | static ssize_t ds2780_set_pio_pin(struct device *dev, | ||
596 | struct device_attribute *attr, | ||
597 | const char *buf, | ||
598 | size_t count) | ||
599 | { | ||
600 | int ret; | ||
601 | u8 new_setting; | ||
602 | struct power_supply *psy = to_power_supply(dev); | ||
603 | struct ds2780_device_info *dev_info = to_ds2780_device_info(psy); | ||
604 | |||
605 | ret = kstrtou8(buf, 0, &new_setting); | ||
606 | if (ret < 0) | ||
607 | return ret; | ||
608 | |||
609 | if ((new_setting != 0) && (new_setting != 1)) { | ||
610 | dev_err(dev_info->dev, "Invalid pio_pin setting (0 or 1)\n"); | ||
611 | return -EINVAL; | ||
612 | } | ||
613 | |||
614 | ret = ds2780_write(dev_info->w1_dev, &new_setting, | ||
615 | DS2780_SFR_REG, sizeof(u8)); | ||
616 | if (ret < 0) | ||
617 | return ret; | ||
618 | |||
619 | return count; | ||
620 | } | ||
621 | |||
622 | static ssize_t ds2780_read_param_eeprom_bin(struct file *filp, | ||
623 | struct kobject *kobj, | ||
624 | struct bin_attribute *bin_attr, | ||
625 | char *buf, loff_t off, size_t count) | ||
626 | { | ||
627 | struct device *dev = container_of(kobj, struct device, kobj); | ||
628 | struct power_supply *psy = to_power_supply(dev); | ||
629 | struct ds2780_device_info *dev_info = to_ds2780_device_info(psy); | ||
630 | |||
631 | count = min_t(loff_t, count, | ||
632 | DS2780_EEPROM_BLOCK1_END - | ||
633 | DS2780_EEPROM_BLOCK1_START + 1 - off); | ||
634 | |||
635 | return ds2780_read_block(dev_info->w1_dev, buf, | ||
636 | DS2780_EEPROM_BLOCK1_START + off, count); | ||
637 | } | ||
638 | |||
639 | static ssize_t ds2780_write_param_eeprom_bin(struct file *filp, | ||
640 | struct kobject *kobj, | ||
641 | struct bin_attribute *bin_attr, | ||
642 | char *buf, loff_t off, size_t count) | ||
643 | { | ||
644 | struct device *dev = container_of(kobj, struct device, kobj); | ||
645 | struct power_supply *psy = to_power_supply(dev); | ||
646 | struct ds2780_device_info *dev_info = to_ds2780_device_info(psy); | ||
647 | int ret; | ||
648 | |||
649 | count = min_t(loff_t, count, | ||
650 | DS2780_EEPROM_BLOCK1_END - | ||
651 | DS2780_EEPROM_BLOCK1_START + 1 - off); | ||
652 | |||
653 | ret = ds2780_write(dev_info->w1_dev, buf, | ||
654 | DS2780_EEPROM_BLOCK1_START + off, count); | ||
655 | if (ret < 0) | ||
656 | return ret; | ||
657 | |||
658 | ret = ds2780_save_eeprom(dev_info, DS2780_EEPROM_BLOCK1_START); | ||
659 | if (ret < 0) | ||
660 | return ret; | ||
661 | |||
662 | return count; | ||
663 | } | ||
664 | |||
665 | static struct bin_attribute ds2780_param_eeprom_bin_attr = { | ||
666 | .attr = { | ||
667 | .name = "param_eeprom", | ||
668 | .mode = S_IRUGO | S_IWUSR, | ||
669 | }, | ||
670 | .size = DS2780_EEPROM_BLOCK1_END - DS2780_EEPROM_BLOCK1_START + 1, | ||
671 | .read = ds2780_read_param_eeprom_bin, | ||
672 | .write = ds2780_write_param_eeprom_bin, | ||
673 | }; | ||
674 | |||
675 | static ssize_t ds2780_read_user_eeprom_bin(struct file *filp, | ||
676 | struct kobject *kobj, | ||
677 | struct bin_attribute *bin_attr, | ||
678 | char *buf, loff_t off, size_t count) | ||
679 | { | ||
680 | struct device *dev = container_of(kobj, struct device, kobj); | ||
681 | struct power_supply *psy = to_power_supply(dev); | ||
682 | struct ds2780_device_info *dev_info = to_ds2780_device_info(psy); | ||
683 | |||
684 | count = min_t(loff_t, count, | ||
685 | DS2780_EEPROM_BLOCK0_END - | ||
686 | DS2780_EEPROM_BLOCK0_START + 1 - off); | ||
687 | |||
688 | return ds2780_read_block(dev_info->w1_dev, buf, | ||
689 | DS2780_EEPROM_BLOCK0_START + off, count); | ||
690 | |||
691 | } | ||
692 | |||
693 | static ssize_t ds2780_write_user_eeprom_bin(struct file *filp, | ||
694 | struct kobject *kobj, | ||
695 | struct bin_attribute *bin_attr, | ||
696 | char *buf, loff_t off, size_t count) | ||
697 | { | ||
698 | struct device *dev = container_of(kobj, struct device, kobj); | ||
699 | struct power_supply *psy = to_power_supply(dev); | ||
700 | struct ds2780_device_info *dev_info = to_ds2780_device_info(psy); | ||
701 | int ret; | ||
702 | |||
703 | count = min_t(loff_t, count, | ||
704 | DS2780_EEPROM_BLOCK0_END - | ||
705 | DS2780_EEPROM_BLOCK0_START + 1 - off); | ||
706 | |||
707 | ret = ds2780_write(dev_info->w1_dev, buf, | ||
708 | DS2780_EEPROM_BLOCK0_START + off, count); | ||
709 | if (ret < 0) | ||
710 | return ret; | ||
711 | |||
712 | ret = ds2780_save_eeprom(dev_info, DS2780_EEPROM_BLOCK0_START); | ||
713 | if (ret < 0) | ||
714 | return ret; | ||
715 | |||
716 | return count; | ||
717 | } | ||
718 | |||
719 | static struct bin_attribute ds2780_user_eeprom_bin_attr = { | ||
720 | .attr = { | ||
721 | .name = "user_eeprom", | ||
722 | .mode = S_IRUGO | S_IWUSR, | ||
723 | }, | ||
724 | .size = DS2780_EEPROM_BLOCK0_END - DS2780_EEPROM_BLOCK0_START + 1, | ||
725 | .read = ds2780_read_user_eeprom_bin, | ||
726 | .write = ds2780_write_user_eeprom_bin, | ||
727 | }; | ||
728 | |||
729 | static DEVICE_ATTR(pmod_enabled, S_IRUGO | S_IWUSR, ds2780_get_pmod_enabled, | ||
730 | ds2780_set_pmod_enabled); | ||
731 | static DEVICE_ATTR(sense_resistor_value, S_IRUGO | S_IWUSR, | ||
732 | ds2780_get_sense_resistor_value, ds2780_set_sense_resistor_value); | ||
733 | static DEVICE_ATTR(rsgain_setting, S_IRUGO | S_IWUSR, ds2780_get_rsgain_setting, | ||
734 | ds2780_set_rsgain_setting); | ||
735 | static DEVICE_ATTR(pio_pin, S_IRUGO | S_IWUSR, ds2780_get_pio_pin, | ||
736 | ds2780_set_pio_pin); | ||
737 | |||
738 | |||
739 | static struct attribute *ds2780_attributes[] = { | ||
740 | &dev_attr_pmod_enabled.attr, | ||
741 | &dev_attr_sense_resistor_value.attr, | ||
742 | &dev_attr_rsgain_setting.attr, | ||
743 | &dev_attr_pio_pin.attr, | ||
744 | NULL | ||
745 | }; | ||
746 | |||
747 | static const struct attribute_group ds2780_attr_group = { | ||
748 | .attrs = ds2780_attributes, | ||
749 | }; | ||
750 | |||
751 | static int __devinit ds2780_battery_probe(struct platform_device *pdev) | ||
752 | { | ||
753 | int ret = 0; | ||
754 | struct ds2780_device_info *dev_info; | ||
755 | |||
756 | dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); | ||
757 | if (!dev_info) { | ||
758 | ret = -ENOMEM; | ||
759 | goto fail; | ||
760 | } | ||
761 | |||
762 | platform_set_drvdata(pdev, dev_info); | ||
763 | |||
764 | dev_info->dev = &pdev->dev; | ||
765 | dev_info->w1_dev = pdev->dev.parent; | ||
766 | dev_info->bat.name = dev_name(&pdev->dev); | ||
767 | dev_info->bat.type = POWER_SUPPLY_TYPE_BATTERY; | ||
768 | dev_info->bat.properties = ds2780_battery_props; | ||
769 | dev_info->bat.num_properties = ARRAY_SIZE(ds2780_battery_props); | ||
770 | dev_info->bat.get_property = ds2780_battery_get_property; | ||
771 | |||
772 | ret = power_supply_register(&pdev->dev, &dev_info->bat); | ||
773 | if (ret) { | ||
774 | dev_err(dev_info->dev, "failed to register battery\n"); | ||
775 | goto fail_free_info; | ||
776 | } | ||
777 | |||
778 | ret = sysfs_create_group(&dev_info->bat.dev->kobj, &ds2780_attr_group); | ||
779 | if (ret) { | ||
780 | dev_err(dev_info->dev, "failed to create sysfs group\n"); | ||
781 | goto fail_unregister; | ||
782 | } | ||
783 | |||
784 | ret = sysfs_create_bin_file(&dev_info->bat.dev->kobj, | ||
785 | &ds2780_param_eeprom_bin_attr); | ||
786 | if (ret) { | ||
787 | dev_err(dev_info->dev, | ||
788 | "failed to create param eeprom bin file"); | ||
789 | goto fail_remove_group; | ||
790 | } | ||
791 | |||
792 | ret = sysfs_create_bin_file(&dev_info->bat.dev->kobj, | ||
793 | &ds2780_user_eeprom_bin_attr); | ||
794 | if (ret) { | ||
795 | dev_err(dev_info->dev, | ||
796 | "failed to create user eeprom bin file"); | ||
797 | goto fail_remove_bin_file; | ||
798 | } | ||
799 | |||
800 | return 0; | ||
801 | |||
802 | fail_remove_bin_file: | ||
803 | sysfs_remove_bin_file(&dev_info->bat.dev->kobj, | ||
804 | &ds2780_param_eeprom_bin_attr); | ||
805 | fail_remove_group: | ||
806 | sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2780_attr_group); | ||
807 | fail_unregister: | ||
808 | power_supply_unregister(&dev_info->bat); | ||
809 | fail_free_info: | ||
810 | kfree(dev_info); | ||
811 | fail: | ||
812 | return ret; | ||
813 | } | ||
814 | |||
815 | static int __devexit ds2780_battery_remove(struct platform_device *pdev) | ||
816 | { | ||
817 | struct ds2780_device_info *dev_info = platform_get_drvdata(pdev); | ||
818 | |||
819 | /* remove attributes */ | ||
820 | sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2780_attr_group); | ||
821 | |||
822 | power_supply_unregister(&dev_info->bat); | ||
823 | |||
824 | kfree(dev_info); | ||
825 | return 0; | ||
826 | } | ||
827 | |||
828 | MODULE_ALIAS("platform:ds2780-battery"); | ||
829 | |||
830 | static struct platform_driver ds2780_battery_driver = { | ||
831 | .driver = { | ||
832 | .name = "ds2780-battery", | ||
833 | }, | ||
834 | .probe = ds2780_battery_probe, | ||
835 | .remove = ds2780_battery_remove, | ||
836 | }; | ||
837 | |||
838 | static int __init ds2780_battery_init(void) | ||
839 | { | ||
840 | return platform_driver_register(&ds2780_battery_driver); | ||
841 | } | ||
842 | |||
843 | static void __exit ds2780_battery_exit(void) | ||
844 | { | ||
845 | platform_driver_unregister(&ds2780_battery_driver); | ||
846 | } | ||
847 | |||
848 | module_init(ds2780_battery_init); | ||
849 | module_exit(ds2780_battery_exit); | ||
850 | |||
851 | MODULE_LICENSE("GPL"); | ||
852 | MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>"); | ||
853 | MODULE_DESCRIPTION("Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC driver"); | ||
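
The conversions in the new ds2780 driver all follow the same shape: a 10- or 16-bit signed value sits in the upper bits of a 16-bit register pair, so an integer divide drops the padding bits and a per-unit factor scales the result to the units the power_supply class expects. A worked example for the voltage path, using a made-up register value:

/* 0x5A20 read from the VOLT MSB/LSB pair: 23072 / 32 = 721 units of
 * 4.88 mV, reported as 721 * 4880 = 3518480 uV. */
s16 voltage_raw = 0x5A20;
int voltage_uV = (voltage_raw / 32) * 4880;

Beyond the power_supply properties, the driver also exposes pmod_enabled, sense_resistor_value, rsgain_setting and pio_pin attributes plus the param_eeprom and user_eeprom binary files in the battery's sysfs directory.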
diff --git a/drivers/power/gpio-charger.c b/drivers/power/gpio-charger.c index 25b88ac1d44c..718f2c537827 100644 --- a/drivers/power/gpio-charger.c +++ b/drivers/power/gpio-charger.c | |||
@@ -161,12 +161,27 @@ static int __devexit gpio_charger_remove(struct platform_device *pdev) | |||
161 | return 0; | 161 | return 0; |
162 | } | 162 | } |
163 | 163 | ||
164 | #ifdef CONFIG_PM_SLEEP | ||
165 | static int gpio_charger_resume(struct device *dev) | ||
166 | { | ||
167 | struct platform_device *pdev = to_platform_device(dev); | ||
168 | struct gpio_charger *gpio_charger = platform_get_drvdata(pdev); | ||
169 | |||
170 | power_supply_changed(&gpio_charger->charger); | ||
171 | |||
172 | return 0; | ||
173 | } | ||
174 | #endif | ||
175 | |||
176 | static SIMPLE_DEV_PM_OPS(gpio_charger_pm_ops, NULL, gpio_charger_resume); | ||
177 | |||
164 | static struct platform_driver gpio_charger_driver = { | 178 | static struct platform_driver gpio_charger_driver = { |
165 | .probe = gpio_charger_probe, | 179 | .probe = gpio_charger_probe, |
166 | .remove = __devexit_p(gpio_charger_remove), | 180 | .remove = __devexit_p(gpio_charger_remove), |
167 | .driver = { | 181 | .driver = { |
168 | .name = "gpio-charger", | 182 | .name = "gpio-charger", |
169 | .owner = THIS_MODULE, | 183 | .owner = THIS_MODULE, |
184 | .pm = &gpio_charger_pm_ops, | ||
170 | }, | 185 | }, |
171 | }; | 186 | }; |
172 | 187 | ||
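
The gpio-charger hunk adds a resume-only PM callback so that a charger plugged or unplugged while the system was asleep is reported the moment it wakes. A sketch of the same SIMPLE_DEV_PM_OPS pattern for a hypothetical driver; struct example_chip and its psy member are illustrative, only the macro usage mirrors the hunk:

#include <linux/device.h>
#include <linux/pm.h>
#include <linux/power_supply.h>

struct example_chip {
	struct power_supply psy;
};

#ifdef CONFIG_PM_SLEEP
static int example_resume(struct device *dev)
{
	struct example_chip *chip = dev_get_drvdata(dev);

	/* Re-evaluate supply state that may have changed during suspend. */
	power_supply_changed(&chip->psy);
	return 0;
}
#endif

/* The suspend slot is NULL; the macro only references the callbacks when
 * CONFIG_PM_SLEEP is enabled, which is why the function sits inside the
 * same #ifdef as in the hunk above. */
static SIMPLE_DEV_PM_OPS(example_pm_ops, NULL, example_resume);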
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c index 2ad9b14a5ce3..f6d72b402a8e 100644 --- a/drivers/power/isp1704_charger.c +++ b/drivers/power/isp1704_charger.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #include <linux/usb/ulpi.h> | 33 | #include <linux/usb/ulpi.h> |
34 | #include <linux/usb/ch9.h> | 34 | #include <linux/usb/ch9.h> |
35 | #include <linux/usb/gadget.h> | 35 | #include <linux/usb/gadget.h> |
36 | #include <linux/power/isp1704_charger.h> | ||
36 | 37 | ||
37 | /* Vendor specific Power Control register */ | 38 | /* Vendor specific Power Control register */ |
38 | #define ISP1704_PWR_CTRL 0x3d | 39 | #define ISP1704_PWR_CTRL 0x3d |
@@ -71,6 +72,18 @@ struct isp1704_charger { | |||
71 | }; | 72 | }; |
72 | 73 | ||
73 | /* | 74 | /* |
75 | * Disable/enable the power from the isp1704 if a function for it | ||
76 | * has been provided with platform data. | ||
77 | */ | ||
78 | static void isp1704_charger_set_power(struct isp1704_charger *isp, bool on) | ||
79 | { | ||
80 | struct isp1704_charger_data *board = isp->dev->platform_data; | ||
81 | |||
82 | if (board->set_power) | ||
83 | board->set_power(on); | ||
84 | } | ||
85 | |||
86 | /* | ||
74 | * Determine is the charging port DCP (dedicated charger) or CDP (Host/HUB | 87 | * Determine is the charging port DCP (dedicated charger) or CDP (Host/HUB |
75 | * chargers). | 88 | * chargers). |
76 | * | 89 | * |
@@ -222,6 +235,9 @@ static void isp1704_charger_work(struct work_struct *data) | |||
222 | 235 | ||
223 | mutex_lock(&lock); | 236 | mutex_lock(&lock); |
224 | 237 | ||
238 | if (event != USB_EVENT_NONE) | ||
239 | isp1704_charger_set_power(isp, 1); | ||
240 | |||
225 | switch (event) { | 241 | switch (event) { |
226 | case USB_EVENT_VBUS: | 242 | case USB_EVENT_VBUS: |
227 | isp->online = true; | 243 | isp->online = true; |
@@ -269,6 +285,8 @@ static void isp1704_charger_work(struct work_struct *data) | |||
269 | */ | 285 | */ |
270 | if (isp->otg->gadget) | 286 | if (isp->otg->gadget) |
271 | usb_gadget_disconnect(isp->otg->gadget); | 287 | usb_gadget_disconnect(isp->otg->gadget); |
288 | |||
289 | isp1704_charger_set_power(isp, 0); | ||
272 | break; | 290 | break; |
273 | case USB_EVENT_ENUMERATED: | 291 | case USB_EVENT_ENUMERATED: |
274 | if (isp->present) | 292 | if (isp->present) |
@@ -394,6 +412,8 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev) | |||
394 | isp->dev = &pdev->dev; | 412 | isp->dev = &pdev->dev; |
395 | platform_set_drvdata(pdev, isp); | 413 | platform_set_drvdata(pdev, isp); |
396 | 414 | ||
415 | isp1704_charger_set_power(isp, 1); | ||
416 | |||
397 | ret = isp1704_test_ulpi(isp); | 417 | ret = isp1704_test_ulpi(isp); |
398 | if (ret < 0) | 418 | if (ret < 0) |
399 | goto fail1; | 419 | goto fail1; |
@@ -434,6 +454,7 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev) | |||
434 | 454 | ||
435 | /* Detect charger if VBUS is valid (the cable was already plugged). */ | 455 | /* Detect charger if VBUS is valid (the cable was already plugged). */ |
436 | ret = otg_io_read(isp->otg, ULPI_USB_INT_STS); | 456 | ret = otg_io_read(isp->otg, ULPI_USB_INT_STS); |
457 | isp1704_charger_set_power(isp, 0); | ||
437 | if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) { | 458 | if ((ret & ULPI_INT_VBUS_VALID) && !isp->otg->default_a) { |
438 | isp->event = USB_EVENT_VBUS; | 459 | isp->event = USB_EVENT_VBUS; |
439 | schedule_work(&isp->work); | 460 | schedule_work(&isp->work); |
@@ -459,6 +480,7 @@ static int __devexit isp1704_charger_remove(struct platform_device *pdev) | |||
459 | otg_unregister_notifier(isp->otg, &isp->nb); | 480 | otg_unregister_notifier(isp->otg, &isp->nb); |
460 | power_supply_unregister(&isp->psy); | 481 | power_supply_unregister(&isp->psy); |
461 | otg_put_transceiver(isp->otg); | 482 | otg_put_transceiver(isp->otg); |
483 | isp1704_charger_set_power(isp, 0); | ||
462 | kfree(isp); | 484 | kfree(isp); |
463 | 485 | ||
464 | return 0; | 486 | return 0; |
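
The isp1704 hunk adds an optional board hook, isp1704_charger_set_power(), so the transceiver can be powered only while detection or charging actually needs it. A board-file sketch of supplying that hook; struct isp1704_charger_data and its set_power member come from the new header, while the GPIO number, the callback body and the device name are assumptions:

#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/power/isp1704_charger.h>

#define EXAMPLE_ISP1704_POWER_GPIO	67	/* placeholder */

static void example_isp1704_set_power(bool on)
{
	gpio_set_value(EXAMPLE_ISP1704_POWER_GPIO, on);
}

static struct isp1704_charger_data example_isp1704_data = {
	.set_power	= example_isp1704_set_power,
};

static struct platform_device example_isp1704_device = {
	.name	= "isp1704_charger",	/* name assumed from the driver file */
	.dev	= {
		.platform_data	= &example_isp1704_data,
	},
};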
diff --git a/drivers/power/max8903_charger.c b/drivers/power/max8903_charger.c new file mode 100644 index 000000000000..33ff0e37809e --- /dev/null +++ b/drivers/power/max8903_charger.c | |||
@@ -0,0 +1,391 @@ | |||
1 | /* | ||
2 | * max8903_charger.c - Maxim 8903 USB/Adapter Charger Driver | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics | ||
5 | * MyungJoo Ham <myungjoo.ham@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #include <linux/gpio.h> | ||
24 | #include <linux/interrupt.h> | ||
25 | #include <linux/slab.h> | ||
26 | #include <linux/power_supply.h> | ||
27 | #include <linux/platform_device.h> | ||
28 | #include <linux/power/max8903_charger.h> | ||
29 | |||
30 | struct max8903_data { | ||
31 | struct max8903_pdata *pdata; | ||
32 | struct device *dev; | ||
33 | struct power_supply psy; | ||
34 | bool fault; | ||
35 | bool usb_in; | ||
36 | bool ta_in; | ||
37 | }; | ||
38 | |||
39 | static enum power_supply_property max8903_charger_props[] = { | ||
40 | POWER_SUPPLY_PROP_STATUS, /* Charger status output */ | ||
41 | POWER_SUPPLY_PROP_ONLINE, /* External power source */ | ||
42 | POWER_SUPPLY_PROP_HEALTH, /* Fault or OK */ | ||
43 | }; | ||
44 | |||
45 | static int max8903_get_property(struct power_supply *psy, | ||
46 | enum power_supply_property psp, | ||
47 | union power_supply_propval *val) | ||
48 | { | ||
49 | struct max8903_data *data = container_of(psy, | ||
50 | struct max8903_data, psy); | ||
51 | |||
52 | switch (psp) { | ||
53 | case POWER_SUPPLY_PROP_STATUS: | ||
54 | val->intval = POWER_SUPPLY_STATUS_UNKNOWN; | ||
55 | if (data->pdata->chg) { | ||
56 | if (gpio_get_value(data->pdata->chg) == 0) | ||
57 | val->intval = POWER_SUPPLY_STATUS_CHARGING; | ||
58 | else if (data->usb_in || data->ta_in) | ||
59 | val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; | ||
60 | else | ||
61 | val->intval = POWER_SUPPLY_STATUS_DISCHARGING; | ||
62 | } | ||
63 | break; | ||
64 | case POWER_SUPPLY_PROP_ONLINE: | ||
65 | val->intval = 0; | ||
66 | if (data->usb_in || data->ta_in) | ||
67 | val->intval = 1; | ||
68 | break; | ||
69 | case POWER_SUPPLY_PROP_HEALTH: | ||
70 | val->intval = POWER_SUPPLY_HEALTH_GOOD; | ||
71 | if (data->fault) | ||
72 | val->intval = POWER_SUPPLY_HEALTH_UNSPEC_FAILURE; | ||
73 | break; | ||
74 | default: | ||
75 | return -EINVAL; | ||
76 | } | ||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | static irqreturn_t max8903_dcin(int irq, void *_data) | ||
81 | { | ||
82 | struct max8903_data *data = _data; | ||
83 | struct max8903_pdata *pdata = data->pdata; | ||
84 | bool ta_in; | ||
85 | enum power_supply_type old_type; | ||
86 | |||
87 | ta_in = gpio_get_value(pdata->dok) ? false : true; | ||
88 | |||
89 | if (ta_in == data->ta_in) | ||
90 | return IRQ_HANDLED; | ||
91 | |||
92 | data->ta_in = ta_in; | ||
93 | |||
94 | /* Set Current-Limit-Mode 1:DC 0:USB */ | ||
95 | if (pdata->dcm) | ||
96 | gpio_set_value(pdata->dcm, ta_in ? 1 : 0); | ||
97 | |||
98 | /* Charger Enable / Disable (cen is negated) */ | ||
99 | if (pdata->cen) | ||
100 | gpio_set_value(pdata->cen, ta_in ? 0 : | ||
101 | (data->usb_in ? 0 : 1)); | ||
102 | |||
103 | dev_dbg(data->dev, "TA(DC-IN) Charger %s.\n", ta_in ? | ||
104 | "Connected" : "Disconnected"); | ||
105 | |||
106 | old_type = data->psy.type; | ||
107 | |||
108 | if (data->ta_in) | ||
109 | data->psy.type = POWER_SUPPLY_TYPE_MAINS; | ||
110 | else if (data->usb_in) | ||
111 | data->psy.type = POWER_SUPPLY_TYPE_USB; | ||
112 | else | ||
113 | data->psy.type = POWER_SUPPLY_TYPE_BATTERY; | ||
114 | |||
115 | if (old_type != data->psy.type) | ||
116 | power_supply_changed(&data->psy); | ||
117 | |||
118 | return IRQ_HANDLED; | ||
119 | } | ||
120 | |||
121 | static irqreturn_t max8903_usbin(int irq, void *_data) | ||
122 | { | ||
123 | struct max8903_data *data = _data; | ||
124 | struct max8903_pdata *pdata = data->pdata; | ||
125 | bool usb_in; | ||
126 | enum power_supply_type old_type; | ||
127 | |||
128 | usb_in = gpio_get_value(pdata->uok) ? false : true; | ||
129 | |||
130 | if (usb_in == data->usb_in) | ||
131 | return IRQ_HANDLED; | ||
132 | |||
133 | data->usb_in = usb_in; | ||
134 | |||
135 | /* Do not touch Current-Limit-Mode */ | ||
136 | |||
137 | /* Charger Enable / Disable (cen is negated) */ | ||
138 | if (pdata->cen) | ||
139 | gpio_set_value(pdata->cen, usb_in ? 0 : | ||
140 | (data->ta_in ? 0 : 1)); | ||
141 | |||
142 | dev_dbg(data->dev, "USB Charger %s.\n", usb_in ? | ||
143 | "Connected" : "Disconnected"); | ||
144 | |||
145 | old_type = data->psy.type; | ||
146 | |||
147 | if (data->ta_in) | ||
148 | data->psy.type = POWER_SUPPLY_TYPE_MAINS; | ||
149 | else if (data->usb_in) | ||
150 | data->psy.type = POWER_SUPPLY_TYPE_USB; | ||
151 | else | ||
152 | data->psy.type = POWER_SUPPLY_TYPE_BATTERY; | ||
153 | |||
154 | if (old_type != data->psy.type) | ||
155 | power_supply_changed(&data->psy); | ||
156 | |||
157 | return IRQ_HANDLED; | ||
158 | } | ||
159 | |||
160 | static irqreturn_t max8903_fault(int irq, void *_data) | ||
161 | { | ||
162 | struct max8903_data *data = _data; | ||
163 | struct max8903_pdata *pdata = data->pdata; | ||
164 | bool fault; | ||
165 | |||
166 | fault = gpio_get_value(pdata->flt) ? false : true; | ||
167 | |||
168 | if (fault == data->fault) | ||
169 | return IRQ_HANDLED; | ||
170 | |||
171 | data->fault = fault; | ||
172 | |||
173 | if (fault) | ||
174 | dev_err(data->dev, "Charger suffers a fault and stops.\n"); | ||
175 | else | ||
176 | dev_err(data->dev, "Charger recovered from a fault.\n"); | ||
177 | |||
178 | return IRQ_HANDLED; | ||
179 | } | ||
180 | |||
181 | static __devinit int max8903_probe(struct platform_device *pdev) | ||
182 | { | ||
183 | struct max8903_data *data; | ||
184 | struct device *dev = &pdev->dev; | ||
185 | struct max8903_pdata *pdata = pdev->dev.platform_data; | ||
186 | int ret = 0; | ||
187 | int gpio; | ||
188 | int ta_in = 0; | ||
189 | int usb_in = 0; | ||
190 | |||
191 | data = kzalloc(sizeof(struct max8903_data), GFP_KERNEL); | ||
192 | if (data == NULL) { | ||
193 | dev_err(dev, "Cannot allocate memory.\n"); | ||
194 | return -ENOMEM; | ||
195 | } | ||
196 | data->pdata = pdata; | ||
197 | data->dev = dev; | ||
198 | platform_set_drvdata(pdev, data); | ||
199 | |||
200 | if (pdata->dc_valid == false && pdata->usb_valid == false) { | ||
201 | dev_err(dev, "No valid power sources.\n"); | ||
202 | ret = -EINVAL; | ||
203 | goto err; | ||
204 | } | ||
205 | |||
206 | if (pdata->dc_valid) { | ||
207 | if (pdata->dok && gpio_is_valid(pdata->dok) && | ||
208 | pdata->dcm && gpio_is_valid(pdata->dcm)) { | ||
209 | gpio = pdata->dok; /* PULL_UPed Interrupt */ | ||
210 | ta_in = gpio_get_value(gpio) ? 0 : 1; | ||
211 | |||
212 | gpio = pdata->dcm; /* Output */ | ||
213 | gpio_set_value(gpio, ta_in); | ||
214 | } else { | ||
215 | dev_err(dev, "When DC is wired, DOK and DCM should" | ||
216 | " be wired as well.\n"); | ||
217 | ret = -EINVAL; | ||
218 | goto err; | ||
219 | } | ||
220 | } else { | ||
221 | if (pdata->dcm) { | ||
222 | if (gpio_is_valid(pdata->dcm)) | ||
223 | gpio_set_value(pdata->dcm, 0); | ||
224 | else { | ||
225 | dev_err(dev, "Invalid pin: dcm.\n"); | ||
226 | ret = -EINVAL; | ||
227 | goto err; | ||
228 | } | ||
229 | } | ||
230 | } | ||
231 | |||
232 | if (pdata->usb_valid) { | ||
233 | if (pdata->uok && gpio_is_valid(pdata->uok)) { | ||
234 | gpio = pdata->uok; | ||
235 | usb_in = gpio_get_value(gpio) ? 0 : 1; | ||
236 | } else { | ||
237 | dev_err(dev, "When USB is wired, UOK should be wired " | ||
238 | "as well.\n"); | ||
239 | ret = -EINVAL; | ||
240 | goto err; | ||
241 | } | ||
242 | } | ||
243 | |||
244 | if (pdata->cen) { | ||
245 | if (gpio_is_valid(pdata->cen)) { | ||
246 | gpio_set_value(pdata->cen, (ta_in || usb_in) ? 0 : 1); | ||
247 | } else { | ||
248 | dev_err(dev, "Invalid pin: cen.\n"); | ||
249 | ret = -EINVAL; | ||
250 | goto err; | ||
251 | } | ||
252 | } | ||
253 | |||
254 | if (pdata->chg) { | ||
255 | if (!gpio_is_valid(pdata->chg)) { | ||
256 | dev_err(dev, "Invalid pin: chg.\n"); | ||
257 | ret = -EINVAL; | ||
258 | goto err; | ||
259 | } | ||
260 | } | ||
261 | |||
262 | if (pdata->flt) { | ||
263 | if (!gpio_is_valid(pdata->flt)) { | ||
264 | dev_err(dev, "Invalid pin: flt.\n"); | ||
265 | ret = -EINVAL; | ||
266 | goto err; | ||
267 | } | ||
268 | } | ||
269 | |||
270 | if (pdata->usus) { | ||
271 | if (!gpio_is_valid(pdata->usus)) { | ||
272 | dev_err(dev, "Invalid pin: usus.\n"); | ||
273 | ret = -EINVAL; | ||
274 | goto err; | ||
275 | } | ||
276 | } | ||
277 | |||
278 | data->fault = false; | ||
279 | data->ta_in = ta_in; | ||
280 | data->usb_in = usb_in; | ||
281 | |||
282 | data->psy.name = "max8903_charger"; | ||
283 | data->psy.type = (ta_in) ? POWER_SUPPLY_TYPE_MAINS : | ||
284 | ((usb_in) ? POWER_SUPPLY_TYPE_USB : | ||
285 | POWER_SUPPLY_TYPE_BATTERY); | ||
286 | data->psy.get_property = max8903_get_property; | ||
287 | data->psy.properties = max8903_charger_props; | ||
288 | data->psy.num_properties = ARRAY_SIZE(max8903_charger_props); | ||
289 | |||
290 | ret = power_supply_register(dev, &data->psy); | ||
291 | if (ret) { | ||
292 | dev_err(dev, "failed: power supply register.\n"); | ||
293 | goto err; | ||
294 | } | ||
295 | |||
296 | if (pdata->dc_valid) { | ||
297 | ret = request_threaded_irq(gpio_to_irq(pdata->dok), | ||
298 | NULL, max8903_dcin, | ||
299 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | ||
300 | "MAX8903 DC IN", data); | ||
301 | if (ret) { | ||
302 | dev_err(dev, "Cannot request irq %d for DC (%d)\n", | ||
303 | gpio_to_irq(pdata->dok), ret); | ||
304 | goto err_psy; | ||
305 | } | ||
306 | } | ||
307 | |||
308 | if (pdata->usb_valid) { | ||
309 | ret = request_threaded_irq(gpio_to_irq(pdata->uok), | ||
310 | NULL, max8903_usbin, | ||
311 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | ||
312 | "MAX8903 USB IN", data); | ||
313 | if (ret) { | ||
314 | dev_err(dev, "Cannot request irq %d for USB (%d)\n", | ||
315 | gpio_to_irq(pdata->uok), ret); | ||
316 | goto err_dc_irq; | ||
317 | } | ||
318 | } | ||
319 | |||
320 | if (pdata->flt) { | ||
321 | ret = request_threaded_irq(gpio_to_irq(pdata->flt), | ||
322 | NULL, max8903_fault, | ||
323 | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, | ||
324 | "MAX8903 Fault", data); | ||
325 | if (ret) { | ||
326 | dev_err(dev, "Cannot request irq %d for Fault (%d)\n", | ||
327 | gpio_to_irq(pdata->flt), ret); | ||
328 | goto err_usb_irq; | ||
329 | } | ||
330 | } | ||
331 | |||
332 | return 0; | ||
333 | |||
334 | err_usb_irq: | ||
335 | if (pdata->usb_valid) | ||
336 | free_irq(gpio_to_irq(pdata->uok), data); | ||
337 | err_dc_irq: | ||
338 | if (pdata->dc_valid) | ||
339 | free_irq(gpio_to_irq(pdata->dok), data); | ||
340 | err_psy: | ||
341 | power_supply_unregister(&data->psy); | ||
342 | err: | ||
343 | kfree(data); | ||
344 | return ret; | ||
345 | } | ||
346 | |||
347 | static __devexit int max8903_remove(struct platform_device *pdev) | ||
348 | { | ||
349 | struct max8903_data *data = platform_get_drvdata(pdev); | ||
350 | |||
351 | if (data) { | ||
352 | struct max8903_pdata *pdata = data->pdata; | ||
353 | |||
354 | if (pdata->flt) | ||
355 | free_irq(gpio_to_irq(pdata->flt), data); | ||
356 | if (pdata->usb_valid) | ||
357 | free_irq(gpio_to_irq(pdata->uok), data); | ||
358 | if (pdata->dc_valid) | ||
359 | free_irq(gpio_to_irq(pdata->dok), data); | ||
360 | power_supply_unregister(&data->psy); | ||
361 | kfree(data); | ||
362 | } | ||
363 | |||
364 | return 0; | ||
365 | } | ||
366 | |||
367 | static struct platform_driver max8903_driver = { | ||
368 | .probe = max8903_probe, | ||
369 | .remove = __devexit_p(max8903_remove), | ||
370 | .driver = { | ||
371 | .name = "max8903-charger", | ||
372 | .owner = THIS_MODULE, | ||
373 | }, | ||
374 | }; | ||
375 | |||
376 | static int __init max8903_init(void) | ||
377 | { | ||
378 | return platform_driver_register(&max8903_driver); | ||
379 | } | ||
380 | module_init(max8903_init); | ||
381 | |||
382 | static void __exit max8903_exit(void) | ||
383 | { | ||
384 | platform_driver_unregister(&max8903_driver); | ||
385 | } | ||
386 | module_exit(max8903_exit); | ||
387 | |||
388 | MODULE_LICENSE("GPL"); | ||
389 | MODULE_DESCRIPTION("MAX8903 Charger Driver"); | ||
390 | MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); | ||
391 | MODULE_ALIAS("max8903-charger"); | ||
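For reference, the GPIO numbers validated in the probe path above normally reach the driver through platform data from board code. The following sketch is illustrative only: the GPIO numbers are invented, and it fills in just the max8903_pdata fields exercised by the probe/remove paths shown here, so the real structure may carry additional members.

	/* Hypothetical board-file sketch; GPIO numbers are placeholders. */
	static struct max8903_pdata charger_pdata = {
		.dok		= 42,	/* DC adapter OK gpio, used when dc_valid is set */
		.uok		= 43,	/* USB OK gpio, used when usb_valid is set */
		.chg		= 44,	/* charging-status gpio (optional) */
		.flt		= 45,	/* fault gpio, requested as an IRQ when present */
		.usus		= 46,	/* USB-suspend gpio (optional) */
		.dc_valid	= 1,
		.usb_valid	= 1,
	};

	static struct platform_device charger_device = {
		.name	= "max8903-charger",	/* matches the driver name above */
		.id	= -1,
		.dev	= {
			.platform_data = &charger_pdata,
		},
	};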
diff --git a/drivers/power/max8925_power.c b/drivers/power/max8925_power.c index 8e5aec260866..a70e16d3a3dc 100644 --- a/drivers/power/max8925_power.c +++ b/drivers/power/max8925_power.c | |||
@@ -425,16 +425,11 @@ static __devexit int max8925_deinit_charger(struct max8925_power_info *info) | |||
425 | static __devinit int max8925_power_probe(struct platform_device *pdev) | 425 | static __devinit int max8925_power_probe(struct platform_device *pdev) |
426 | { | 426 | { |
427 | struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); | 427 | struct max8925_chip *chip = dev_get_drvdata(pdev->dev.parent); |
428 | struct max8925_platform_data *max8925_pdata; | ||
429 | struct max8925_power_pdata *pdata = NULL; | 428 | struct max8925_power_pdata *pdata = NULL; |
430 | struct max8925_power_info *info; | 429 | struct max8925_power_info *info; |
431 | int ret; | 430 | int ret; |
432 | 431 | ||
433 | if (pdev->dev.parent->platform_data) { | 432 | pdata = pdev->dev.platform_data; |
434 | max8925_pdata = pdev->dev.parent->platform_data; | ||
435 | pdata = max8925_pdata->power; | ||
436 | } | ||
437 | |||
438 | if (!pdata) { | 433 | if (!pdata) { |
439 | dev_err(&pdev->dev, "platform data isn't assigned to " | 434 | dev_err(&pdev->dev, "platform data isn't assigned to " |
440 | "power supply\n"); | 435 | "power supply\n"); |
@@ -447,6 +442,7 @@ static __devinit int max8925_power_probe(struct platform_device *pdev) | |||
447 | info->chip = chip; | 442 | info->chip = chip; |
448 | info->gpm = chip->i2c; | 443 | info->gpm = chip->i2c; |
449 | info->adc = chip->adc; | 444 | info->adc = chip->adc; |
445 | platform_set_drvdata(pdev, info); | ||
450 | 446 | ||
451 | info->ac.name = "max8925-ac"; | 447 | info->ac.name = "max8925-ac"; |
452 | info->ac.type = POWER_SUPPLY_TYPE_MAINS; | 448 | info->ac.type = POWER_SUPPLY_TYPE_MAINS; |
@@ -482,8 +478,6 @@ static __devinit int max8925_power_probe(struct platform_device *pdev) | |||
482 | info->topoff_threshold = pdata->topoff_threshold; | 478 | info->topoff_threshold = pdata->topoff_threshold; |
483 | info->fast_charge = pdata->fast_charge; | 479 | info->fast_charge = pdata->fast_charge; |
484 | info->set_charger = pdata->set_charger; | 480 | info->set_charger = pdata->set_charger; |
485 | dev_set_drvdata(&pdev->dev, info); | ||
486 | platform_set_drvdata(pdev, info); | ||
487 | 481 | ||
488 | max8925_init_charger(chip, info); | 482 | max8925_init_charger(chip, info); |
489 | return 0; | 483 | return 0; |
diff --git a/drivers/power/test_power.c b/drivers/power/test_power.c index 0cd9f67d33e5..b527c93bf2f3 100644 --- a/drivers/power/test_power.c +++ b/drivers/power/test_power.c | |||
@@ -3,6 +3,12 @@ | |||
3 | * | 3 | * |
4 | * Copyright 2010 Anton Vorontsov <cbouatmailru@gmail.com> | 4 | * Copyright 2010 Anton Vorontsov <cbouatmailru@gmail.com> |
5 | * | 5 | * |
6 | * Dynamic module parameter code from the Virtual Battery Driver | ||
7 | * Copyright (C) 2008 Pylone, Inc. | ||
8 | * By: Masashi YOKOTA <yokota@pylone.jp> | ||
9 | * Originally found here: | ||
10 | * http://downloads.pylone.jp/src/virtual_battery/virtual_battery-0.0.1.tar.bz2 | ||
11 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
7 | * it under the terms of the GNU General Public License version 2 as | 13 | * it under the terms of the GNU General Public License version 2 as |
8 | * published by the Free Software Foundation. | 14 | * published by the Free Software Foundation. |
@@ -15,8 +21,12 @@ | |||
15 | #include <linux/delay.h> | 21 | #include <linux/delay.h> |
16 | #include <linux/vermagic.h> | 22 | #include <linux/vermagic.h> |
17 | 23 | ||
18 | static int test_power_ac_online = 1; | 24 | static int ac_online = 1; |
19 | static int test_power_battery_status = POWER_SUPPLY_STATUS_CHARGING; | 25 | static int battery_status = POWER_SUPPLY_STATUS_DISCHARGING; |
26 | static int battery_health = POWER_SUPPLY_HEALTH_GOOD; | ||
27 | static int battery_present = 1; /* true */ | ||
28 | static int battery_technology = POWER_SUPPLY_TECHNOLOGY_LION; | ||
29 | static int battery_capacity = 50; | ||
20 | 30 | ||
21 | static int test_power_get_ac_property(struct power_supply *psy, | 31 | static int test_power_get_ac_property(struct power_supply *psy, |
22 | enum power_supply_property psp, | 32 | enum power_supply_property psp, |
@@ -24,7 +34,7 @@ static int test_power_get_ac_property(struct power_supply *psy, | |||
24 | { | 34 | { |
25 | switch (psp) { | 35 | switch (psp) { |
26 | case POWER_SUPPLY_PROP_ONLINE: | 36 | case POWER_SUPPLY_PROP_ONLINE: |
27 | val->intval = test_power_ac_online; | 37 | val->intval = ac_online; |
28 | break; | 38 | break; |
29 | default: | 39 | default: |
30 | return -EINVAL; | 40 | return -EINVAL; |
@@ -47,22 +57,30 @@ static int test_power_get_battery_property(struct power_supply *psy, | |||
47 | val->strval = UTS_RELEASE; | 57 | val->strval = UTS_RELEASE; |
48 | break; | 58 | break; |
49 | case POWER_SUPPLY_PROP_STATUS: | 59 | case POWER_SUPPLY_PROP_STATUS: |
50 | val->intval = test_power_battery_status; | 60 | val->intval = battery_status; |
51 | break; | 61 | break; |
52 | case POWER_SUPPLY_PROP_CHARGE_TYPE: | 62 | case POWER_SUPPLY_PROP_CHARGE_TYPE: |
53 | val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; | 63 | val->intval = POWER_SUPPLY_CHARGE_TYPE_FAST; |
54 | break; | 64 | break; |
55 | case POWER_SUPPLY_PROP_HEALTH: | 65 | case POWER_SUPPLY_PROP_HEALTH: |
56 | val->intval = POWER_SUPPLY_HEALTH_GOOD; | 66 | val->intval = battery_health; |
67 | break; | ||
68 | case POWER_SUPPLY_PROP_PRESENT: | ||
69 | val->intval = battery_present; | ||
57 | break; | 70 | break; |
58 | case POWER_SUPPLY_PROP_TECHNOLOGY: | 71 | case POWER_SUPPLY_PROP_TECHNOLOGY: |
59 | val->intval = POWER_SUPPLY_TECHNOLOGY_LION; | 72 | val->intval = battery_technology; |
60 | break; | 73 | break; |
61 | case POWER_SUPPLY_PROP_CAPACITY_LEVEL: | 74 | case POWER_SUPPLY_PROP_CAPACITY_LEVEL: |
62 | val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; | 75 | val->intval = POWER_SUPPLY_CAPACITY_LEVEL_NORMAL; |
63 | break; | 76 | break; |
64 | case POWER_SUPPLY_PROP_CAPACITY: | 77 | case POWER_SUPPLY_PROP_CAPACITY: |
65 | val->intval = 50; | 78 | case POWER_SUPPLY_PROP_CHARGE_NOW: |
79 | val->intval = battery_capacity; | ||
80 | break; | ||
81 | case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: | ||
82 | case POWER_SUPPLY_PROP_CHARGE_FULL: | ||
83 | val->intval = 100; | ||
66 | break; | 84 | break; |
67 | case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: | 85 | case POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG: |
68 | case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: | 86 | case POWER_SUPPLY_PROP_TIME_TO_FULL_NOW: |
@@ -84,9 +102,11 @@ static enum power_supply_property test_power_battery_props[] = { | |||
84 | POWER_SUPPLY_PROP_STATUS, | 102 | POWER_SUPPLY_PROP_STATUS, |
85 | POWER_SUPPLY_PROP_CHARGE_TYPE, | 103 | POWER_SUPPLY_PROP_CHARGE_TYPE, |
86 | POWER_SUPPLY_PROP_HEALTH, | 104 | POWER_SUPPLY_PROP_HEALTH, |
105 | POWER_SUPPLY_PROP_PRESENT, | ||
87 | POWER_SUPPLY_PROP_TECHNOLOGY, | 106 | POWER_SUPPLY_PROP_TECHNOLOGY, |
107 | POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, | ||
88 | POWER_SUPPLY_PROP_CHARGE_FULL, | 108 | POWER_SUPPLY_PROP_CHARGE_FULL, |
89 | POWER_SUPPLY_PROP_CHARGE_EMPTY, | 109 | POWER_SUPPLY_PROP_CHARGE_NOW, |
90 | POWER_SUPPLY_PROP_CAPACITY, | 110 | POWER_SUPPLY_PROP_CAPACITY, |
91 | POWER_SUPPLY_PROP_CAPACITY_LEVEL, | 111 | POWER_SUPPLY_PROP_CAPACITY_LEVEL, |
92 | POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, | 112 | POWER_SUPPLY_PROP_TIME_TO_EMPTY_AVG, |
@@ -118,6 +138,7 @@ static struct power_supply test_power_supplies[] = { | |||
118 | }, | 138 | }, |
119 | }; | 139 | }; |
120 | 140 | ||
141 | |||
121 | static int __init test_power_init(void) | 142 | static int __init test_power_init(void) |
122 | { | 143 | { |
123 | int i; | 144 | int i; |
@@ -145,8 +166,8 @@ static void __exit test_power_exit(void) | |||
145 | int i; | 166 | int i; |
146 | 167 | ||
147 | /* Let's see how we handle changes... */ | 168 | /* Let's see how we handle changes... */ |
148 | test_power_ac_online = 0; | 169 | ac_online = 0; |
149 | test_power_battery_status = POWER_SUPPLY_STATUS_DISCHARGING; | 170 | battery_status = POWER_SUPPLY_STATUS_DISCHARGING; |
150 | for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) | 171 | for (i = 0; i < ARRAY_SIZE(test_power_supplies); i++) |
151 | power_supply_changed(&test_power_supplies[i]); | 172 | power_supply_changed(&test_power_supplies[i]); |
152 | pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n", | 173 | pr_info("%s: 'changed' event sent, sleeping for 10 seconds...\n", |
@@ -158,6 +179,241 @@ static void __exit test_power_exit(void) | |||
158 | } | 179 | } |
159 | module_exit(test_power_exit); | 180 | module_exit(test_power_exit); |
160 | 181 | ||
182 | |||
183 | |||
184 | #define MAX_KEYLENGTH 256 | ||
185 | struct battery_property_map { | ||
186 | int value; | ||
187 | char const *key; | ||
188 | }; | ||
189 | |||
190 | static struct battery_property_map map_ac_online[] = { | ||
191 | { 0, "off" }, | ||
192 | { 1, "on" }, | ||
193 | { -1, NULL }, | ||
194 | }; | ||
195 | |||
196 | static struct battery_property_map map_status[] = { | ||
197 | { POWER_SUPPLY_STATUS_CHARGING, "charging" }, | ||
198 | { POWER_SUPPLY_STATUS_DISCHARGING, "discharging" }, | ||
199 | { POWER_SUPPLY_STATUS_NOT_CHARGING, "not-charging" }, | ||
200 | { POWER_SUPPLY_STATUS_FULL, "full" }, | ||
201 | { -1, NULL }, | ||
202 | }; | ||
203 | |||
204 | static struct battery_property_map map_health[] = { | ||
205 | { POWER_SUPPLY_HEALTH_GOOD, "good" }, | ||
206 | { POWER_SUPPLY_HEALTH_OVERHEAT, "overheat" }, | ||
207 | { POWER_SUPPLY_HEALTH_DEAD, "dead" }, | ||
208 | { POWER_SUPPLY_HEALTH_OVERVOLTAGE, "overvoltage" }, | ||
209 | { POWER_SUPPLY_HEALTH_UNSPEC_FAILURE, "failure" }, | ||
210 | { -1, NULL }, | ||
211 | }; | ||
212 | |||
213 | static struct battery_property_map map_present[] = { | ||
214 | { 0, "false" }, | ||
215 | { 1, "true" }, | ||
216 | { -1, NULL }, | ||
217 | }; | ||
218 | |||
219 | static struct battery_property_map map_technology[] = { | ||
220 | { POWER_SUPPLY_TECHNOLOGY_NiMH, "NiMH" }, | ||
221 | { POWER_SUPPLY_TECHNOLOGY_LION, "LION" }, | ||
222 | { POWER_SUPPLY_TECHNOLOGY_LIPO, "LIPO" }, | ||
223 | { POWER_SUPPLY_TECHNOLOGY_LiFe, "LiFe" }, | ||
224 | { POWER_SUPPLY_TECHNOLOGY_NiCd, "NiCd" }, | ||
225 | { POWER_SUPPLY_TECHNOLOGY_LiMn, "LiMn" }, | ||
226 | { -1, NULL }, | ||
227 | }; | ||
228 | |||
229 | |||
230 | static int map_get_value(struct battery_property_map *map, const char *key, | ||
231 | int def_val) | ||
232 | { | ||
233 | char buf[MAX_KEYLENGTH]; | ||
234 | int cr; | ||
235 | |||
236 | strncpy(buf, key, MAX_KEYLENGTH); | ||
237 | buf[MAX_KEYLENGTH-1] = '\0'; | ||
238 | |||
239 | cr = strnlen(buf, MAX_KEYLENGTH) - 1; | ||
240 | if (buf[cr] == '\n') | ||
241 | buf[cr] = '\0'; | ||
242 | |||
243 | while (map->key) { | ||
244 | if (strncasecmp(map->key, buf, MAX_KEYLENGTH) == 0) | ||
245 | return map->value; | ||
246 | map++; | ||
247 | } | ||
248 | |||
249 | return def_val; | ||
250 | } | ||
251 | |||
252 | |||
253 | static const char *map_get_key(struct battery_property_map *map, int value, | ||
254 | const char *def_key) | ||
255 | { | ||
256 | while (map->key) { | ||
257 | if (map->value == value) | ||
258 | return map->key; | ||
259 | map++; | ||
260 | } | ||
261 | |||
262 | return def_key; | ||
263 | } | ||
264 | |||
265 | static int param_set_ac_online(const char *key, const struct kernel_param *kp) | ||
266 | { | ||
267 | ac_online = map_get_value(map_ac_online, key, ac_online); | ||
268 | power_supply_changed(&test_power_supplies[0]); | ||
269 | return 0; | ||
270 | } | ||
271 | |||
272 | static int param_get_ac_online(char *buffer, const struct kernel_param *kp) | ||
273 | { | ||
274 | strcpy(buffer, map_get_key(map_ac_online, ac_online, "unknown")); | ||
275 | return strlen(buffer); | ||
276 | } | ||
277 | |||
278 | static int param_set_battery_status(const char *key, | ||
279 | const struct kernel_param *kp) | ||
280 | { | ||
281 | battery_status = map_get_value(map_status, key, battery_status); | ||
282 | power_supply_changed(&test_power_supplies[1]); | ||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static int param_get_battery_status(char *buffer, const struct kernel_param *kp) | ||
287 | { | ||
288 | strcpy(buffer, map_get_key(map_status, battery_status, "unknown")); | ||
289 | return strlen(buffer); | ||
290 | } | ||
291 | |||
292 | static int param_set_battery_health(const char *key, | ||
293 | const struct kernel_param *kp) | ||
294 | { | ||
295 | battery_health = map_get_value(map_health, key, battery_health); | ||
296 | power_supply_changed(&test_power_supplies[1]); | ||
297 | return 0; | ||
298 | } | ||
299 | |||
300 | static int param_get_battery_health(char *buffer, const struct kernel_param *kp) | ||
301 | { | ||
302 | strcpy(buffer, map_get_key(map_health, battery_health, "unknown")); | ||
303 | return strlen(buffer); | ||
304 | } | ||
305 | |||
306 | static int param_set_battery_present(const char *key, | ||
307 | const struct kernel_param *kp) | ||
308 | { | ||
309 | battery_present = map_get_value(map_present, key, battery_present); | ||
310 | power_supply_changed(&test_power_supplies[0]); | ||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | static int param_get_battery_present(char *buffer, | ||
315 | const struct kernel_param *kp) | ||
316 | { | ||
317 | strcpy(buffer, map_get_key(map_present, battery_present, "unknown")); | ||
318 | return strlen(buffer); | ||
319 | } | ||
320 | |||
321 | static int param_set_battery_technology(const char *key, | ||
322 | const struct kernel_param *kp) | ||
323 | { | ||
324 | battery_technology = map_get_value(map_technology, key, | ||
325 | battery_technology); | ||
326 | power_supply_changed(&test_power_supplies[1]); | ||
327 | return 0; | ||
328 | } | ||
329 | |||
330 | static int param_get_battery_technology(char *buffer, | ||
331 | const struct kernel_param *kp) | ||
332 | { | ||
333 | strcpy(buffer, | ||
334 | map_get_key(map_technology, battery_technology, "unknown")); | ||
335 | return strlen(buffer); | ||
336 | } | ||
337 | |||
338 | static int param_set_battery_capacity(const char *key, | ||
339 | const struct kernel_param *kp) | ||
340 | { | ||
341 | int tmp; | ||
342 | |||
343 | if (1 != sscanf(key, "%d", &tmp)) | ||
344 | return -EINVAL; | ||
345 | |||
346 | battery_capacity = tmp; | ||
347 | power_supply_changed(&test_power_supplies[1]); | ||
348 | return 0; | ||
349 | } | ||
350 | |||
351 | #define param_get_battery_capacity param_get_int | ||
352 | |||
353 | |||
354 | |||
355 | static struct kernel_param_ops param_ops_ac_online = { | ||
356 | .set = param_set_ac_online, | ||
357 | .get = param_get_ac_online, | ||
358 | }; | ||
359 | |||
360 | static struct kernel_param_ops param_ops_battery_status = { | ||
361 | .set = param_set_battery_status, | ||
362 | .get = param_get_battery_status, | ||
363 | }; | ||
364 | |||
365 | static struct kernel_param_ops param_ops_battery_present = { | ||
366 | .set = param_set_battery_present, | ||
367 | .get = param_get_battery_present, | ||
368 | }; | ||
369 | |||
370 | static struct kernel_param_ops param_ops_battery_technology = { | ||
371 | .set = param_set_battery_technology, | ||
372 | .get = param_get_battery_technology, | ||
373 | }; | ||
374 | |||
375 | static struct kernel_param_ops param_ops_battery_health = { | ||
376 | .set = param_set_battery_health, | ||
377 | .get = param_get_battery_health, | ||
378 | }; | ||
379 | |||
380 | static struct kernel_param_ops param_ops_battery_capacity = { | ||
381 | .set = param_set_battery_capacity, | ||
382 | .get = param_get_battery_capacity, | ||
383 | }; | ||
384 | |||
385 | |||
386 | #define param_check_ac_online(name, p) __param_check(name, p, void); | ||
387 | #define param_check_battery_status(name, p) __param_check(name, p, void); | ||
388 | #define param_check_battery_present(name, p) __param_check(name, p, void); | ||
389 | #define param_check_battery_technology(name, p) __param_check(name, p, void); | ||
390 | #define param_check_battery_health(name, p) __param_check(name, p, void); | ||
391 | #define param_check_battery_capacity(name, p) __param_check(name, p, void); | ||
392 | |||
393 | |||
394 | module_param(ac_online, ac_online, 0644); | ||
395 | MODULE_PARM_DESC(ac_online, "AC charging state <on|off>"); | ||
396 | |||
397 | module_param(battery_status, battery_status, 0644); | ||
398 | MODULE_PARM_DESC(battery_status, | ||
399 | "battery status <charging|discharging|not-charging|full>"); | ||
400 | |||
401 | module_param(battery_present, battery_present, 0644); | ||
402 | MODULE_PARM_DESC(battery_present, | ||
403 | "battery presence state <true|false>"); | ||
404 | |||
405 | module_param(battery_technology, battery_technology, 0644); | ||
406 | MODULE_PARM_DESC(battery_technology, | ||
407 | "battery technology <NiMH|LION|LIPO|LiFe|NiCd|LiMn>"); | ||
408 | |||
409 | module_param(battery_health, battery_health, 0644); | ||
410 | MODULE_PARM_DESC(battery_health, | ||
411 | "battery health state <good|overheat|dead|overvoltage|failure>"); | ||
412 | |||
413 | module_param(battery_capacity, battery_capacity, 0644); | ||
414 | MODULE_PARM_DESC(battery_capacity, "battery capacity (percentage)"); | ||
415 | |||
416 | |||
161 | MODULE_DESCRIPTION("Power supply driver for testing"); | 417 | MODULE_DESCRIPTION("Power supply driver for testing"); |
162 | MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>"); | 418 | MODULE_AUTHOR("Anton Vorontsov <cbouatmailru@gmail.com>"); |
163 | MODULE_LICENSE("GPL"); | 419 | MODULE_LICENSE("GPL"); |
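Because the new parameters are registered with mode 0644, they can be changed while the module is loaded; writable module parameters appear under /sys/module/test_power/parameters/. The user-space sketch below is illustrative only (it assumes the module is loaded under the name test_power) and drives two of the parameters using keyword strings taken from the maps above.

	/* Minimal user-space sketch: toggle the simulated AC and battery state.
	 * Assumes test_power is loaded and its parameters are writable (0644). */
	#include <stdio.h>

	static int write_param(const char *name, const char *value)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path),
			 "/sys/module/test_power/parameters/%s", name);
		f = fopen(path, "w");
		if (!f) {
			perror(path);
			return -1;
		}
		fputs(value, f);
		return fclose(f) ? -1 : 0;
	}

	int main(void)
	{
		/* Keywords come from map_ac_online and map_status above. */
		write_param("ac_online", "off");
		write_param("battery_status", "discharging");
		return 0;
	}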
diff --git a/drivers/power/z2_battery.c b/drivers/power/z2_battery.c index e5ced3a4c1ed..d119c38b3ff6 100644 --- a/drivers/power/z2_battery.c +++ b/drivers/power/z2_battery.c | |||
@@ -271,24 +271,33 @@ static int __devexit z2_batt_remove(struct i2c_client *client) | |||
271 | } | 271 | } |
272 | 272 | ||
273 | #ifdef CONFIG_PM | 273 | #ifdef CONFIG_PM |
274 | static int z2_batt_suspend(struct i2c_client *client, pm_message_t state) | 274 | static int z2_batt_suspend(struct device *dev) |
275 | { | 275 | { |
276 | struct i2c_client *client = to_i2c_client(dev); | ||
276 | struct z2_charger *charger = i2c_get_clientdata(client); | 277 | struct z2_charger *charger = i2c_get_clientdata(client); |
277 | 278 | ||
278 | flush_work_sync(&charger->bat_work); | 279 | flush_work_sync(&charger->bat_work); |
279 | return 0; | 280 | return 0; |
280 | } | 281 | } |
281 | 282 | ||
282 | static int z2_batt_resume(struct i2c_client *client) | 283 | static int z2_batt_resume(struct device *dev) |
283 | { | 284 | { |
285 | struct i2c_client *client = to_i2c_client(dev); | ||
284 | struct z2_charger *charger = i2c_get_clientdata(client); | 286 | struct z2_charger *charger = i2c_get_clientdata(client); |
285 | 287 | ||
286 | schedule_work(&charger->bat_work); | 288 | schedule_work(&charger->bat_work); |
287 | return 0; | 289 | return 0; |
288 | } | 290 | } |
291 | |||
292 | static const struct dev_pm_ops z2_battery_pm_ops = { | ||
293 | .suspend = z2_batt_suspend, | ||
294 | .resume = z2_batt_resume, | ||
295 | }; | ||
296 | |||
297 | #define Z2_BATTERY_PM_OPS (&z2_battery_pm_ops) | ||
298 | |||
289 | #else | 299 | #else |
290 | #define z2_batt_suspend NULL | 300 | #define Z2_BATTERY_PM_OPS (NULL) |
291 | #define z2_batt_resume NULL | ||
292 | #endif | 301 | #endif |
293 | 302 | ||
294 | static const struct i2c_device_id z2_batt_id[] = { | 303 | static const struct i2c_device_id z2_batt_id[] = { |
@@ -301,11 +310,10 @@ static struct i2c_driver z2_batt_driver = { | |||
301 | .driver = { | 310 | .driver = { |
302 | .name = "z2-battery", | 311 | .name = "z2-battery", |
303 | .owner = THIS_MODULE, | 312 | .owner = THIS_MODULE, |
313 | .pm = Z2_BATTERY_PM_OPS | ||
304 | }, | 314 | }, |
305 | .probe = z2_batt_probe, | 315 | .probe = z2_batt_probe, |
306 | .remove = z2_batt_remove, | 316 | .remove = z2_batt_remove, |
307 | .suspend = z2_batt_suspend, | ||
308 | .resume = z2_batt_resume, | ||
309 | .id_table = z2_batt_id, | 317 | .id_table = z2_batt_id, |
310 | }; | 318 | }; |
311 | 319 | ||
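The open-coded dev_pm_ops table above could arguably be shortened with the SIMPLE_DEV_PM_OPS helper from <linux/pm.h>, which fills in the same suspend/resume callbacks and reuses them for the hibernation transitions. A sketch of that alternative, keeping the existing CONFIG_PM guard:

	#ifdef CONFIG_PM
	/* Expands to a const struct dev_pm_ops with .suspend/.resume (and the
	 * hibernation callbacks) wired to the two handlers above. */
	static SIMPLE_DEV_PM_OPS(z2_battery_pm_ops, z2_batt_suspend, z2_batt_resume);
	#define Z2_BATTERY_PM_OPS (&z2_battery_pm_ops)
	#else
	#define Z2_BATTERY_PM_OPS (NULL)
	#endif

Whether to use the helper is a style choice; the explicit table in the patch wires up only the two callbacks it actually needs.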
diff --git a/drivers/regulator/88pm8607.c b/drivers/regulator/88pm8607.c index 859251250b55..d63fddb0fbb0 100644 --- a/drivers/regulator/88pm8607.c +++ b/drivers/regulator/88pm8607.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/regulator/driver.h> | 16 | #include <linux/regulator/driver.h> |
17 | #include <linux/regulator/machine.h> | 17 | #include <linux/regulator/machine.h> |
18 | #include <linux/mfd/core.h> | ||
19 | #include <linux/mfd/88pm860x.h> | 18 | #include <linux/mfd/88pm860x.h> |
20 | 19 | ||
21 | struct pm8607_regulator_info { | 20 | struct pm8607_regulator_info { |
@@ -399,36 +398,33 @@ static int __devinit pm8607_regulator_probe(struct platform_device *pdev) | |||
399 | { | 398 | { |
400 | struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); | 399 | struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); |
401 | struct pm8607_regulator_info *info = NULL; | 400 | struct pm8607_regulator_info *info = NULL; |
402 | struct regulator_init_data *pdata; | 401 | struct regulator_init_data *pdata = pdev->dev.platform_data; |
403 | struct mfd_cell *cell; | 402 | struct resource *res; |
404 | int i; | 403 | int i; |
405 | 404 | ||
406 | cell = pdev->dev.platform_data; | 405 | res = platform_get_resource(pdev, IORESOURCE_IO, 0); |
407 | if (cell == NULL) | 406 | if (res == NULL) { |
408 | return -ENODEV; | 407 | dev_err(&pdev->dev, "No I/O resource!\n"); |
409 | pdata = cell->mfd_data; | ||
410 | if (pdata == NULL) | ||
411 | return -EINVAL; | 408 | return -EINVAL; |
412 | 409 | } | |
413 | for (i = 0; i < ARRAY_SIZE(pm8607_regulator_info); i++) { | 410 | for (i = 0; i < ARRAY_SIZE(pm8607_regulator_info); i++) { |
414 | info = &pm8607_regulator_info[i]; | 411 | info = &pm8607_regulator_info[i]; |
415 | if (!strcmp(info->desc.name, pdata->constraints.name)) | 412 | if (info->desc.id == res->start) |
416 | break; | 413 | break; |
417 | } | 414 | } |
418 | if (i > ARRAY_SIZE(pm8607_regulator_info)) { | 415 | if ((i < 0) || (i > PM8607_ID_RG_MAX)) { |
419 | dev_err(&pdev->dev, "Failed to find regulator %s\n", | 416 | dev_err(&pdev->dev, "Failed to find regulator %llu\n", |
420 | pdata->constraints.name); | 417 | (unsigned long long)res->start); |
421 | return -EINVAL; | 418 | return -EINVAL; |
422 | } | 419 | } |
423 | |||
424 | info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion; | 420 | info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion; |
425 | info->chip = chip; | 421 | info->chip = chip; |
426 | 422 | ||
427 | /* check DVC ramp slope double */ | 423 | /* check DVC ramp slope double */ |
428 | if (!strcmp(info->desc.name, "BUCK3")) | 424 | if ((i == PM8607_ID_BUCK3) && info->chip->buck3_double) |
429 | if (info->chip->buck3_double) | 425 | info->slope_double = 1; |
430 | info->slope_double = 1; | ||
431 | 426 | ||
427 | /* replace driver_data with info */ | ||
432 | info->regulator = regulator_register(&info->desc, &pdev->dev, | 428 | info->regulator = regulator_register(&info->desc, &pdev->dev, |
433 | pdata, info); | 429 | pdata, info); |
434 | if (IS_ERR(info->regulator)) { | 430 | if (IS_ERR(info->regulator)) { |
diff --git a/drivers/regulator/Kconfig b/drivers/regulator/Kconfig index f0b13a0d1851..d7ed20f293d7 100644 --- a/drivers/regulator/Kconfig +++ b/drivers/regulator/Kconfig | |||
@@ -297,5 +297,11 @@ config REGULATOR_TPS6524X | |||
297 | serial interface currently supported on the sequencer serial | 297 | serial interface currently supported on the sequencer serial |
298 | port controller. | 298 | port controller. |
299 | 299 | ||
300 | config REGULATOR_TPS65910 | ||
301 | tristate "TI TPS65910 Power Regulator" | ||
302 | depends on MFD_TPS65910 | ||
303 | help | ||
304 | This driver supports TPS65910 voltage regulator chips. | ||
305 | |||
300 | endif | 306 | endif |
301 | 307 | ||
diff --git a/drivers/regulator/Makefile b/drivers/regulator/Makefile index 165ff5371e9e..3932d2ec38f3 100644 --- a/drivers/regulator/Makefile +++ b/drivers/regulator/Makefile | |||
@@ -42,5 +42,6 @@ obj-$(CONFIG_REGULATOR_88PM8607) += 88pm8607.o | |||
42 | obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o | 42 | obj-$(CONFIG_REGULATOR_ISL6271A) += isl6271a-regulator.o |
43 | obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o | 43 | obj-$(CONFIG_REGULATOR_AB8500) += ab8500.o |
44 | obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o | 44 | obj-$(CONFIG_REGULATOR_DB8500_PRCMU) += db8500-prcmu.o |
45 | obj-$(CONFIG_REGULATOR_TPS65910) += tps65910-regulator.o | ||
45 | 46 | ||
46 | ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG | 47 | ccflags-$(CONFIG_REGULATOR_DEBUG) += -DDEBUG |
diff --git a/drivers/regulator/ab3100.c b/drivers/regulator/ab3100.c index b1d77946e9c6..585e4946fe0a 100644 --- a/drivers/regulator/ab3100.c +++ b/drivers/regulator/ab3100.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/platform_device.h> | 17 | #include <linux/platform_device.h> |
18 | #include <linux/regulator/driver.h> | 18 | #include <linux/regulator/driver.h> |
19 | #include <linux/mfd/abx500.h> | 19 | #include <linux/mfd/abx500.h> |
20 | #include <linux/mfd/core.h> | ||
21 | 20 | ||
22 | /* LDO registers and some handy masking definitions for AB3100 */ | 21 | /* LDO registers and some handy masking definitions for AB3100 */ |
23 | #define AB3100_LDO_A 0x40 | 22 | #define AB3100_LDO_A 0x40 |
@@ -582,7 +581,7 @@ ab3100_regulator_desc[AB3100_NUM_REGULATORS] = { | |||
582 | 581 | ||
583 | static int __devinit ab3100_regulators_probe(struct platform_device *pdev) | 582 | static int __devinit ab3100_regulators_probe(struct platform_device *pdev) |
584 | { | 583 | { |
585 | struct ab3100_platform_data *plfdata = mfd_get_data(pdev); | 584 | struct ab3100_platform_data *plfdata = pdev->dev.platform_data; |
586 | int err = 0; | 585 | int err = 0; |
587 | u8 data; | 586 | u8 data; |
588 | int i; | 587 | int i; |
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c index 0fae51c4845a..d3e38790906e 100644 --- a/drivers/regulator/core.c +++ b/drivers/regulator/core.c | |||
@@ -158,6 +158,13 @@ static int regulator_check_consumers(struct regulator_dev *rdev, | |||
158 | struct regulator *regulator; | 158 | struct regulator *regulator; |
159 | 159 | ||
160 | list_for_each_entry(regulator, &rdev->consumer_list, list) { | 160 | list_for_each_entry(regulator, &rdev->consumer_list, list) { |
161 | /* | ||
162 | * Assume consumers that didn't say anything are OK | ||
163 | * with anything in the constraint range. | ||
164 | */ | ||
165 | if (!regulator->min_uV && !regulator->max_uV) | ||
166 | continue; | ||
167 | |||
161 | if (*max_uV > regulator->max_uV) | 168 | if (*max_uV > regulator->max_uV) |
162 | *max_uV = regulator->max_uV; | 169 | *max_uV = regulator->max_uV; |
163 | if (*min_uV < regulator->min_uV) | 170 | if (*min_uV < regulator->min_uV) |
@@ -197,9 +204,9 @@ static int regulator_check_current_limit(struct regulator_dev *rdev, | |||
197 | } | 204 | } |
198 | 205 | ||
199 | /* operating mode constraint check */ | 206 | /* operating mode constraint check */ |
200 | static int regulator_check_mode(struct regulator_dev *rdev, int mode) | 207 | static int regulator_mode_constrain(struct regulator_dev *rdev, int *mode) |
201 | { | 208 | { |
202 | switch (mode) { | 209 | switch (*mode) { |
203 | case REGULATOR_MODE_FAST: | 210 | case REGULATOR_MODE_FAST: |
204 | case REGULATOR_MODE_NORMAL: | 211 | case REGULATOR_MODE_NORMAL: |
205 | case REGULATOR_MODE_IDLE: | 212 | case REGULATOR_MODE_IDLE: |
@@ -217,11 +224,17 @@ static int regulator_check_mode(struct regulator_dev *rdev, int mode) | |||
217 | rdev_err(rdev, "operation not allowed\n"); | 224 | rdev_err(rdev, "operation not allowed\n"); |
218 | return -EPERM; | 225 | return -EPERM; |
219 | } | 226 | } |
220 | if (!(rdev->constraints->valid_modes_mask & mode)) { | 227 | |
221 | rdev_err(rdev, "invalid mode %x\n", mode); | 228 | /* The modes are bitmasks, the most power hungry modes having |
222 | return -EINVAL; | 229 | * the lowest values. If the requested mode isn't supported |
230 | * try higher modes. */ | ||
231 | while (*mode) { | ||
232 | if (rdev->constraints->valid_modes_mask & *mode) | ||
233 | return 0; | ||
234 | *mode /= 2; | ||
223 | } | 235 | } |
224 | return 0; | 236 | |
237 | return -EINVAL; | ||
225 | } | 238 | } |
226 | 239 | ||
227 | /* dynamic regulator mode switching constraint check */ | 240 | /* dynamic regulator mode switching constraint check */ |
@@ -612,7 +625,7 @@ static void drms_uA_update(struct regulator_dev *rdev) | |||
612 | output_uV, current_uA); | 625 | output_uV, current_uA); |
613 | 626 | ||
614 | /* check the new mode is allowed */ | 627 | /* check the new mode is allowed */ |
615 | err = regulator_check_mode(rdev, mode); | 628 | err = regulator_mode_constrain(rdev, &mode); |
616 | if (err == 0) | 629 | if (err == 0) |
617 | rdev->desc->ops->set_mode(rdev, mode); | 630 | rdev->desc->ops->set_mode(rdev, mode); |
618 | } | 631 | } |
@@ -718,6 +731,10 @@ static void print_constraints(struct regulator_dev *rdev) | |||
718 | count += sprintf(buf + count, "at %d mV ", ret / 1000); | 731 | count += sprintf(buf + count, "at %d mV ", ret / 1000); |
719 | } | 732 | } |
720 | 733 | ||
734 | if (constraints->uV_offset) | ||
735 | count += sprintf(buf + count, "%dmV offset ", | ||
736 | constraints->uV_offset / 1000); | ||
737 | |||
721 | if (constraints->min_uA && constraints->max_uA) { | 738 | if (constraints->min_uA && constraints->max_uA) { |
722 | if (constraints->min_uA == constraints->max_uA) | 739 | if (constraints->min_uA == constraints->max_uA) |
723 | count += sprintf(buf + count, "%d mA ", | 740 | count += sprintf(buf + count, "%d mA ", |
@@ -1498,13 +1515,14 @@ static int _regulator_force_disable(struct regulator_dev *rdev, | |||
1498 | */ | 1515 | */ |
1499 | int regulator_force_disable(struct regulator *regulator) | 1516 | int regulator_force_disable(struct regulator *regulator) |
1500 | { | 1517 | { |
1518 | struct regulator_dev *rdev = regulator->rdev; | ||
1501 | struct regulator_dev *supply_rdev = NULL; | 1519 | struct regulator_dev *supply_rdev = NULL; |
1502 | int ret; | 1520 | int ret; |
1503 | 1521 | ||
1504 | mutex_lock(®ulator->rdev->mutex); | 1522 | mutex_lock(&rdev->mutex); |
1505 | regulator->uA_load = 0; | 1523 | regulator->uA_load = 0; |
1506 | ret = _regulator_force_disable(regulator->rdev, &supply_rdev); | 1524 | ret = _regulator_force_disable(rdev, &supply_rdev); |
1507 | mutex_unlock(®ulator->rdev->mutex); | 1525 | mutex_unlock(&rdev->mutex); |
1508 | 1526 | ||
1509 | if (supply_rdev) | 1527 | if (supply_rdev) |
1510 | regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev))); | 1528 | regulator_disable(get_device_regulator(rdev_get_dev(supply_rdev))); |
@@ -1634,6 +1652,9 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev, | |||
1634 | 1652 | ||
1635 | trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV); | 1653 | trace_regulator_set_voltage(rdev_get_name(rdev), min_uV, max_uV); |
1636 | 1654 | ||
1655 | min_uV += rdev->constraints->uV_offset; | ||
1656 | max_uV += rdev->constraints->uV_offset; | ||
1657 | |||
1637 | if (rdev->desc->ops->set_voltage) { | 1658 | if (rdev->desc->ops->set_voltage) { |
1638 | ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, | 1659 | ret = rdev->desc->ops->set_voltage(rdev, min_uV, max_uV, |
1639 | &selector); | 1660 | &selector); |
@@ -1858,18 +1879,22 @@ EXPORT_SYMBOL_GPL(regulator_sync_voltage); | |||
1858 | 1879 | ||
1859 | static int _regulator_get_voltage(struct regulator_dev *rdev) | 1880 | static int _regulator_get_voltage(struct regulator_dev *rdev) |
1860 | { | 1881 | { |
1861 | int sel; | 1882 | int sel, ret; |
1862 | 1883 | ||
1863 | if (rdev->desc->ops->get_voltage_sel) { | 1884 | if (rdev->desc->ops->get_voltage_sel) { |
1864 | sel = rdev->desc->ops->get_voltage_sel(rdev); | 1885 | sel = rdev->desc->ops->get_voltage_sel(rdev); |
1865 | if (sel < 0) | 1886 | if (sel < 0) |
1866 | return sel; | 1887 | return sel; |
1867 | return rdev->desc->ops->list_voltage(rdev, sel); | 1888 | ret = rdev->desc->ops->list_voltage(rdev, sel); |
1868 | } | 1889 | } else if (rdev->desc->ops->get_voltage) { |
1869 | if (rdev->desc->ops->get_voltage) | 1890 | ret = rdev->desc->ops->get_voltage(rdev); |
1870 | return rdev->desc->ops->get_voltage(rdev); | 1891 | } else { |
1871 | else | ||
1872 | return -EINVAL; | 1892 | return -EINVAL; |
1893 | } | ||
1894 | |||
1895 | if (ret < 0) | ||
1896 | return ret; | ||
1897 | return ret - rdev->constraints->uV_offset; | ||
1873 | } | 1898 | } |
1874 | 1899 | ||
1875 | /** | 1900 | /** |
@@ -2005,7 +2030,7 @@ int regulator_set_mode(struct regulator *regulator, unsigned int mode) | |||
2005 | } | 2030 | } |
2006 | 2031 | ||
2007 | /* constraints check */ | 2032 | /* constraints check */ |
2008 | ret = regulator_check_mode(rdev, mode); | 2033 | ret = regulator_mode_constrain(rdev, &mode); |
2009 | if (ret < 0) | 2034 | if (ret < 0) |
2010 | goto out; | 2035 | goto out; |
2011 | 2036 | ||
@@ -2081,16 +2106,26 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) | |||
2081 | 2106 | ||
2082 | mutex_lock(&rdev->mutex); | 2107 | mutex_lock(&rdev->mutex); |
2083 | 2108 | ||
2109 | /* | ||
2110 | * first check to see if we can set modes at all, otherwise just | ||
2111 | * tell the consumer everything is OK. | ||
2112 | */ | ||
2084 | regulator->uA_load = uA_load; | 2113 | regulator->uA_load = uA_load; |
2085 | ret = regulator_check_drms(rdev); | 2114 | ret = regulator_check_drms(rdev); |
2086 | if (ret < 0) | 2115 | if (ret < 0) { |
2116 | ret = 0; | ||
2087 | goto out; | 2117 | goto out; |
2088 | ret = -EINVAL; | 2118 | } |
2089 | 2119 | ||
2090 | /* sanity check */ | ||
2091 | if (!rdev->desc->ops->get_optimum_mode) | 2120 | if (!rdev->desc->ops->get_optimum_mode) |
2092 | goto out; | 2121 | goto out; |
2093 | 2122 | ||
2123 | /* | ||
2124 | * we can actually do this so any errors are indicators of | ||
2125 | * potential real failure. | ||
2126 | */ | ||
2127 | ret = -EINVAL; | ||
2128 | |||
2094 | /* get output voltage */ | 2129 | /* get output voltage */ |
2095 | output_uV = _regulator_get_voltage(rdev); | 2130 | output_uV = _regulator_get_voltage(rdev); |
2096 | if (output_uV <= 0) { | 2131 | if (output_uV <= 0) { |
@@ -2116,7 +2151,7 @@ int regulator_set_optimum_mode(struct regulator *regulator, int uA_load) | |||
2116 | mode = rdev->desc->ops->get_optimum_mode(rdev, | 2151 | mode = rdev->desc->ops->get_optimum_mode(rdev, |
2117 | input_uV, output_uV, | 2152 | input_uV, output_uV, |
2118 | total_uA_load); | 2153 | total_uA_load); |
2119 | ret = regulator_check_mode(rdev, mode); | 2154 | ret = regulator_mode_constrain(rdev, &mode); |
2120 | if (ret < 0) { | 2155 | if (ret < 0) { |
2121 | rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n", | 2156 | rdev_err(rdev, "failed to get optimum mode @ %d uA %d -> %d uV\n", |
2122 | total_uA_load, input_uV, output_uV); | 2157 | total_uA_load, input_uV, output_uV); |
@@ -2589,14 +2624,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, | |||
2589 | if (ret < 0) | 2624 | if (ret < 0) |
2590 | goto scrub; | 2625 | goto scrub; |
2591 | 2626 | ||
2592 | /* set supply regulator if it exists */ | ||
2593 | if (init_data->supply_regulator && init_data->supply_regulator_dev) { | ||
2594 | dev_err(dev, | ||
2595 | "Supply regulator specified by both name and dev\n"); | ||
2596 | ret = -EINVAL; | ||
2597 | goto scrub; | ||
2598 | } | ||
2599 | |||
2600 | if (init_data->supply_regulator) { | 2627 | if (init_data->supply_regulator) { |
2601 | struct regulator_dev *r; | 2628 | struct regulator_dev *r; |
2602 | int found = 0; | 2629 | int found = 0; |
@@ -2621,14 +2648,6 @@ struct regulator_dev *regulator_register(struct regulator_desc *regulator_desc, | |||
2621 | goto scrub; | 2648 | goto scrub; |
2622 | } | 2649 | } |
2623 | 2650 | ||
2624 | if (init_data->supply_regulator_dev) { | ||
2625 | dev_warn(dev, "Uses supply_regulator_dev instead of regulator_supply\n"); | ||
2626 | ret = set_supply(rdev, | ||
2627 | dev_get_drvdata(init_data->supply_regulator_dev)); | ||
2628 | if (ret < 0) | ||
2629 | goto scrub; | ||
2630 | } | ||
2631 | |||
2632 | /* add consumers devices */ | 2651 | /* add consumers devices */ |
2633 | for (i = 0; i < init_data->num_consumer_supplies; i++) { | 2652 | for (i = 0; i < init_data->num_consumer_supplies; i++) { |
2634 | ret = set_consumer_device_supply(rdev, | 2653 | ret = set_consumer_device_supply(rdev, |
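From a consumer's point of view, the regulator_mode_constrain() change means a request for an unsupported low-power mode no longer fails outright; the core now promotes it to the closest supported, more power-hungry mode. The sketch below is a hedged consumer-side illustration: the function and the supply name "vdd" are made up, and REGULATOR_CHANGE_MODE must still be permitted by the machine constraints.

	#include <linux/device.h>
	#include <linux/err.h>
	#include <linux/regulator/consumer.h>

	static int example_enter_idle(struct device *dev)
	{
		struct regulator *vdd;
		int ret;

		vdd = regulator_get(dev, "vdd");	/* "vdd" is a placeholder supply name */
		if (IS_ERR(vdd))
			return PTR_ERR(vdd);

		/* May silently end up in NORMAL (or FAST) if IDLE is not in the
		 * constraints' valid_modes_mask; before this patch it returned -EINVAL. */
		ret = regulator_set_mode(vdd, REGULATOR_MODE_IDLE);

		regulator_put(vdd);
		return ret;
	}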
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c index 1089a961616e..e5f7b8fe51f4 100644 --- a/drivers/regulator/db8500-prcmu.c +++ b/drivers/regulator/db8500-prcmu.c | |||
@@ -13,7 +13,6 @@ | |||
13 | #include <linux/err.h> | 13 | #include <linux/err.h> |
14 | #include <linux/spinlock.h> | 14 | #include <linux/spinlock.h> |
15 | #include <linux/platform_device.h> | 15 | #include <linux/platform_device.h> |
16 | #include <linux/mfd/core.h> | ||
17 | #include <linux/mfd/db8500-prcmu.h> | 16 | #include <linux/mfd/db8500-prcmu.h> |
18 | #include <linux/regulator/driver.h> | 17 | #include <linux/regulator/driver.h> |
19 | #include <linux/regulator/machine.h> | 18 | #include <linux/regulator/machine.h> |
@@ -471,7 +470,8 @@ static struct db8500_regulator_info | |||
471 | 470 | ||
472 | static int __devinit db8500_regulator_probe(struct platform_device *pdev) | 471 | static int __devinit db8500_regulator_probe(struct platform_device *pdev) |
473 | { | 472 | { |
474 | struct regulator_init_data *db8500_init_data = mfd_get_data(pdev); | 473 | struct regulator_init_data *db8500_init_data = |
474 | dev_get_platdata(&pdev->dev); | ||
475 | int i, err; | 475 | int i, err; |
476 | 476 | ||
477 | /* register all regulators */ | 477 | /* register all regulators */ |
diff --git a/drivers/regulator/max8925-regulator.c b/drivers/regulator/max8925-regulator.c index 8ae147549c6a..e4dbd667c043 100644 --- a/drivers/regulator/max8925-regulator.c +++ b/drivers/regulator/max8925-regulator.c | |||
@@ -23,6 +23,10 @@ | |||
23 | #define SD1_DVM_SHIFT 5 /* SDCTL1 bit5 */ | 23 | #define SD1_DVM_SHIFT 5 /* SDCTL1 bit5 */ |
24 | #define SD1_DVM_EN 6 /* SDV1 bit 6 */ | 24 | #define SD1_DVM_EN 6 /* SDV1 bit 6 */ |
25 | 25 | ||
26 | /* bit definitions in SD & LDO control registers */ | ||
27 | #define OUT_ENABLE 0x1f /* Power U/D sequence as I2C */ | ||
28 | #define OUT_DISABLE 0x1e /* Power U/D sequence as I2C */ | ||
29 | |||
26 | struct max8925_regulator_info { | 30 | struct max8925_regulator_info { |
27 | struct regulator_desc desc; | 31 | struct regulator_desc desc; |
28 | struct regulator_dev *regulator; | 32 | struct regulator_dev *regulator; |
@@ -93,8 +97,8 @@ static int max8925_enable(struct regulator_dev *rdev) | |||
93 | struct max8925_regulator_info *info = rdev_get_drvdata(rdev); | 97 | struct max8925_regulator_info *info = rdev_get_drvdata(rdev); |
94 | 98 | ||
95 | return max8925_set_bits(info->i2c, info->enable_reg, | 99 | return max8925_set_bits(info->i2c, info->enable_reg, |
96 | 1 << info->enable_bit, | 100 | OUT_ENABLE << info->enable_bit, |
97 | 1 << info->enable_bit); | 101 | OUT_ENABLE << info->enable_bit); |
98 | } | 102 | } |
99 | 103 | ||
100 | static int max8925_disable(struct regulator_dev *rdev) | 104 | static int max8925_disable(struct regulator_dev *rdev) |
@@ -102,7 +106,8 @@ static int max8925_disable(struct regulator_dev *rdev) | |||
102 | struct max8925_regulator_info *info = rdev_get_drvdata(rdev); | 106 | struct max8925_regulator_info *info = rdev_get_drvdata(rdev); |
103 | 107 | ||
104 | return max8925_set_bits(info->i2c, info->enable_reg, | 108 | return max8925_set_bits(info->i2c, info->enable_reg, |
105 | 1 << info->enable_bit, 0); | 109 | OUT_ENABLE << info->enable_bit, |
110 | OUT_DISABLE << info->enable_bit); | ||
106 | } | 111 | } |
107 | 112 | ||
108 | static int max8925_is_enabled(struct regulator_dev *rdev) | 113 | static int max8925_is_enabled(struct regulator_dev *rdev) |
diff --git a/drivers/regulator/max8997.c b/drivers/regulator/max8997.c index 77e0cfb30b23..10d5a1d9768e 100644 --- a/drivers/regulator/max8997.c +++ b/drivers/regulator/max8997.c | |||
@@ -267,7 +267,6 @@ static int max8997_get_enable_register(struct regulator_dev *rdev, | |||
267 | default: | 267 | default: |
268 | /* Not controllable or not exists */ | 268 | /* Not controllable or not exists */ |
269 | return -EINVAL; | 269 | return -EINVAL; |
270 | break; | ||
271 | } | 270 | } |
272 | 271 | ||
273 | return 0; | 272 | return 0; |
@@ -1033,11 +1032,11 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev) | |||
1033 | 1032 | ||
1034 | /* For the safety, set max voltage before setting up */ | 1033 | /* For the safety, set max voltage before setting up */ |
1035 | for (i = 0; i < 8; i++) { | 1034 | for (i = 0; i < 8; i++) { |
1036 | max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1), | 1035 | max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i, |
1037 | max_buck1, 0x3f); | 1036 | max_buck1, 0x3f); |
1038 | max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1), | 1037 | max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i, |
1039 | max_buck2, 0x3f); | 1038 | max_buck2, 0x3f); |
1040 | max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1), | 1039 | max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i, |
1041 | max_buck5, 0x3f); | 1040 | max_buck5, 0x3f); |
1042 | } | 1041 | } |
1043 | 1042 | ||
@@ -1114,13 +1113,13 @@ static __devinit int max8997_pmic_probe(struct platform_device *pdev) | |||
1114 | 1113 | ||
1115 | /* Initialize all the DVS related BUCK registers */ | 1114 | /* Initialize all the DVS related BUCK registers */ |
1116 | for (i = 0; i < 8; i++) { | 1115 | for (i = 0; i < 8; i++) { |
1117 | max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS(i + 1), | 1116 | max8997_update_reg(i2c, MAX8997_REG_BUCK1DVS1 + i, |
1118 | max8997->buck1_vol[i], | 1117 | max8997->buck1_vol[i], |
1119 | 0x3f); | 1118 | 0x3f); |
1120 | max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS(i + 1), | 1119 | max8997_update_reg(i2c, MAX8997_REG_BUCK2DVS1 + i, |
1121 | max8997->buck2_vol[i], | 1120 | max8997->buck2_vol[i], |
1122 | 0x3f); | 1121 | 0x3f); |
1123 | max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS(i + 1), | 1122 | max8997_update_reg(i2c, MAX8997_REG_BUCK5DVS1 + i, |
1124 | max8997->buck5_vol[i], | 1123 | max8997->buck5_vol[i], |
1125 | 0x3f); | 1124 | 0x3f); |
1126 | } | 1125 | } |
diff --git a/drivers/regulator/max8998.c b/drivers/regulator/max8998.c index f57e9c42fdb4..41a1495eec2b 100644 --- a/drivers/regulator/max8998.c +++ b/drivers/regulator/max8998.c | |||
@@ -732,13 +732,15 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) | |||
732 | if (!pdata->buck1_set1) { | 732 | if (!pdata->buck1_set1) { |
733 | printk(KERN_ERR "MAX8998 SET1 GPIO defined as 0 !\n"); | 733 | printk(KERN_ERR "MAX8998 SET1 GPIO defined as 0 !\n"); |
734 | WARN_ON(!pdata->buck1_set1); | 734 | WARN_ON(!pdata->buck1_set1); |
735 | return -EIO; | 735 | ret = -EIO; |
736 | goto err_free_mem; | ||
736 | } | 737 | } |
737 | /* Check if SET2 is not equal to 0 */ | 738 | /* Check if SET2 is not equal to 0 */ |
738 | if (!pdata->buck1_set2) { | 739 | if (!pdata->buck1_set2) { |
739 | printk(KERN_ERR "MAX8998 SET2 GPIO defined as 0 !\n"); | 740 | printk(KERN_ERR "MAX8998 SET2 GPIO defined as 0 !\n"); |
740 | WARN_ON(!pdata->buck1_set2); | 741 | WARN_ON(!pdata->buck1_set2); |
741 | return -EIO; | 742 | ret = -EIO; |
743 | goto err_free_mem; | ||
742 | } | 744 | } |
743 | 745 | ||
744 | gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1"); | 746 | gpio_request(pdata->buck1_set1, "MAX8998 BUCK1_SET1"); |
@@ -758,7 +760,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) | |||
758 | max8998->buck1_vol[0] = i; | 760 | max8998->buck1_vol[0] = i; |
759 | ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i); | 761 | ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE1, i); |
760 | if (ret) | 762 | if (ret) |
761 | return ret; | 763 | goto err_free_mem; |
762 | 764 | ||
763 | /* Set predefined value for BUCK1 register 2 */ | 765 | /* Set predefined value for BUCK1 register 2 */ |
764 | i = 0; | 766 | i = 0; |
@@ -770,7 +772,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) | |||
770 | max8998->buck1_vol[1] = i; | 772 | max8998->buck1_vol[1] = i; |
771 | ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i); | 773 | ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE2, i); |
772 | if (ret) | 774 | if (ret) |
773 | return ret; | 775 | goto err_free_mem; |
774 | 776 | ||
775 | /* Set predefined value for BUCK1 register 3 */ | 777 | /* Set predefined value for BUCK1 register 3 */ |
776 | i = 0; | 778 | i = 0; |
@@ -782,7 +784,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) | |||
782 | max8998->buck1_vol[2] = i; | 784 | max8998->buck1_vol[2] = i; |
783 | ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i); | 785 | ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE3, i); |
784 | if (ret) | 786 | if (ret) |
785 | return ret; | 787 | goto err_free_mem; |
786 | 788 | ||
787 | /* Set predefined value for BUCK1 register 4 */ | 789 | /* Set predefined value for BUCK1 register 4 */ |
788 | i = 0; | 790 | i = 0; |
@@ -794,7 +796,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) | |||
794 | max8998->buck1_vol[3] = i; | 796 | max8998->buck1_vol[3] = i; |
795 | ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i); | 797 | ret = max8998_write_reg(i2c, MAX8998_REG_BUCK1_VOLTAGE4, i); |
796 | if (ret) | 798 | if (ret) |
797 | return ret; | 799 | goto err_free_mem; |
798 | 800 | ||
799 | } | 801 | } |
800 | 802 | ||
@@ -803,7 +805,8 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) | |||
803 | if (!pdata->buck2_set3) { | 805 | if (!pdata->buck2_set3) { |
804 | printk(KERN_ERR "MAX8998 SET3 GPIO defined as 0 !\n"); | 806 | printk(KERN_ERR "MAX8998 SET3 GPIO defined as 0 !\n"); |
805 | WARN_ON(!pdata->buck2_set3); | 807 | WARN_ON(!pdata->buck2_set3); |
806 | return -EIO; | 808 | ret = -EIO; |
809 | goto err_free_mem; | ||
807 | } | 810 | } |
808 | gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3"); | 811 | gpio_request(pdata->buck2_set3, "MAX8998 BUCK2_SET3"); |
809 | gpio_direction_output(pdata->buck2_set3, | 812 | gpio_direction_output(pdata->buck2_set3, |
@@ -818,7 +821,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) | |||
818 | max8998->buck2_vol[0] = i; | 821 | max8998->buck2_vol[0] = i; |
819 | ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i); | 822 | ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE1, i); |
820 | if (ret) | 823 | if (ret) |
821 | return ret; | 824 | goto err_free_mem; |
822 | 825 | ||
823 | /* BUCK2 register 2 */ | 826 | /* BUCK2 register 2 */ |
824 | i = 0; | 827 | i = 0; |
@@ -830,7 +833,7 @@ static __devinit int max8998_pmic_probe(struct platform_device *pdev) | |||
830 | max8998->buck2_vol[1] = i; | 833 | max8998->buck2_vol[1] = i; |
831 | ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i); | 834 | ret = max8998_write_reg(i2c, MAX8998_REG_BUCK2_VOLTAGE2, i); |
832 | if (ret) | 835 | if (ret) |
833 | return ret; | 836 | goto err_free_mem; |
834 | } | 837 | } |
835 | 838 | ||
836 | for (i = 0; i < pdata->num_regulators; i++) { | 839 | for (i = 0; i < pdata->num_regulators; i++) { |
@@ -860,6 +863,7 @@ err: | |||
860 | if (rdev[i]) | 863 | if (rdev[i]) |
861 | regulator_unregister(rdev[i]); | 864 | regulator_unregister(rdev[i]); |
862 | 865 | ||
866 | err_free_mem: | ||
863 | kfree(max8998->rdev); | 867 | kfree(max8998->rdev); |
864 | kfree(max8998); | 868 | kfree(max8998); |
865 | 869 | ||
diff --git a/drivers/regulator/mc13783-regulator.c b/drivers/regulator/mc13783-regulator.c index b8a00c7fa441..730f43ad415b 100644 --- a/drivers/regulator/mc13783-regulator.c +++ b/drivers/regulator/mc13783-regulator.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/regulator/driver.h> | 15 | #include <linux/regulator/driver.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/mfd/core.h> | ||
19 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
20 | #include <linux/init.h> | 19 | #include <linux/init.h> |
21 | #include <linux/err.h> | 20 | #include <linux/err.h> |
@@ -337,7 +336,8 @@ static int __devinit mc13783_regulator_probe(struct platform_device *pdev) | |||
337 | { | 336 | { |
338 | struct mc13xxx_regulator_priv *priv; | 337 | struct mc13xxx_regulator_priv *priv; |
339 | struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent); | 338 | struct mc13xxx *mc13783 = dev_get_drvdata(pdev->dev.parent); |
340 | struct mc13783_regulator_platform_data *pdata = mfd_get_data(pdev); | 339 | struct mc13783_regulator_platform_data *pdata = |
340 | dev_get_platdata(&pdev->dev); | ||
341 | struct mc13783_regulator_init_data *init_data; | 341 | struct mc13783_regulator_init_data *init_data; |
342 | int i, ret; | 342 | int i, ret; |
343 | 343 | ||
@@ -381,7 +381,8 @@ err: | |||
381 | static int __devexit mc13783_regulator_remove(struct platform_device *pdev) | 381 | static int __devexit mc13783_regulator_remove(struct platform_device *pdev) |
382 | { | 382 | { |
383 | struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev); | 383 | struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev); |
384 | struct mc13783_regulator_platform_data *pdata = mfd_get_data(pdev); | 384 | struct mc13783_regulator_platform_data *pdata = |
385 | dev_get_platdata(&pdev->dev); | ||
385 | int i; | 386 | int i; |
386 | 387 | ||
387 | platform_set_drvdata(pdev, NULL); | 388 | platform_set_drvdata(pdev, NULL); |
diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c index 6f15168e5ed4..3285d41842f2 100644 --- a/drivers/regulator/mc13892-regulator.c +++ b/drivers/regulator/mc13892-regulator.c | |||
@@ -15,7 +15,6 @@ | |||
15 | #include <linux/regulator/driver.h> | 15 | #include <linux/regulator/driver.h> |
16 | #include <linux/platform_device.h> | 16 | #include <linux/platform_device.h> |
17 | #include <linux/kernel.h> | 17 | #include <linux/kernel.h> |
18 | #include <linux/mfd/core.h> | ||
19 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
20 | #include <linux/init.h> | 19 | #include <linux/init.h> |
21 | #include <linux/err.h> | 20 | #include <linux/err.h> |
@@ -432,7 +431,8 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev, | |||
432 | int min_uV, int max_uV, unsigned *selector) | 431 | int min_uV, int max_uV, unsigned *selector) |
433 | { | 432 | { |
434 | struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); | 433 | struct mc13xxx_regulator_priv *priv = rdev_get_drvdata(rdev); |
435 | int hi, value, val, mask, id = rdev_get_id(rdev); | 434 | int hi, value, mask, id = rdev_get_id(rdev); |
435 | u32 valread; | ||
436 | int ret; | 436 | int ret; |
437 | 437 | ||
438 | dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", | 438 | dev_dbg(rdev_get_dev(rdev), "%s id: %d min_uV: %d max_uV: %d\n", |
@@ -448,15 +448,16 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev, | |||
448 | 448 | ||
449 | mc13xxx_lock(priv->mc13xxx); | 449 | mc13xxx_lock(priv->mc13xxx); |
450 | ret = mc13xxx_reg_read(priv->mc13xxx, | 450 | ret = mc13xxx_reg_read(priv->mc13xxx, |
451 | mc13892_regulators[id].vsel_reg, &val); | 451 | mc13892_regulators[id].vsel_reg, &valread); |
452 | if (ret) | 452 | if (ret) |
453 | goto err; | 453 | goto err; |
454 | 454 | ||
455 | hi = val & MC13892_SWITCHERS0_SWxHI; | 455 | if (value > 1375000) |
456 | if (value > 1375) | ||
457 | hi = 1; | 456 | hi = 1; |
458 | if (value < 1100) | 457 | else if (value < 1100000) |
459 | hi = 0; | 458 | hi = 0; |
459 | else | ||
460 | hi = valread & MC13892_SWITCHERS0_SWxHI; | ||
460 | 461 | ||
461 | if (hi) { | 462 | if (hi) { |
462 | value = (value - 1100000) / 25000; | 463 | value = (value - 1100000) / 25000; |
@@ -465,8 +466,10 @@ static int mc13892_sw_regulator_set_voltage(struct regulator_dev *rdev, | |||
465 | value = (value - 600000) / 25000; | 466 | value = (value - 600000) / 25000; |
466 | 467 | ||
467 | mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI; | 468 | mask = mc13892_regulators[id].vsel_mask | MC13892_SWITCHERS0_SWxHI; |
468 | ret = mc13xxx_reg_rmw(priv->mc13xxx, mc13892_regulators[id].vsel_reg, | 469 | valread = (valread & ~mask) | |
469 | mask, value << mc13892_regulators[id].vsel_shift); | 470 | (value << mc13892_regulators[id].vsel_shift); |
471 | ret = mc13xxx_reg_write(priv->mc13xxx, mc13892_regulators[id].vsel_reg, | ||
472 | valread); | ||
470 | err: | 473 | err: |
471 | mc13xxx_unlock(priv->mc13xxx); | 474 | mc13xxx_unlock(priv->mc13xxx); |
472 | 475 | ||
@@ -521,7 +524,8 @@ static int __devinit mc13892_regulator_probe(struct platform_device *pdev) | |||
521 | { | 524 | { |
522 | struct mc13xxx_regulator_priv *priv; | 525 | struct mc13xxx_regulator_priv *priv; |
523 | struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent); | 526 | struct mc13xxx *mc13892 = dev_get_drvdata(pdev->dev.parent); |
524 | struct mc13xxx_regulator_platform_data *pdata = mfd_get_data(pdev); | 527 | struct mc13xxx_regulator_platform_data *pdata = |
528 | dev_get_platdata(&pdev->dev); | ||
525 | struct mc13xxx_regulator_init_data *init_data; | 529 | struct mc13xxx_regulator_init_data *init_data; |
526 | int i, ret; | 530 | int i, ret; |
527 | u32 val; | 531 | u32 val; |
@@ -595,7 +599,8 @@ err_free: | |||
595 | static int __devexit mc13892_regulator_remove(struct platform_device *pdev) | 599 | static int __devexit mc13892_regulator_remove(struct platform_device *pdev) |
596 | { | 600 | { |
597 | struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev); | 601 | struct mc13xxx_regulator_priv *priv = platform_get_drvdata(pdev); |
598 | struct mc13xxx_regulator_platform_data *pdata = mfd_get_data(pdev); | 602 | struct mc13xxx_regulator_platform_data *pdata = |
603 | dev_get_platdata(&pdev->dev); | ||
599 | int i; | 604 | int i; |
600 | 605 | ||
601 | platform_set_drvdata(pdev, NULL); | 606 | platform_set_drvdata(pdev, NULL); |
diff --git a/drivers/regulator/mc13xxx-regulator-core.c b/drivers/regulator/mc13xxx-regulator-core.c index 2bb5de1f2421..bc27ab136378 100644 --- a/drivers/regulator/mc13xxx-regulator-core.c +++ b/drivers/regulator/mc13xxx-regulator-core.c | |||
@@ -174,7 +174,7 @@ static int mc13xxx_regulator_get_voltage(struct regulator_dev *rdev) | |||
174 | 174 | ||
175 | dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); | 175 | dev_dbg(rdev_get_dev(rdev), "%s id: %d val: %d\n", __func__, id, val); |
176 | 176 | ||
177 | BUG_ON(val > mc13xxx_regulators[id].desc.n_voltages); | 177 | BUG_ON(val >= mc13xxx_regulators[id].desc.n_voltages); |
178 | 178 | ||
179 | return mc13xxx_regulators[id].voltages[val]; | 179 | return mc13xxx_regulators[id].voltages[val]; |
180 | } | 180 | } |
diff --git a/drivers/regulator/tps6105x-regulator.c b/drivers/regulator/tps6105x-regulator.c index 1661499feda4..1011873896dc 100644 --- a/drivers/regulator/tps6105x-regulator.c +++ b/drivers/regulator/tps6105x-regulator.c | |||
@@ -137,7 +137,7 @@ static struct regulator_desc tps6105x_regulator_desc = { | |||
137 | */ | 137 | */ |
138 | static int __devinit tps6105x_regulator_probe(struct platform_device *pdev) | 138 | static int __devinit tps6105x_regulator_probe(struct platform_device *pdev) |
139 | { | 139 | { |
140 | struct tps6105x *tps6105x = mfd_get_data(pdev); | 140 | struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev); |
141 | struct tps6105x_platform_data *pdata = tps6105x->pdata; | 141 | struct tps6105x_platform_data *pdata = tps6105x->pdata; |
142 | int ret; | 142 | int ret; |
143 | 143 | ||
@@ -158,13 +158,14 @@ static int __devinit tps6105x_regulator_probe(struct platform_device *pdev) | |||
158 | "failed to register regulator\n"); | 158 | "failed to register regulator\n"); |
159 | return ret; | 159 | return ret; |
160 | } | 160 | } |
161 | platform_set_drvdata(pdev, tps6105x); | ||
161 | 162 | ||
162 | return 0; | 163 | return 0; |
163 | } | 164 | } |
164 | 165 | ||
165 | static int __devexit tps6105x_regulator_remove(struct platform_device *pdev) | 166 | static int __devexit tps6105x_regulator_remove(struct platform_device *pdev) |
166 | { | 167 | { |
167 | struct tps6105x *tps6105x = platform_get_drvdata(pdev); | 168 | struct tps6105x *tps6105x = dev_get_platdata(&pdev->dev); |
168 | regulator_unregister(tps6105x->regulator); | 169 | regulator_unregister(tps6105x->regulator); |
169 | return 0; | 170 | return 0; |
170 | } | 171 | } |
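The tps6105x hunks above (like the mc13892 ones before them) switch from mfd_get_data() to the generic platform-data accessor and keep the driver's own state in drvdata. A minimal sketch of that pairing, using a hypothetical "foo" device rather than the tps6105x code itself: dev_get_platdata() fetches what the parent MFD handed down, while platform_set_drvdata()/platform_get_drvdata() carry the state probe allocated.

    #include <linux/platform_device.h>
    #include <linux/slab.h>
    #include <linux/errno.h>

    struct foo_state { int dummy; };        /* hypothetical driver state */

    static int foo_probe(struct platform_device *pdev)
    {
            void *pdata = dev_get_platdata(&pdev->dev);    /* from the parent MFD */
            struct foo_state *foo;

            if (!pdata)
                    return -EINVAL;

            foo = kzalloc(sizeof(*foo), GFP_KERNEL);
            if (!foo)
                    return -ENOMEM;

            platform_set_drvdata(pdev, foo);        /* read back in foo_remove() */
            return 0;
    }

    static int foo_remove(struct platform_device *pdev)
    {
            struct foo_state *foo = platform_get_drvdata(pdev);

            kfree(foo);
            return 0;
    }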
diff --git a/drivers/regulator/tps65023-regulator.c b/drivers/regulator/tps65023-regulator.c index 60a7ca5409e9..fbddc15e1811 100644 --- a/drivers/regulator/tps65023-regulator.c +++ b/drivers/regulator/tps65023-regulator.c | |||
@@ -466,7 +466,6 @@ static struct regulator_ops tps65023_ldo_ops = { | |||
466 | static int __devinit tps_65023_probe(struct i2c_client *client, | 466 | static int __devinit tps_65023_probe(struct i2c_client *client, |
467 | const struct i2c_device_id *id) | 467 | const struct i2c_device_id *id) |
468 | { | 468 | { |
469 | static int desc_id; | ||
470 | const struct tps_info *info = (void *)id->driver_data; | 469 | const struct tps_info *info = (void *)id->driver_data; |
471 | struct regulator_init_data *init_data; | 470 | struct regulator_init_data *init_data; |
472 | struct regulator_dev *rdev; | 471 | struct regulator_dev *rdev; |
@@ -499,7 +498,7 @@ static int __devinit tps_65023_probe(struct i2c_client *client, | |||
499 | tps->info[i] = info; | 498 | tps->info[i] = info; |
500 | 499 | ||
501 | tps->desc[i].name = info->name; | 500 | tps->desc[i].name = info->name; |
502 | tps->desc[i].id = desc_id++; | 501 | tps->desc[i].id = i; |
503 | tps->desc[i].n_voltages = num_voltages[i]; | 502 | tps->desc[i].n_voltages = num_voltages[i]; |
504 | tps->desc[i].ops = (i > TPS65023_DCDC_3 ? | 503 | tps->desc[i].ops = (i > TPS65023_DCDC_3 ? |
505 | &tps65023_ldo_ops : &tps65023_dcdc_ops); | 504 | &tps65023_ldo_ops : &tps65023_dcdc_ops); |
diff --git a/drivers/regulator/tps6507x-regulator.c b/drivers/regulator/tps6507x-regulator.c index 064755290599..bfffabc21eda 100644 --- a/drivers/regulator/tps6507x-regulator.c +++ b/drivers/regulator/tps6507x-regulator.c | |||
@@ -553,7 +553,6 @@ static __devinit | |||
553 | int tps6507x_pmic_probe(struct platform_device *pdev) | 553 | int tps6507x_pmic_probe(struct platform_device *pdev) |
554 | { | 554 | { |
555 | struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); | 555 | struct tps6507x_dev *tps6507x_dev = dev_get_drvdata(pdev->dev.parent); |
556 | static int desc_id; | ||
557 | struct tps_info *info = &tps6507x_pmic_regs[0]; | 556 | struct tps_info *info = &tps6507x_pmic_regs[0]; |
558 | struct regulator_init_data *init_data; | 557 | struct regulator_init_data *init_data; |
559 | struct regulator_dev *rdev; | 558 | struct regulator_dev *rdev; |
@@ -598,7 +597,7 @@ int tps6507x_pmic_probe(struct platform_device *pdev) | |||
598 | } | 597 | } |
599 | 598 | ||
600 | tps->desc[i].name = info->name; | 599 | tps->desc[i].name = info->name; |
601 | tps->desc[i].id = desc_id++; | 600 | tps->desc[i].id = i; |
602 | tps->desc[i].n_voltages = num_voltages[i]; | 601 | tps->desc[i].n_voltages = num_voltages[i]; |
603 | tps->desc[i].ops = (i > TPS6507X_DCDC_3 ? | 602 | tps->desc[i].ops = (i > TPS6507X_DCDC_3 ? |
604 | &tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops); | 603 | &tps6507x_pmic_ldo_ops : &tps6507x_pmic_dcdc_ops); |
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c new file mode 100644 index 000000000000..55dd4e6650db --- /dev/null +++ b/drivers/regulator/tps65910-regulator.c | |||
@@ -0,0 +1,993 @@ | |||
1 | /* | ||
2 | * tps65910.c -- TI tps65910 | ||
3 | * | ||
4 | * Copyright 2010 Texas Instruments Inc. | ||
5 | * | ||
6 | * Author: Graeme Gregory <gg@slimlogic.co.uk> | ||
7 | * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the | ||
11 | * Free Software Foundation; either version 2 of the License, or (at your | ||
12 | * option) any later version. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/init.h> | ||
19 | #include <linux/err.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/regulator/driver.h> | ||
22 | #include <linux/regulator/machine.h> | ||
23 | #include <linux/delay.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/gpio.h> | ||
26 | #include <linux/mfd/tps65910.h> | ||
27 | |||
28 | #define TPS65910_REG_VRTC 0 | ||
29 | #define TPS65910_REG_VIO 1 | ||
30 | #define TPS65910_REG_VDD1 2 | ||
31 | #define TPS65910_REG_VDD2 3 | ||
32 | #define TPS65910_REG_VDD3 4 | ||
33 | #define TPS65910_REG_VDIG1 5 | ||
34 | #define TPS65910_REG_VDIG2 6 | ||
35 | #define TPS65910_REG_VPLL 7 | ||
36 | #define TPS65910_REG_VDAC 8 | ||
37 | #define TPS65910_REG_VAUX1 9 | ||
38 | #define TPS65910_REG_VAUX2 10 | ||
39 | #define TPS65910_REG_VAUX33 11 | ||
40 | #define TPS65910_REG_VMMC 12 | ||
41 | |||
42 | #define TPS65911_REG_VDDCTRL 4 | ||
43 | #define TPS65911_REG_LDO1 5 | ||
44 | #define TPS65911_REG_LDO2 6 | ||
45 | #define TPS65911_REG_LDO3 7 | ||
46 | #define TPS65911_REG_LDO4 8 | ||
47 | #define TPS65911_REG_LDO5 9 | ||
48 | #define TPS65911_REG_LDO6 10 | ||
49 | #define TPS65911_REG_LDO7 11 | ||
50 | #define TPS65911_REG_LDO8 12 | ||
51 | |||
52 | #define TPS65910_NUM_REGULATOR 13 | ||
53 | #define TPS65910_SUPPLY_STATE_ENABLED 0x1 | ||
54 | |||
55 | /* supported VIO voltages in millivolts */ | ||
56 | static const u16 VIO_VSEL_table[] = { | ||
57 | 1500, 1800, 2500, 3300, | ||
58 | }; | ||
59 | |||
60 | /* VSEL tables for TPS65910-specific LDOs and DCDCs */ | ||
61 | |||
62 | /* supported VDD3 voltages in millivolts */ | ||
63 | static const u16 VDD3_VSEL_table[] = { | ||
64 | 5000, | ||
65 | }; | ||
66 | |||
67 | /* supported VDIG1 voltages in millivolts */ | ||
68 | static const u16 VDIG1_VSEL_table[] = { | ||
69 | 1200, 1500, 1800, 2700, | ||
70 | }; | ||
71 | |||
72 | /* supported VDIG2 voltages in millivolts */ | ||
73 | static const u16 VDIG2_VSEL_table[] = { | ||
74 | 1000, 1100, 1200, 1800, | ||
75 | }; | ||
76 | |||
77 | /* supported VPLL voltages in millivolts */ | ||
78 | static const u16 VPLL_VSEL_table[] = { | ||
79 | 1000, 1100, 1800, 2500, | ||
80 | }; | ||
81 | |||
82 | /* supported VDAC voltages in millivolts */ | ||
83 | static const u16 VDAC_VSEL_table[] = { | ||
84 | 1800, 2600, 2800, 2850, | ||
85 | }; | ||
86 | |||
87 | /* supported VAUX1 voltages in millivolts */ | ||
88 | static const u16 VAUX1_VSEL_table[] = { | ||
89 | 1800, 2500, 2800, 2850, | ||
90 | }; | ||
91 | |||
92 | /* supported VAUX2 voltages in millivolts */ | ||
93 | static const u16 VAUX2_VSEL_table[] = { | ||
94 | 1800, 2800, 2900, 3300, | ||
95 | }; | ||
96 | |||
97 | /* supported VAUX33 voltages in millivolts */ | ||
98 | static const u16 VAUX33_VSEL_table[] = { | ||
99 | 1800, 2000, 2800, 3300, | ||
100 | }; | ||
101 | |||
102 | /* supported VMMC voltages in millivolts */ | ||
103 | static const u16 VMMC_VSEL_table[] = { | ||
104 | 1800, 2800, 3000, 3300, | ||
105 | }; | ||
106 | |||
107 | struct tps_info { | ||
108 | const char *name; | ||
109 | unsigned min_uV; | ||
110 | unsigned max_uV; | ||
111 | u8 table_len; | ||
112 | const u16 *table; | ||
113 | }; | ||
114 | |||
115 | static struct tps_info tps65910_regs[] = { | ||
116 | { | ||
117 | .name = "VRTC", | ||
118 | }, | ||
119 | { | ||
120 | .name = "VIO", | ||
121 | .min_uV = 1500000, | ||
122 | .max_uV = 3300000, | ||
123 | .table_len = ARRAY_SIZE(VIO_VSEL_table), | ||
124 | .table = VIO_VSEL_table, | ||
125 | }, | ||
126 | { | ||
127 | .name = "VDD1", | ||
128 | .min_uV = 600000, | ||
129 | .max_uV = 4500000, | ||
130 | }, | ||
131 | { | ||
132 | .name = "VDD2", | ||
133 | .min_uV = 600000, | ||
134 | .max_uV = 4500000, | ||
135 | }, | ||
136 | { | ||
137 | .name = "VDD3", | ||
138 | .min_uV = 5000000, | ||
139 | .max_uV = 5000000, | ||
140 | .table_len = ARRAY_SIZE(VDD3_VSEL_table), | ||
141 | .table = VDD3_VSEL_table, | ||
142 | }, | ||
143 | { | ||
144 | .name = "VDIG1", | ||
145 | .min_uV = 1200000, | ||
146 | .max_uV = 2700000, | ||
147 | .table_len = ARRAY_SIZE(VDIG1_VSEL_table), | ||
148 | .table = VDIG1_VSEL_table, | ||
149 | }, | ||
150 | { | ||
151 | .name = "VDIG2", | ||
152 | .min_uV = 1000000, | ||
153 | .max_uV = 1800000, | ||
154 | .table_len = ARRAY_SIZE(VDIG2_VSEL_table), | ||
155 | .table = VDIG2_VSEL_table, | ||
156 | }, | ||
157 | { | ||
158 | .name = "VPLL", | ||
159 | .min_uV = 1000000, | ||
160 | .max_uV = 2500000, | ||
161 | .table_len = ARRAY_SIZE(VPLL_VSEL_table), | ||
162 | .table = VPLL_VSEL_table, | ||
163 | }, | ||
164 | { | ||
165 | .name = "VDAC", | ||
166 | .min_uV = 1800000, | ||
167 | .max_uV = 2850000, | ||
168 | .table_len = ARRAY_SIZE(VDAC_VSEL_table), | ||
169 | .table = VDAC_VSEL_table, | ||
170 | }, | ||
171 | { | ||
172 | .name = "VAUX1", | ||
173 | .min_uV = 1800000, | ||
174 | .max_uV = 2850000, | ||
175 | .table_len = ARRAY_SIZE(VAUX1_VSEL_table), | ||
176 | .table = VAUX1_VSEL_table, | ||
177 | }, | ||
178 | { | ||
179 | .name = "VAUX2", | ||
180 | .min_uV = 1800000, | ||
181 | .max_uV = 3300000, | ||
182 | .table_len = ARRAY_SIZE(VAUX2_VSEL_table), | ||
183 | .table = VAUX2_VSEL_table, | ||
184 | }, | ||
185 | { | ||
186 | .name = "VAUX33", | ||
187 | .min_uV = 1800000, | ||
188 | .max_uV = 3300000, | ||
189 | .table_len = ARRAY_SIZE(VAUX33_VSEL_table), | ||
190 | .table = VAUX33_VSEL_table, | ||
191 | }, | ||
192 | { | ||
193 | .name = "VMMC", | ||
194 | .min_uV = 1800000, | ||
195 | .max_uV = 3300000, | ||
196 | .table_len = ARRAY_SIZE(VMMC_VSEL_table), | ||
197 | .table = VMMC_VSEL_table, | ||
198 | }, | ||
199 | }; | ||
200 | |||
201 | static struct tps_info tps65911_regs[] = { | ||
202 | { | ||
203 | .name = "VIO", | ||
204 | .min_uV = 1500000, | ||
205 | .max_uV = 3300000, | ||
206 | .table_len = ARRAY_SIZE(VIO_VSEL_table), | ||
207 | .table = VIO_VSEL_table, | ||
208 | }, | ||
209 | { | ||
210 | .name = "VDD1", | ||
211 | .min_uV = 600000, | ||
212 | .max_uV = 4500000, | ||
213 | }, | ||
214 | { | ||
215 | .name = "VDD2", | ||
216 | .min_uV = 600000, | ||
217 | .max_uV = 4500000, | ||
218 | }, | ||
219 | { | ||
220 | .name = "VDDCTRL", | ||
221 | .min_uV = 600000, | ||
222 | .max_uV = 1400000, | ||
223 | }, | ||
224 | { | ||
225 | .name = "LDO1", | ||
226 | .min_uV = 1000000, | ||
227 | .max_uV = 3300000, | ||
228 | }, | ||
229 | { | ||
230 | .name = "LDO2", | ||
231 | .min_uV = 1000000, | ||
232 | .max_uV = 3300000, | ||
233 | }, | ||
234 | { | ||
235 | .name = "LDO3", | ||
236 | .min_uV = 1000000, | ||
237 | .max_uV = 3300000, | ||
238 | }, | ||
239 | { | ||
240 | .name = "LDO4", | ||
241 | .min_uV = 1000000, | ||
242 | .max_uV = 3300000, | ||
243 | }, | ||
244 | { | ||
245 | .name = "LDO5", | ||
246 | .min_uV = 1000000, | ||
247 | .max_uV = 3300000, | ||
248 | }, | ||
249 | { | ||
250 | .name = "LDO6", | ||
251 | .min_uV = 1000000, | ||
252 | .max_uV = 3300000, | ||
253 | }, | ||
254 | { | ||
255 | .name = "LDO7", | ||
256 | .min_uV = 1000000, | ||
257 | .max_uV = 3300000, | ||
258 | }, | ||
259 | { | ||
260 | .name = "LDO8", | ||
261 | .min_uV = 1000000, | ||
262 | .max_uV = 3300000, | ||
263 | }, | ||
264 | }; | ||
265 | |||
266 | struct tps65910_reg { | ||
267 | struct regulator_desc desc[TPS65910_NUM_REGULATOR]; | ||
268 | struct tps65910 *mfd; | ||
269 | struct regulator_dev *rdev[TPS65910_NUM_REGULATOR]; | ||
270 | struct tps_info *info[TPS65910_NUM_REGULATOR]; | ||
271 | struct mutex mutex; | ||
272 | int mode; | ||
273 | int (*get_ctrl_reg)(int); | ||
274 | }; | ||
275 | |||
276 | static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg) | ||
277 | { | ||
278 | u8 val; | ||
279 | int err; | ||
280 | |||
281 | err = pmic->mfd->read(pmic->mfd, reg, 1, &val); | ||
282 | if (err) | ||
283 | return err; | ||
284 | |||
285 | return val; | ||
286 | } | ||
287 | |||
288 | static inline int tps65910_write(struct tps65910_reg *pmic, u8 reg, u8 val) | ||
289 | { | ||
290 | return pmic->mfd->write(pmic->mfd, reg, 1, &val); | ||
291 | } | ||
292 | |||
293 | static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg, | ||
294 | u8 set_mask, u8 clear_mask) | ||
295 | { | ||
296 | int err, data; | ||
297 | |||
298 | mutex_lock(&pmic->mutex); | ||
299 | |||
300 | data = tps65910_read(pmic, reg); | ||
301 | if (data < 0) { | ||
302 | dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg); | ||
303 | err = data; | ||
304 | goto out; | ||
305 | } | ||
306 | |||
307 | data &= ~clear_mask; | ||
308 | data |= set_mask; | ||
309 | err = tps65910_write(pmic, reg, data); | ||
310 | if (err) | ||
311 | dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg); | ||
312 | |||
313 | out: | ||
314 | mutex_unlock(&pmic->mutex); | ||
315 | return err; | ||
316 | } | ||
317 | |||
318 | static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg) | ||
319 | { | ||
320 | int data; | ||
321 | |||
322 | mutex_lock(&pmic->mutex); | ||
323 | |||
324 | data = tps65910_read(pmic, reg); | ||
325 | if (data < 0) | ||
326 | dev_err(pmic->mfd->dev, "Read from reg 0x%x failed\n", reg); | ||
327 | |||
328 | mutex_unlock(&pmic->mutex); | ||
329 | return data; | ||
330 | } | ||
331 | |||
332 | static int tps65910_reg_write(struct tps65910_reg *pmic, u8 reg, u8 val) | ||
333 | { | ||
334 | int err; | ||
335 | |||
336 | mutex_lock(&pmic->mutex); | ||
337 | |||
338 | err = tps65910_write(pmic, reg, val); | ||
339 | if (err < 0) | ||
340 | dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg); | ||
341 | |||
342 | mutex_unlock(&pmic->mutex); | ||
343 | return err; | ||
344 | } | ||
345 | |||
346 | static int tps65910_get_ctrl_register(int id) | ||
347 | { | ||
348 | switch (id) { | ||
349 | case TPS65910_REG_VRTC: | ||
350 | return TPS65910_VRTC; | ||
351 | case TPS65910_REG_VIO: | ||
352 | return TPS65910_VIO; | ||
353 | case TPS65910_REG_VDD1: | ||
354 | return TPS65910_VDD1; | ||
355 | case TPS65910_REG_VDD2: | ||
356 | return TPS65910_VDD2; | ||
357 | case TPS65910_REG_VDD3: | ||
358 | return TPS65910_VDD3; | ||
359 | case TPS65910_REG_VDIG1: | ||
360 | return TPS65910_VDIG1; | ||
361 | case TPS65910_REG_VDIG2: | ||
362 | return TPS65910_VDIG2; | ||
363 | case TPS65910_REG_VPLL: | ||
364 | return TPS65910_VPLL; | ||
365 | case TPS65910_REG_VDAC: | ||
366 | return TPS65910_VDAC; | ||
367 | case TPS65910_REG_VAUX1: | ||
368 | return TPS65910_VAUX1; | ||
369 | case TPS65910_REG_VAUX2: | ||
370 | return TPS65910_VAUX2; | ||
371 | case TPS65910_REG_VAUX33: | ||
372 | return TPS65910_VAUX33; | ||
373 | case TPS65910_REG_VMMC: | ||
374 | return TPS65910_VMMC; | ||
375 | default: | ||
376 | return -EINVAL; | ||
377 | } | ||
378 | } | ||
379 | |||
380 | static int tps65911_get_ctrl_register(int id) | ||
381 | { | ||
382 | switch (id) { | ||
383 | case TPS65910_REG_VRTC: | ||
384 | return TPS65910_VRTC; | ||
385 | case TPS65910_REG_VIO: | ||
386 | return TPS65910_VIO; | ||
387 | case TPS65910_REG_VDD1: | ||
388 | return TPS65910_VDD1; | ||
389 | case TPS65910_REG_VDD2: | ||
390 | return TPS65910_VDD2; | ||
391 | case TPS65911_REG_VDDCTRL: | ||
392 | return TPS65911_VDDCTRL; | ||
393 | case TPS65911_REG_LDO1: | ||
394 | return TPS65911_LDO1; | ||
395 | case TPS65911_REG_LDO2: | ||
396 | return TPS65911_LDO2; | ||
397 | case TPS65911_REG_LDO3: | ||
398 | return TPS65911_LDO3; | ||
399 | case TPS65911_REG_LDO4: | ||
400 | return TPS65911_LDO4; | ||
401 | case TPS65911_REG_LDO5: | ||
402 | return TPS65911_LDO5; | ||
403 | case TPS65911_REG_LDO6: | ||
404 | return TPS65911_LDO6; | ||
405 | case TPS65911_REG_LDO7: | ||
406 | return TPS65911_LDO7; | ||
407 | case TPS65911_REG_LDO8: | ||
408 | return TPS65911_LDO8; | ||
409 | default: | ||
410 | return -EINVAL; | ||
411 | } | ||
412 | } | ||
413 | |||
414 | static int tps65910_is_enabled(struct regulator_dev *dev) | ||
415 | { | ||
416 | struct tps65910_reg *pmic = rdev_get_drvdata(dev); | ||
417 | int reg, value, id = rdev_get_id(dev); | ||
418 | |||
419 | reg = pmic->get_ctrl_reg(id); | ||
420 | if (reg < 0) | ||
421 | return reg; | ||
422 | |||
423 | value = tps65910_reg_read(pmic, reg); | ||
424 | if (value < 0) | ||
425 | return value; | ||
426 | |||
427 | return value & TPS65910_SUPPLY_STATE_ENABLED; | ||
428 | } | ||
429 | |||
430 | static int tps65910_enable(struct regulator_dev *dev) | ||
431 | { | ||
432 | struct tps65910_reg *pmic = rdev_get_drvdata(dev); | ||
433 | struct tps65910 *mfd = pmic->mfd; | ||
434 | int reg, id = rdev_get_id(dev); | ||
435 | |||
436 | reg = pmic->get_ctrl_reg(id); | ||
437 | if (reg < 0) | ||
438 | return reg; | ||
439 | |||
440 | return tps65910_set_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED); | ||
441 | } | ||
442 | |||
443 | static int tps65910_disable(struct regulator_dev *dev) | ||
444 | { | ||
445 | struct tps65910_reg *pmic = rdev_get_drvdata(dev); | ||
446 | struct tps65910 *mfd = pmic->mfd; | ||
447 | int reg, id = rdev_get_id(dev); | ||
448 | |||
449 | reg = pmic->get_ctrl_reg(id); | ||
450 | if (reg < 0) | ||
451 | return reg; | ||
452 | |||
453 | return tps65910_clear_bits(mfd, reg, TPS65910_SUPPLY_STATE_ENABLED); | ||
454 | } | ||
455 | |||
456 | |||
457 | static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode) | ||
458 | { | ||
459 | struct tps65910_reg *pmic = rdev_get_drvdata(dev); | ||
460 | struct tps65910 *mfd = pmic->mfd; | ||
461 | int reg, value, id = rdev_get_id(dev); | ||
462 | |||
463 | reg = pmic->get_ctrl_reg(id); | ||
464 | if (reg < 0) | ||
465 | return reg; | ||
466 | |||
467 | switch (mode) { | ||
468 | case REGULATOR_MODE_NORMAL: | ||
469 | return tps65910_modify_bits(pmic, reg, LDO_ST_ON_BIT, | ||
470 | LDO_ST_MODE_BIT); | ||
471 | case REGULATOR_MODE_IDLE: | ||
472 | value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT; | ||
473 | return tps65910_set_bits(mfd, reg, value); | ||
474 | case REGULATOR_MODE_STANDBY: | ||
475 | return tps65910_clear_bits(mfd, reg, LDO_ST_ON_BIT); | ||
476 | } | ||
477 | |||
478 | return -EINVAL; | ||
479 | } | ||
480 | |||
481 | static unsigned int tps65910_get_mode(struct regulator_dev *dev) | ||
482 | { | ||
483 | struct tps65910_reg *pmic = rdev_get_drvdata(dev); | ||
484 | int reg, value, id = rdev_get_id(dev); | ||
485 | |||
486 | reg = pmic->get_ctrl_reg(id); | ||
487 | if (reg < 0) | ||
488 | return reg; | ||
489 | |||
490 | value = tps65910_reg_read(pmic, reg); | ||
491 | if (value < 0) | ||
492 | return value; | ||
493 | |||
494 | if (value & LDO_ST_ON_BIT) | ||
495 | return REGULATOR_MODE_STANDBY; | ||
496 | else if (value & LDO_ST_MODE_BIT) | ||
497 | return REGULATOR_MODE_IDLE; | ||
498 | else | ||
499 | return REGULATOR_MODE_NORMAL; | ||
500 | } | ||
501 | |||
502 | static int tps65910_get_voltage_dcdc(struct regulator_dev *dev) | ||
503 | { | ||
504 | struct tps65910_reg *pmic = rdev_get_drvdata(dev); | ||
505 | int id = rdev_get_id(dev), voltage = 0; | ||
506 | int opvsel = 0, srvsel = 0, vselmax = 0, mult = 0, sr = 0; | ||
507 | |||
508 | switch (id) { | ||
509 | case TPS65910_REG_VDD1: | ||
510 | opvsel = tps65910_reg_read(pmic, TPS65910_VDD1_OP); | ||
511 | mult = tps65910_reg_read(pmic, TPS65910_VDD1); | ||
512 | mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT; | ||
513 | srvsel = tps65910_reg_read(pmic, TPS65910_VDD1_SR); | ||
514 | sr = opvsel & VDD1_OP_CMD_MASK; | ||
515 | opvsel &= VDD1_OP_SEL_MASK; | ||
516 | srvsel &= VDD1_SR_SEL_MASK; | ||
517 | vselmax = 75; | ||
518 | break; | ||
519 | case TPS65910_REG_VDD2: | ||
520 | opvsel = tps65910_reg_read(pmic, TPS65910_VDD2_OP); | ||
521 | mult = tps65910_reg_read(pmic, TPS65910_VDD2); | ||
522 | mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT; | ||
523 | srvsel = tps65910_reg_read(pmic, TPS65910_VDD2_SR); | ||
524 | sr = opvsel & VDD2_OP_CMD_MASK; | ||
525 | opvsel &= VDD2_OP_SEL_MASK; | ||
526 | srvsel &= VDD2_SR_SEL_MASK; | ||
527 | vselmax = 75; | ||
528 | break; | ||
529 | case TPS65911_REG_VDDCTRL: | ||
530 | opvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_OP); | ||
531 | srvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_SR); | ||
532 | sr = opvsel & VDDCTRL_OP_CMD_MASK; | ||
533 | opvsel &= VDDCTRL_OP_SEL_MASK; | ||
534 | srvsel &= VDDCTRL_SR_SEL_MASK; | ||
535 | vselmax = 64; | ||
536 | break; | ||
537 | } | ||
538 | |||
539 | /* a multiplier field of 0 also means gain 1; 2 and 3 are taken as-is */ | ||
540 | if (!mult) | ||
541 | mult = 1; | ||
542 | |||
543 | if (sr) { | ||
544 | /* normalise to valid range */ | ||
545 | if (srvsel < 3) | ||
546 | srvsel = 3; | ||
547 | if (srvsel > vselmax) | ||
548 | srvsel = vselmax; | ||
549 | srvsel -= 3; | ||
550 | |||
551 | voltage = (srvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100; | ||
552 | } else { | ||
553 | |||
554 | /* normalise to valid range */ | ||
555 | if (opvsel < 3) | ||
556 | opvsel = 3; | ||
557 | if (opvsel > vselmax) | ||
558 | opvsel = vselmax; | ||
559 | opvsel -= 3; | ||
560 | |||
561 | voltage = (opvsel * VDD1_2_OFFSET + VDD1_2_MIN_VOLT) * 100; | ||
562 | } | ||
563 | |||
564 | voltage *= mult; | ||
565 | |||
566 | return voltage; | ||
567 | } | ||
568 | |||
569 | static int tps65910_get_voltage(struct regulator_dev *dev) | ||
570 | { | ||
571 | struct tps65910_reg *pmic = rdev_get_drvdata(dev); | ||
572 | int reg, value, id = rdev_get_id(dev), voltage = 0; | ||
573 | |||
574 | reg = pmic->get_ctrl_reg(id); | ||
575 | if (reg < 0) | ||
576 | return reg; | ||
577 | |||
578 | value = tps65910_reg_read(pmic, reg); | ||
579 | if (value < 0) | ||
580 | return value; | ||
581 | |||
582 | switch (id) { | ||
583 | case TPS65910_REG_VIO: | ||
584 | case TPS65910_REG_VDIG1: | ||
585 | case TPS65910_REG_VDIG2: | ||
586 | case TPS65910_REG_VPLL: | ||
587 | case TPS65910_REG_VDAC: | ||
588 | case TPS65910_REG_VAUX1: | ||
589 | case TPS65910_REG_VAUX2: | ||
590 | case TPS65910_REG_VAUX33: | ||
591 | case TPS65910_REG_VMMC: | ||
592 | value &= LDO_SEL_MASK; | ||
593 | value >>= LDO_SEL_SHIFT; | ||
594 | break; | ||
595 | default: | ||
596 | return -EINVAL; | ||
597 | } | ||
598 | |||
599 | voltage = pmic->info[id]->table[value] * 1000; | ||
600 | |||
601 | return voltage; | ||
602 | } | ||
603 | |||
604 | static int tps65910_get_voltage_vdd3(struct regulator_dev *dev) | ||
605 | { | ||
606 | return 5 * 1000 * 1000; | ||
607 | } | ||
608 | |||
609 | static int tps65911_get_voltage(struct regulator_dev *dev) | ||
610 | { | ||
611 | struct tps65910_reg *pmic = rdev_get_drvdata(dev); | ||
612 | int step_mv, id = rdev_get_id(dev); | ||
613 | u8 value, reg; | ||
614 | |||
615 | reg = pmic->get_ctrl_reg(id); | ||
616 | |||
617 | value = tps65910_reg_read(pmic, reg); | ||
618 | |||
619 | switch (id) { | ||
620 | case TPS65911_REG_LDO1: | ||
621 | case TPS65911_REG_LDO2: | ||
622 | case TPS65911_REG_LDO4: | ||
623 | value &= LDO1_SEL_MASK; | ||
624 | value >>= LDO_SEL_SHIFT; | ||
625 | /* The first 5 values of the selector correspond to 1V */ | ||
626 | if (value < 5) | ||
627 | value = 0; | ||
628 | else | ||
629 | value -= 4; | ||
630 | |||
631 | step_mv = 50; | ||
632 | break; | ||
633 | case TPS65911_REG_LDO3: | ||
634 | case TPS65911_REG_LDO5: | ||
635 | case TPS65911_REG_LDO6: | ||
636 | case TPS65911_REG_LDO7: | ||
637 | case TPS65911_REG_LDO8: | ||
638 | value &= LDO3_SEL_MASK; | ||
639 | value >>= LDO_SEL_SHIFT; | ||
640 | /* The first 3 values of the selector correspond to 1V */ | ||
641 | if (value < 3) | ||
642 | value = 0; | ||
643 | else | ||
644 | value -= 2; | ||
645 | |||
646 | step_mv = 100; | ||
647 | break; | ||
648 | case TPS65910_REG_VIO: | ||
649 | return pmic->info[id]->table[value] * 1000; | ||
650 | break; | ||
651 | default: | ||
652 | return -EINVAL; | ||
653 | } | ||
654 | |||
655 | return (LDO_MIN_VOLT + value * step_mv) * 1000; | ||
656 | } | ||
657 | |||
658 | static int tps65910_set_voltage_dcdc(struct regulator_dev *dev, | ||
659 | unsigned selector) | ||
660 | { | ||
661 | struct tps65910_reg *pmic = rdev_get_drvdata(dev); | ||
662 | int id = rdev_get_id(dev), vsel; | ||
663 | int dcdc_mult = 0; | ||
664 | |||
665 | switch (id) { | ||
666 | case TPS65910_REG_VDD1: | ||
667 | dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1; | ||
668 | if (dcdc_mult == 1) | ||
669 | dcdc_mult--; | ||
670 | vsel = (selector % VDD1_2_NUM_VOLTS) + 3; | ||
671 | |||
672 | tps65910_modify_bits(pmic, TPS65910_VDD1, | ||
673 | (dcdc_mult << VDD1_VGAIN_SEL_SHIFT), | ||
674 | VDD1_VGAIN_SEL_MASK); | ||
675 | tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel); | ||
676 | break; | ||
677 | case TPS65910_REG_VDD2: | ||
678 | dcdc_mult = (selector / VDD1_2_NUM_VOLTS) + 1; | ||
679 | if (dcdc_mult == 1) | ||
680 | dcdc_mult--; | ||
681 | vsel = (selector % VDD1_2_NUM_VOLTS) + 3; | ||
682 | |||
683 | tps65910_modify_bits(pmic, TPS65910_VDD2, | ||
684 | (dcdc_mult << VDD2_VGAIN_SEL_SHIFT), | ||
685 | VDD1_VGAIN_SEL_MASK); | ||
686 | tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel); | ||
687 | break; | ||
688 | case TPS65911_REG_VDDCTRL: | ||
689 | vsel = selector; | ||
690 | tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel); | ||
691 | } | ||
692 | |||
693 | return 0; | ||
694 | } | ||
695 | |||
696 | static int tps65910_set_voltage(struct regulator_dev *dev, unsigned selector) | ||
697 | { | ||
698 | struct tps65910_reg *pmic = rdev_get_drvdata(dev); | ||
699 | int reg, id = rdev_get_id(dev); | ||
700 | |||
701 | reg = pmic->get_ctrl_reg(id); | ||
702 | if (reg < 0) | ||
703 | return reg; | ||
704 | |||
705 | switch (id) { | ||
706 | case TPS65910_REG_VIO: | ||
707 | case TPS65910_REG_VDIG1: | ||
708 | case TPS65910_REG_VDIG2: | ||
709 | case TPS65910_REG_VPLL: | ||
710 | case TPS65910_REG_VDAC: | ||
711 | case TPS65910_REG_VAUX1: | ||
712 | case TPS65910_REG_VAUX2: | ||
713 | case TPS65910_REG_VAUX33: | ||
714 | case TPS65910_REG_VMMC: | ||
715 | return tps65910_modify_bits(pmic, reg, | ||
716 | (selector << LDO_SEL_SHIFT), LDO_SEL_MASK); | ||
717 | } | ||
718 | |||
719 | return -EINVAL; | ||
720 | } | ||
721 | |||
722 | static int tps65911_set_voltage(struct regulator_dev *dev, unsigned selector) | ||
723 | { | ||
724 | struct tps65910_reg *pmic = rdev_get_drvdata(dev); | ||
725 | int reg, id = rdev_get_id(dev); | ||
726 | |||
727 | reg = pmic->get_ctrl_reg(id); | ||
728 | if (reg < 0) | ||
729 | return reg; | ||
730 | |||
731 | switch (id) { | ||
732 | case TPS65911_REG_LDO1: | ||
733 | case TPS65911_REG_LDO2: | ||
734 | case TPS65911_REG_LDO4: | ||
735 | return tps65910_modify_bits(pmic, reg, | ||
736 | (selector << LDO_SEL_SHIFT), LDO1_SEL_MASK); | ||
737 | case TPS65911_REG_LDO3: | ||
738 | case TPS65911_REG_LDO5: | ||
739 | case TPS65911_REG_LDO6: | ||
740 | case TPS65911_REG_LDO7: | ||
741 | case TPS65911_REG_LDO8: | ||
742 | case TPS65910_REG_VIO: | ||
743 | return tps65910_modify_bits(pmic, reg, | ||
744 | (selector << LDO_SEL_SHIFT), LDO3_SEL_MASK); | ||
745 | } | ||
746 | |||
747 | return -EINVAL; | ||
748 | } | ||
749 | |||
750 | |||
751 | static int tps65910_list_voltage_dcdc(struct regulator_dev *dev, | ||
752 | unsigned selector) | ||
753 | { | ||
754 | int volt, mult = 1, id = rdev_get_id(dev); | ||
755 | |||
756 | switch (id) { | ||
757 | case TPS65910_REG_VDD1: | ||
758 | case TPS65910_REG_VDD2: | ||
759 | mult = (selector / VDD1_2_NUM_VOLTS) + 1; | ||
760 | volt = VDD1_2_MIN_VOLT + | ||
761 | (selector % VDD1_2_NUM_VOLTS) * VDD1_2_OFFSET; | ||
762 | case TPS65911_REG_VDDCTRL: | ||
763 | volt = VDDCTRL_MIN_VOLT + (selector * VDDCTRL_OFFSET); | ||
764 | } | ||
765 | |||
766 | return volt * 100 * mult; | ||
767 | } | ||
768 | |||
769 | static int tps65910_list_voltage(struct regulator_dev *dev, | ||
770 | unsigned selector) | ||
771 | { | ||
772 | struct tps65910_reg *pmic = rdev_get_drvdata(dev); | ||
773 | int id = rdev_get_id(dev), voltage; | ||
774 | |||
775 | if (id < TPS65910_REG_VIO || id > TPS65910_REG_VMMC) | ||
776 | return -EINVAL; | ||
777 | |||
778 | if (selector >= pmic->info[id]->table_len) | ||
779 | return -EINVAL; | ||
780 | else | ||
781 | voltage = pmic->info[id]->table[selector] * 1000; | ||
782 | |||
783 | return voltage; | ||
784 | } | ||
785 | |||
786 | static int tps65911_list_voltage(struct regulator_dev *dev, unsigned selector) | ||
787 | { | ||
788 | struct tps65910_reg *pmic = rdev_get_drvdata(dev); | ||
789 | int step_mv = 0, id = rdev_get_id(dev); | ||
790 | |||
791 | switch(id) { | ||
792 | case TPS65911_REG_LDO1: | ||
793 | case TPS65911_REG_LDO2: | ||
794 | case TPS65911_REG_LDO4: | ||
795 | /* The first 5 values of the selector correspond to 1V */ | ||
796 | if (selector < 5) | ||
797 | selector = 0; | ||
798 | else | ||
799 | selector -= 4; | ||
800 | |||
801 | step_mv = 50; | ||
802 | break; | ||
803 | case TPS65911_REG_LDO3: | ||
804 | case TPS65911_REG_LDO5: | ||
805 | case TPS65911_REG_LDO6: | ||
806 | case TPS65911_REG_LDO7: | ||
807 | case TPS65911_REG_LDO8: | ||
808 | /* The first 3 values of the selector correspond to 1V */ | ||
809 | if (selector < 3) | ||
810 | selector = 0; | ||
811 | else | ||
812 | selector -= 2; | ||
813 | |||
814 | step_mv = 100; | ||
815 | break; | ||
816 | case TPS65910_REG_VIO: | ||
817 | return pmic->info[id]->table[selector] * 1000; | ||
818 | default: | ||
819 | return -EINVAL; | ||
820 | } | ||
821 | |||
822 | return (LDO_MIN_VOLT + selector * step_mv) * 1000; | ||
823 | } | ||
824 | |||
825 | /* Regulator ops (except VRTC) */ | ||
826 | static struct regulator_ops tps65910_ops_dcdc = { | ||
827 | .is_enabled = tps65910_is_enabled, | ||
828 | .enable = tps65910_enable, | ||
829 | .disable = tps65910_disable, | ||
830 | .set_mode = tps65910_set_mode, | ||
831 | .get_mode = tps65910_get_mode, | ||
832 | .get_voltage = tps65910_get_voltage_dcdc, | ||
833 | .set_voltage_sel = tps65910_set_voltage_dcdc, | ||
834 | .list_voltage = tps65910_list_voltage_dcdc, | ||
835 | }; | ||
836 | |||
837 | static struct regulator_ops tps65910_ops_vdd3 = { | ||
838 | .is_enabled = tps65910_is_enabled, | ||
839 | .enable = tps65910_enable, | ||
840 | .disable = tps65910_disable, | ||
841 | .set_mode = tps65910_set_mode, | ||
842 | .get_mode = tps65910_get_mode, | ||
843 | .get_voltage = tps65910_get_voltage_vdd3, | ||
844 | .list_voltage = tps65910_list_voltage, | ||
845 | }; | ||
846 | |||
847 | static struct regulator_ops tps65910_ops = { | ||
848 | .is_enabled = tps65910_is_enabled, | ||
849 | .enable = tps65910_enable, | ||
850 | .disable = tps65910_disable, | ||
851 | .set_mode = tps65910_set_mode, | ||
852 | .get_mode = tps65910_get_mode, | ||
853 | .get_voltage = tps65910_get_voltage, | ||
854 | .set_voltage_sel = tps65910_set_voltage, | ||
855 | .list_voltage = tps65910_list_voltage, | ||
856 | }; | ||
857 | |||
858 | static struct regulator_ops tps65911_ops = { | ||
859 | .is_enabled = tps65910_is_enabled, | ||
860 | .enable = tps65910_enable, | ||
861 | .disable = tps65910_disable, | ||
862 | .set_mode = tps65910_set_mode, | ||
863 | .get_mode = tps65910_get_mode, | ||
864 | .get_voltage = tps65911_get_voltage, | ||
865 | .set_voltage_sel = tps65911_set_voltage, | ||
866 | .list_voltage = tps65911_list_voltage, | ||
867 | }; | ||
868 | |||
869 | static __devinit int tps65910_probe(struct platform_device *pdev) | ||
870 | { | ||
871 | struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent); | ||
872 | struct tps_info *info; | ||
873 | struct regulator_init_data *reg_data; | ||
874 | struct regulator_dev *rdev; | ||
875 | struct tps65910_reg *pmic; | ||
876 | struct tps65910_board *pmic_plat_data; | ||
877 | int i, err; | ||
878 | |||
879 | pmic_plat_data = dev_get_platdata(tps65910->dev); | ||
880 | if (!pmic_plat_data) | ||
881 | return -EINVAL; | ||
882 | |||
883 | reg_data = pmic_plat_data->tps65910_pmic_init_data; | ||
884 | |||
885 | pmic = kzalloc(sizeof(*pmic), GFP_KERNEL); | ||
886 | if (!pmic) | ||
887 | return -ENOMEM; | ||
888 | |||
889 | mutex_init(&pmic->mutex); | ||
890 | pmic->mfd = tps65910; | ||
891 | platform_set_drvdata(pdev, pmic); | ||
892 | |||
893 | /* Give control of all registers to the control port */ | ||
894 | tps65910_set_bits(pmic->mfd, TPS65910_DEVCTRL, | ||
895 | DEVCTRL_SR_CTL_I2C_SEL_MASK); | ||
896 | |||
897 | switch(tps65910_chip_id(tps65910)) { | ||
898 | case TPS65910: | ||
899 | pmic->get_ctrl_reg = &tps65910_get_ctrl_register; | ||
900 | info = tps65910_regs; | ||
901 | case TPS65911: | ||
902 | pmic->get_ctrl_reg = &tps65911_get_ctrl_register; | ||
903 | info = tps65911_regs; | ||
904 | default: | ||
905 | pr_err("Invalid tps chip version\n"); | ||
906 | return -ENODEV; | ||
907 | } | ||
908 | |||
909 | for (i = 0; i < TPS65910_NUM_REGULATOR; i++, info++, reg_data++) { | ||
910 | /* Register the regulators */ | ||
911 | pmic->info[i] = info; | ||
912 | |||
913 | pmic->desc[i].name = info->name; | ||
914 | pmic->desc[i].id = i; | ||
915 | pmic->desc[i].n_voltages = info->table_len; | ||
916 | |||
917 | if (i == TPS65910_REG_VDD1 || i == TPS65910_REG_VDD2) { | ||
918 | pmic->desc[i].ops = &tps65910_ops_dcdc; | ||
919 | } else if (i == TPS65910_REG_VDD3) { | ||
920 | if (tps65910_chip_id(tps65910) == TPS65910) | ||
921 | pmic->desc[i].ops = &tps65910_ops_vdd3; | ||
922 | else | ||
923 | pmic->desc[i].ops = &tps65910_ops_dcdc; | ||
924 | } else { | ||
925 | if (tps65910_chip_id(tps65910) == TPS65910) | ||
926 | pmic->desc[i].ops = &tps65910_ops; | ||
927 | else | ||
928 | pmic->desc[i].ops = &tps65911_ops; | ||
929 | } | ||
930 | |||
931 | pmic->desc[i].type = REGULATOR_VOLTAGE; | ||
932 | pmic->desc[i].owner = THIS_MODULE; | ||
933 | |||
934 | rdev = regulator_register(&pmic->desc[i], | ||
935 | tps65910->dev, reg_data, pmic); | ||
936 | if (IS_ERR(rdev)) { | ||
937 | dev_err(tps65910->dev, | ||
938 | "failed to register %s regulator\n", | ||
939 | pdev->name); | ||
940 | err = PTR_ERR(rdev); | ||
941 | goto err; | ||
942 | } | ||
943 | |||
944 | /* Save regulator for cleanup */ | ||
945 | pmic->rdev[i] = rdev; | ||
946 | } | ||
947 | return 0; | ||
948 | |||
949 | err: | ||
950 | while (--i >= 0) | ||
951 | regulator_unregister(pmic->rdev[i]); | ||
952 | |||
953 | kfree(pmic); | ||
954 | return err; | ||
955 | } | ||
956 | |||
957 | static int __devexit tps65910_remove(struct platform_device *pdev) | ||
958 | { | ||
959 | struct tps65910_reg *tps65910_reg = platform_get_drvdata(pdev); | ||
960 | int i; | ||
961 | |||
962 | for (i = 0; i < TPS65910_NUM_REGULATOR; i++) | ||
963 | regulator_unregister(tps65910_reg->rdev[i]); | ||
964 | |||
965 | kfree(tps65910_reg); | ||
966 | return 0; | ||
967 | } | ||
968 | |||
969 | static struct platform_driver tps65910_driver = { | ||
970 | .driver = { | ||
971 | .name = "tps65910-pmic", | ||
972 | .owner = THIS_MODULE, | ||
973 | }, | ||
974 | .probe = tps65910_probe, | ||
975 | .remove = __devexit_p(tps65910_remove), | ||
976 | }; | ||
977 | |||
978 | static int __init tps65910_init(void) | ||
979 | { | ||
980 | return platform_driver_register(&tps65910_driver); | ||
981 | } | ||
982 | subsys_initcall(tps65910_init); | ||
983 | |||
984 | static void __exit tps65910_cleanup(void) | ||
985 | { | ||
986 | platform_driver_unregister(&tps65910_driver); | ||
987 | } | ||
988 | module_exit(tps65910_cleanup); | ||
989 | |||
990 | MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>"); | ||
991 | MODULE_DESCRIPTION("TPS65910 voltage regulator driver"); | ||
992 | MODULE_LICENSE("GPL v2"); | ||
993 | MODULE_ALIAS("platform:tps65910-pmic"); | ||
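For the VDD1/VDD2 buck converters in the new driver above, the selector is split into a gain multiplier and a linear step index that maps onto the VDD1_2_MIN_VOLT/VDD1_2_OFFSET range. The macro values live in <linux/mfd/tps65910.h>, which is not part of this diff, so the numbers in the standalone sketch below are assumptions chosen only to show the shape of the arithmetic, not the chip's real limits.

    #include <stdio.h>

    /* Assumed stand-ins for the header macros (not taken from this diff):
     * base voltage, step size (both in 100 uV units) and steps per gain range. */
    #define EX_MIN_VOLT     6000    /* 600.0 mV */
    #define EX_OFFSET       125     /* 12.5 mV per step */
    #define EX_NUM_VOLTS    73

    static int ex_list_voltage_dcdc(unsigned int selector)
    {
            unsigned int mult = (selector / EX_NUM_VOLTS) + 1;      /* gain */
            unsigned int step = selector % EX_NUM_VOLTS;

            /* follows the VDD1/VDD2 branch of tps65910_list_voltage_dcdc():
             * (min + step * offset) * 100 uV, scaled by the gain multiplier */
            return (EX_MIN_VOLT + step * EX_OFFSET) * 100 * mult;
    }

    int main(void)
    {
            printf("selector 0  -> %d uV\n", ex_list_voltage_dcdc(0));
            printf("selector 72 -> %d uV\n", ex_list_voltage_dcdc(72));
            printf("selector 73 -> %d uV\n", ex_list_voltage_dcdc(73));
            return 0;
    }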
diff --git a/drivers/regulator/twl-regulator.c b/drivers/regulator/twl-regulator.c index 6a292852a358..87fe0f75a56e 100644 --- a/drivers/regulator/twl-regulator.c +++ b/drivers/regulator/twl-regulator.c | |||
@@ -51,8 +51,13 @@ struct twlreg_info { | |||
51 | u16 min_mV; | 51 | u16 min_mV; |
52 | u16 max_mV; | 52 | u16 max_mV; |
53 | 53 | ||
54 | u8 flags; | ||
55 | |||
54 | /* used by regulator core */ | 56 | /* used by regulator core */ |
55 | struct regulator_desc desc; | 57 | struct regulator_desc desc; |
58 | |||
59 | /* chip specific features */ | ||
60 | unsigned long features; | ||
56 | }; | 61 | }; |
57 | 62 | ||
58 | 63 | ||
@@ -70,12 +75,35 @@ struct twlreg_info { | |||
70 | #define VREG_TRANS 1 | 75 | #define VREG_TRANS 1 |
71 | #define VREG_STATE 2 | 76 | #define VREG_STATE 2 |
72 | #define VREG_VOLTAGE 3 | 77 | #define VREG_VOLTAGE 3 |
78 | #define VREG_VOLTAGE_SMPS 4 | ||
73 | /* TWL6030 Misc register offsets */ | 79 | /* TWL6030 Misc register offsets */ |
74 | #define VREG_BC_ALL 1 | 80 | #define VREG_BC_ALL 1 |
75 | #define VREG_BC_REF 2 | 81 | #define VREG_BC_REF 2 |
76 | #define VREG_BC_PROC 3 | 82 | #define VREG_BC_PROC 3 |
77 | #define VREG_BC_CLK_RST 4 | 83 | #define VREG_BC_CLK_RST 4 |
78 | 84 | ||
85 | /* TWL6030 LDO register values for CFG_STATE */ | ||
86 | #define TWL6030_CFG_STATE_OFF 0x00 | ||
87 | #define TWL6030_CFG_STATE_ON 0x01 | ||
88 | #define TWL6030_CFG_STATE_OFF2 0x02 | ||
89 | #define TWL6030_CFG_STATE_SLEEP 0x03 | ||
90 | #define TWL6030_CFG_STATE_GRP_SHIFT 5 | ||
91 | #define TWL6030_CFG_STATE_APP_SHIFT 2 | ||
92 | #define TWL6030_CFG_STATE_APP_MASK (0x03 << TWL6030_CFG_STATE_APP_SHIFT) | ||
93 | #define TWL6030_CFG_STATE_APP(v) (((v) & TWL6030_CFG_STATE_APP_MASK) >>\ | ||
94 | TWL6030_CFG_STATE_APP_SHIFT) | ||
95 | |||
96 | /* Flags for SMPS Voltage reading */ | ||
97 | #define SMPS_OFFSET_EN BIT(0) | ||
98 | #define SMPS_EXTENDED_EN BIT(1) | ||
99 | |||
100 | /* twl6025 SMPS EPROM values */ | ||
101 | #define TWL6030_SMPS_OFFSET 0xB0 | ||
102 | #define TWL6030_SMPS_MULT 0xB3 | ||
103 | #define SMPS_MULTOFFSET_SMPS4 BIT(0) | ||
104 | #define SMPS_MULTOFFSET_VIO BIT(1) | ||
105 | #define SMPS_MULTOFFSET_SMPS3 BIT(6) | ||
106 | |||
79 | static inline int | 107 | static inline int |
80 | twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset) | 108 | twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset) |
81 | { | 109 | { |
@@ -118,21 +146,38 @@ static int twlreg_grp(struct regulator_dev *rdev) | |||
118 | #define P2_GRP_6030 BIT(1) /* "peripherals" */ | 146 | #define P2_GRP_6030 BIT(1) /* "peripherals" */ |
119 | #define P1_GRP_6030 BIT(0) /* CPU/Linux */ | 147 | #define P1_GRP_6030 BIT(0) /* CPU/Linux */ |
120 | 148 | ||
121 | static int twlreg_is_enabled(struct regulator_dev *rdev) | 149 | static int twl4030reg_is_enabled(struct regulator_dev *rdev) |
122 | { | 150 | { |
123 | int state = twlreg_grp(rdev); | 151 | int state = twlreg_grp(rdev); |
124 | 152 | ||
125 | if (state < 0) | 153 | if (state < 0) |
126 | return state; | 154 | return state; |
127 | 155 | ||
128 | if (twl_class_is_4030()) | 156 | return state & P1_GRP_4030; |
129 | state &= P1_GRP_4030; | 157 | } |
158 | |||
159 | static int twl6030reg_is_enabled(struct regulator_dev *rdev) | ||
160 | { | ||
161 | struct twlreg_info *info = rdev_get_drvdata(rdev); | ||
162 | int grp = 0, val; | ||
163 | |||
164 | if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS))) | ||
165 | grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP); | ||
166 | if (grp < 0) | ||
167 | return grp; | ||
168 | |||
169 | if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS))) | ||
170 | grp &= P1_GRP_6030; | ||
130 | else | 171 | else |
131 | state &= P1_GRP_6030; | 172 | grp = 1; |
132 | return state; | 173 | |
174 | val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE); | ||
175 | val = TWL6030_CFG_STATE_APP(val); | ||
176 | |||
177 | return grp && (val == TWL6030_CFG_STATE_ON); | ||
133 | } | 178 | } |
134 | 179 | ||
135 | static int twlreg_enable(struct regulator_dev *rdev) | 180 | static int twl4030reg_enable(struct regulator_dev *rdev) |
136 | { | 181 | { |
137 | struct twlreg_info *info = rdev_get_drvdata(rdev); | 182 | struct twlreg_info *info = rdev_get_drvdata(rdev); |
138 | int grp; | 183 | int grp; |
@@ -142,10 +187,7 @@ static int twlreg_enable(struct regulator_dev *rdev) | |||
142 | if (grp < 0) | 187 | if (grp < 0) |
143 | return grp; | 188 | return grp; |
144 | 189 | ||
145 | if (twl_class_is_4030()) | 190 | grp |= P1_GRP_4030; |
146 | grp |= P1_GRP_4030; | ||
147 | else | ||
148 | grp |= P1_GRP_6030; | ||
149 | 191 | ||
150 | ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); | 192 | ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); |
151 | 193 | ||
@@ -154,29 +196,63 @@ static int twlreg_enable(struct regulator_dev *rdev) | |||
154 | return ret; | 196 | return ret; |
155 | } | 197 | } |
156 | 198 | ||
157 | static int twlreg_disable(struct regulator_dev *rdev) | 199 | static int twl6030reg_enable(struct regulator_dev *rdev) |
200 | { | ||
201 | struct twlreg_info *info = rdev_get_drvdata(rdev); | ||
202 | int grp = 0; | ||
203 | int ret; | ||
204 | |||
205 | if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS))) | ||
206 | grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP); | ||
207 | if (grp < 0) | ||
208 | return grp; | ||
209 | |||
210 | ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, | ||
211 | grp << TWL6030_CFG_STATE_GRP_SHIFT | | ||
212 | TWL6030_CFG_STATE_ON); | ||
213 | |||
214 | udelay(info->delay); | ||
215 | |||
216 | return ret; | ||
217 | } | ||
218 | |||
219 | static int twl4030reg_disable(struct regulator_dev *rdev) | ||
158 | { | 220 | { |
159 | struct twlreg_info *info = rdev_get_drvdata(rdev); | 221 | struct twlreg_info *info = rdev_get_drvdata(rdev); |
160 | int grp; | 222 | int grp; |
223 | int ret; | ||
161 | 224 | ||
162 | grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP); | 225 | grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP); |
163 | if (grp < 0) | 226 | if (grp < 0) |
164 | return grp; | 227 | return grp; |
165 | 228 | ||
166 | if (twl_class_is_4030()) | 229 | grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030); |
167 | grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030); | ||
168 | else | ||
169 | grp &= ~(P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030); | ||
170 | 230 | ||
171 | return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); | 231 | ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp); |
232 | |||
233 | return ret; | ||
172 | } | 234 | } |
173 | 235 | ||
174 | static int twlreg_get_status(struct regulator_dev *rdev) | 236 | static int twl6030reg_disable(struct regulator_dev *rdev) |
175 | { | 237 | { |
176 | int state = twlreg_grp(rdev); | 238 | struct twlreg_info *info = rdev_get_drvdata(rdev); |
239 | int grp = 0; | ||
240 | int ret; | ||
241 | |||
242 | if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS))) | ||
243 | grp = P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030; | ||
244 | |||
245 | /* For 6030, set the off state for all grps enabled */ | ||
246 | ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, | ||
247 | (grp) << TWL6030_CFG_STATE_GRP_SHIFT | | ||
248 | TWL6030_CFG_STATE_OFF); | ||
249 | |||
250 | return ret; | ||
251 | } | ||
177 | 252 | ||
178 | if (twl_class_is_6030()) | 253 | static int twl4030reg_get_status(struct regulator_dev *rdev) |
179 | return 0; /* FIXME return for 6030 regulator */ | 254 | { |
255 | int state = twlreg_grp(rdev); | ||
180 | 256 | ||
181 | if (state < 0) | 257 | if (state < 0) |
182 | return state; | 258 | return state; |
@@ -190,15 +266,39 @@ static int twlreg_get_status(struct regulator_dev *rdev) | |||
190 | : REGULATOR_STATUS_STANDBY; | 266 | : REGULATOR_STATUS_STANDBY; |
191 | } | 267 | } |
192 | 268 | ||
193 | static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode) | 269 | static int twl6030reg_get_status(struct regulator_dev *rdev) |
270 | { | ||
271 | struct twlreg_info *info = rdev_get_drvdata(rdev); | ||
272 | int val; | ||
273 | |||
274 | val = twlreg_grp(rdev); | ||
275 | if (val < 0) | ||
276 | return val; | ||
277 | |||
278 | val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE); | ||
279 | |||
280 | switch (TWL6030_CFG_STATE_APP(val)) { | ||
281 | case TWL6030_CFG_STATE_ON: | ||
282 | return REGULATOR_STATUS_NORMAL; | ||
283 | |||
284 | case TWL6030_CFG_STATE_SLEEP: | ||
285 | return REGULATOR_STATUS_STANDBY; | ||
286 | |||
287 | case TWL6030_CFG_STATE_OFF: | ||
288 | case TWL6030_CFG_STATE_OFF2: | ||
289 | default: | ||
290 | break; | ||
291 | } | ||
292 | |||
293 | return REGULATOR_STATUS_OFF; | ||
294 | } | ||
295 | |||
296 | static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode) | ||
194 | { | 297 | { |
195 | struct twlreg_info *info = rdev_get_drvdata(rdev); | 298 | struct twlreg_info *info = rdev_get_drvdata(rdev); |
196 | unsigned message; | 299 | unsigned message; |
197 | int status; | 300 | int status; |
198 | 301 | ||
199 | if (twl_class_is_6030()) | ||
200 | return 0; /* FIXME return for 6030 regulator */ | ||
201 | |||
202 | /* We can only set the mode through state machine commands... */ | 302 | /* We can only set the mode through state machine commands... */ |
203 | switch (mode) { | 303 | switch (mode) { |
204 | case REGULATOR_MODE_NORMAL: | 304 | case REGULATOR_MODE_NORMAL: |
@@ -227,6 +327,36 @@ static int twlreg_set_mode(struct regulator_dev *rdev, unsigned mode) | |||
227 | message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB); | 327 | message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB); |
228 | } | 328 | } |
229 | 329 | ||
330 | static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode) | ||
331 | { | ||
332 | struct twlreg_info *info = rdev_get_drvdata(rdev); | ||
333 | int grp = 0; | ||
334 | int val; | ||
335 | |||
336 | if (!(twl_class_is_6030() && (info->features & TWL6025_SUBCLASS))) | ||
337 | grp = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_GRP); | ||
338 | |||
339 | if (grp < 0) | ||
340 | return grp; | ||
341 | |||
342 | /* Compose the state register settings */ | ||
343 | val = grp << TWL6030_CFG_STATE_GRP_SHIFT; | ||
344 | /* We can only set the mode through state machine commands... */ | ||
345 | switch (mode) { | ||
346 | case REGULATOR_MODE_NORMAL: | ||
347 | val |= TWL6030_CFG_STATE_ON; | ||
348 | break; | ||
349 | case REGULATOR_MODE_STANDBY: | ||
350 | val |= TWL6030_CFG_STATE_SLEEP; | ||
351 | break; | ||
352 | |||
353 | default: | ||
354 | return -EINVAL; | ||
355 | } | ||
356 | |||
357 | return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, val); | ||
358 | } | ||
359 | |||
230 | /*----------------------------------------------------------------------*/ | 360 | /*----------------------------------------------------------------------*/ |
231 | 361 | ||
232 | /* | 362 | /* |
@@ -375,13 +505,13 @@ static struct regulator_ops twl4030ldo_ops = { | |||
375 | .set_voltage = twl4030ldo_set_voltage, | 505 | .set_voltage = twl4030ldo_set_voltage, |
376 | .get_voltage = twl4030ldo_get_voltage, | 506 | .get_voltage = twl4030ldo_get_voltage, |
377 | 507 | ||
378 | .enable = twlreg_enable, | 508 | .enable = twl4030reg_enable, |
379 | .disable = twlreg_disable, | 509 | .disable = twl4030reg_disable, |
380 | .is_enabled = twlreg_is_enabled, | 510 | .is_enabled = twl4030reg_is_enabled, |
381 | 511 | ||
382 | .set_mode = twlreg_set_mode, | 512 | .set_mode = twl4030reg_set_mode, |
383 | 513 | ||
384 | .get_status = twlreg_get_status, | 514 | .get_status = twl4030reg_get_status, |
385 | }; | 515 | }; |
386 | 516 | ||
387 | static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) | 517 | static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned index) |
@@ -433,13 +563,13 @@ static struct regulator_ops twl6030ldo_ops = { | |||
433 | .set_voltage = twl6030ldo_set_voltage, | 563 | .set_voltage = twl6030ldo_set_voltage, |
434 | .get_voltage = twl6030ldo_get_voltage, | 564 | .get_voltage = twl6030ldo_get_voltage, |
435 | 565 | ||
436 | .enable = twlreg_enable, | 566 | .enable = twl6030reg_enable, |
437 | .disable = twlreg_disable, | 567 | .disable = twl6030reg_disable, |
438 | .is_enabled = twlreg_is_enabled, | 568 | .is_enabled = twl6030reg_is_enabled, |
439 | 569 | ||
440 | .set_mode = twlreg_set_mode, | 570 | .set_mode = twl6030reg_set_mode, |
441 | 571 | ||
442 | .get_status = twlreg_get_status, | 572 | .get_status = twl6030reg_get_status, |
443 | }; | 573 | }; |
444 | 574 | ||
445 | /*----------------------------------------------------------------------*/ | 575 | /*----------------------------------------------------------------------*/ |
@@ -461,25 +591,242 @@ static int twlfixed_get_voltage(struct regulator_dev *rdev) | |||
461 | return info->min_mV * 1000; | 591 | return info->min_mV * 1000; |
462 | } | 592 | } |
463 | 593 | ||
464 | static struct regulator_ops twlfixed_ops = { | 594 | static struct regulator_ops twl4030fixed_ops = { |
595 | .list_voltage = twlfixed_list_voltage, | ||
596 | |||
597 | .get_voltage = twlfixed_get_voltage, | ||
598 | |||
599 | .enable = twl4030reg_enable, | ||
600 | .disable = twl4030reg_disable, | ||
601 | .is_enabled = twl4030reg_is_enabled, | ||
602 | |||
603 | .set_mode = twl4030reg_set_mode, | ||
604 | |||
605 | .get_status = twl4030reg_get_status, | ||
606 | }; | ||
607 | |||
608 | static struct regulator_ops twl6030fixed_ops = { | ||
465 | .list_voltage = twlfixed_list_voltage, | 609 | .list_voltage = twlfixed_list_voltage, |
466 | 610 | ||
467 | .get_voltage = twlfixed_get_voltage, | 611 | .get_voltage = twlfixed_get_voltage, |
468 | 612 | ||
469 | .enable = twlreg_enable, | 613 | .enable = twl6030reg_enable, |
470 | .disable = twlreg_disable, | 614 | .disable = twl6030reg_disable, |
471 | .is_enabled = twlreg_is_enabled, | 615 | .is_enabled = twl6030reg_is_enabled, |
472 | 616 | ||
473 | .set_mode = twlreg_set_mode, | 617 | .set_mode = twl6030reg_set_mode, |
474 | 618 | ||
475 | .get_status = twlreg_get_status, | 619 | .get_status = twl6030reg_get_status, |
476 | }; | 620 | }; |
477 | 621 | ||
478 | static struct regulator_ops twl6030_fixed_resource = { | 622 | static struct regulator_ops twl6030_fixed_resource = { |
479 | .enable = twlreg_enable, | 623 | .enable = twl6030reg_enable, |
480 | .disable = twlreg_disable, | 624 | .disable = twl6030reg_disable, |
481 | .is_enabled = twlreg_is_enabled, | 625 | .is_enabled = twl6030reg_is_enabled, |
482 | .get_status = twlreg_get_status, | 626 | .get_status = twl6030reg_get_status, |
627 | }; | ||
628 | |||
629 | /* | ||
630 | * SMPS status and control | ||
631 | */ | ||
632 | |||
633 | static int twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index) | ||
634 | { | ||
635 | struct twlreg_info *info = rdev_get_drvdata(rdev); | ||
636 | |||
637 | int voltage = 0; | ||
638 | |||
639 | switch (info->flags) { | ||
640 | case SMPS_OFFSET_EN: | ||
641 | voltage = 100000; | ||
642 | /* fall through */ | ||
643 | case 0: | ||
644 | switch (index) { | ||
645 | case 0: | ||
646 | voltage = 0; | ||
647 | break; | ||
648 | case 58: | ||
649 | voltage = 1350 * 1000; | ||
650 | break; | ||
651 | case 59: | ||
652 | voltage = 1500 * 1000; | ||
653 | break; | ||
654 | case 60: | ||
655 | voltage = 1800 * 1000; | ||
656 | break; | ||
657 | case 61: | ||
658 | voltage = 1900 * 1000; | ||
659 | break; | ||
660 | case 62: | ||
661 | voltage = 2100 * 1000; | ||
662 | break; | ||
663 | default: | ||
664 | voltage += (600000 + (12500 * (index - 1))); | ||
665 | } | ||
666 | break; | ||
667 | case SMPS_EXTENDED_EN: | ||
668 | switch (index) { | ||
669 | case 0: | ||
670 | voltage = 0; | ||
671 | break; | ||
672 | case 58: | ||
673 | voltage = 2084 * 1000; | ||
674 | break; | ||
675 | case 59: | ||
676 | voltage = 2315 * 1000; | ||
677 | break; | ||
678 | case 60: | ||
679 | voltage = 2778 * 1000; | ||
680 | break; | ||
681 | case 61: | ||
682 | voltage = 2932 * 1000; | ||
683 | break; | ||
684 | case 62: | ||
685 | voltage = 3241 * 1000; | ||
686 | break; | ||
687 | default: | ||
688 | voltage = (1852000 + (38600 * (index - 1))); | ||
689 | } | ||
690 | break; | ||
691 | case SMPS_OFFSET_EN | SMPS_EXTENDED_EN: | ||
692 | switch (index) { | ||
693 | case 0: | ||
694 | voltage = 0; | ||
695 | break; | ||
696 | case 58: | ||
697 | voltage = 4167 * 1000; | ||
698 | break; | ||
699 | case 59: | ||
700 | voltage = 2315 * 1000; | ||
701 | break; | ||
702 | case 60: | ||
703 | voltage = 2778 * 1000; | ||
704 | break; | ||
705 | case 61: | ||
706 | voltage = 2932 * 1000; | ||
707 | break; | ||
708 | case 62: | ||
709 | voltage = 3241 * 1000; | ||
710 | break; | ||
711 | default: | ||
712 | voltage = (2161000 + (38600 * (index - 1))); | ||
713 | } | ||
714 | break; | ||
715 | } | ||
716 | |||
717 | return voltage; | ||
718 | } | ||
719 | |||
720 | static int | ||
721 | twl6030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV, | ||
722 | unsigned int *selector) | ||
723 | { | ||
724 | struct twlreg_info *info = rdev_get_drvdata(rdev); | ||
725 | int vsel = 0; | ||
726 | |||
727 | switch (info->flags) { | ||
728 | case 0: | ||
729 | if (min_uV == 0) | ||
730 | vsel = 0; | ||
731 | else if ((min_uV >= 600000) && (max_uV <= 1300000)) { | ||
732 | vsel = (min_uV - 600000) / 125; | ||
733 | if (vsel % 100) | ||
734 | vsel += 100; | ||
735 | vsel /= 100; | ||
736 | vsel++; | ||
737 | } | ||
738 | /* Values 1..57 for vsel are linear and can be calculated; | ||
739 | * values 58..62 are non-linear. | ||
740 | */ | ||
741 | else if ((min_uV > 1900000) && (max_uV >= 2100000)) | ||
742 | vsel = 62; | ||
743 | else if ((min_uV > 1800000) && (max_uV >= 1900000)) | ||
744 | vsel = 61; | ||
745 | else if ((min_uV > 1500000) && (max_uV >= 1800000)) | ||
746 | vsel = 60; | ||
747 | else if ((min_uV > 1350000) && (max_uV >= 1500000)) | ||
748 | vsel = 59; | ||
749 | else if ((min_uV > 1300000) && (max_uV >= 1350000)) | ||
750 | vsel = 58; | ||
751 | else | ||
752 | return -EINVAL; | ||
753 | break; | ||
754 | case SMPS_OFFSET_EN: | ||
755 | if (min_uV == 0) | ||
756 | vsel = 0; | ||
757 | else if ((min_uV >= 700000) && (max_uV <= 1420000)) { | ||
758 | vsel = (min_uV - 700000) / 125; | ||
759 | if (vsel % 100) | ||
760 | vsel += 100; | ||
761 | vsel /= 100; | ||
762 | vsel++; | ||
763 | } | ||
764 | /* Values 1..57 for vsel are linear and can be calculated; | ||
765 | * values 58..62 are non-linear. | ||
766 | */ | ||
767 | else if ((min_uV > 1900000) && (max_uV >= 2100000)) | ||
768 | vsel = 62; | ||
769 | else if ((min_uV > 1800000) && (max_uV >= 1900000)) | ||
770 | vsel = 61; | ||
771 | else if ((min_uV > 1350000) && (max_uV >= 1800000)) | ||
772 | vsel = 60; | ||
773 | else if ((min_uV > 1350000) && (max_uV >= 1500000)) | ||
774 | vsel = 59; | ||
775 | else if ((min_uV > 1300000) && (max_uV >= 1350000)) | ||
776 | vsel = 58; | ||
777 | else | ||
778 | return -EINVAL; | ||
779 | break; | ||
780 | case SMPS_EXTENDED_EN: | ||
781 | if (min_uV == 0) | ||
782 | vsel = 0; | ||
783 | else if ((min_uV >= 1852000) && (max_uV <= 4013600)) { | ||
784 | vsel = (min_uV - 1852000) / 386; | ||
785 | if (vsel % 100) | ||
786 | vsel += 100; | ||
787 | vsel /= 100; | ||
788 | vsel++; | ||
789 | } | ||
790 | break; | ||
791 | case SMPS_OFFSET_EN|SMPS_EXTENDED_EN: | ||
792 | if (min_uV == 0) | ||
793 | vsel = 0; | ||
794 | else if ((min_uV >= 2161000) && (max_uV <= 4321000)) { | ||
795 | vsel = (min_uV - 1852000) / 386; | ||
796 | if (vsel % 100) | ||
797 | vsel += 100; | ||
798 | vsel /= 100; | ||
799 | vsel++; | ||
800 | } | ||
801 | break; | ||
802 | } | ||
803 | |||
804 | *selector = vsel; | ||
805 | |||
806 | return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS, | ||
807 | vsel); | ||
808 | } | ||
809 | |||
810 | static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev) | ||
811 | { | ||
812 | struct twlreg_info *info = rdev_get_drvdata(rdev); | ||
813 | |||
814 | return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS); | ||
815 | } | ||
816 | |||
817 | static struct regulator_ops twlsmps_ops = { | ||
818 | .list_voltage = twl6030smps_list_voltage, | ||
819 | |||
820 | .set_voltage = twl6030smps_set_voltage, | ||
821 | .get_voltage_sel = twl6030smps_get_voltage_sel, | ||
822 | |||
823 | .enable = twl6030reg_enable, | ||
824 | .disable = twl6030reg_disable, | ||
825 | .is_enabled = twl6030reg_is_enabled, | ||
826 | |||
827 | .set_mode = twl6030reg_set_mode, | ||
828 | |||
829 | .get_status = twl6030reg_get_status, | ||
483 | }; | 830 | }; |
484 | 831 | ||
485 | /*----------------------------------------------------------------------*/ | 832 | /*----------------------------------------------------------------------*/ |
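The twl6030smps_set_voltage() hunk above turns a requested microvolt window into a vsel code by expressing the distance above 600 mV in 100 uV units, rounding up to a whole 12.5 mV step and reserving selector 0 for 0 V; twl6030smps_list_voltage() maps the linear selectors 1..57 back the other way. A standalone sketch of that round trip for the plain (flags == 0) linear range only, illustrative rather than a drop-in for the driver:

    #include <stdio.h>

    /* Constants below are the ones visible in the hunk (600 mV base,
     * 12.5 mV steps); the non-linear selectors 58..62 are not modelled. */
    static unsigned int uv_to_vsel(int min_uV)
    {
            int vsel = (min_uV - 600000) / 125;     /* 100 uV units, truncated */

            if (vsel % 100)                         /* round up to a full step */
                    vsel += 100;
            vsel /= 100;
            return vsel + 1;                        /* selector 0 means 0 V */
    }

    static int vsel_to_uv(unsigned int vsel)
    {
            return 600000 + 12500 * (vsel - 1);     /* linear selectors 1..57 */
    }

    int main(void)
    {
            int uv;

            for (uv = 600000; uv <= 650000; uv += 12500)
                    printf("%7d uV -> vsel %2u -> %7d uV\n",
                           uv, uv_to_vsel(uv), vsel_to_uv(uv_to_vsel(uv)));
            return 0;
    }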
@@ -487,11 +834,10 @@ static struct regulator_ops twl6030_fixed_resource = { | |||
487 | #define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ | 834 | #define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ |
488 | remap_conf) \ | 835 | remap_conf) \ |
489 | TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ | 836 | TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ |
490 | remap_conf, TWL4030) | 837 | remap_conf, TWL4030, twl4030fixed_ops) |
491 | #define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ | 838 | #define TWL6030_FIXED_LDO(label, offset, mVolts, num, turnon_delay) \ |
492 | remap_conf) \ | ||
493 | TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ | 839 | TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \ |
494 | remap_conf, TWL6030) | 840 | 0x0, TWL6030, twl6030fixed_ops) |
495 | 841 | ||
496 | #define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \ | 842 | #define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) { \ |
497 | .base = offset, \ | 843 | .base = offset, \ |
@@ -510,13 +856,11 @@ static struct regulator_ops twl6030_fixed_resource = { | |||
510 | }, \ | 856 | }, \ |
511 | } | 857 | } |
512 | 858 | ||
513 | #define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num, \ | 859 | #define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \ |
514 | remap_conf) { \ | ||
515 | .base = offset, \ | 860 | .base = offset, \ |
516 | .id = num, \ | 861 | .id = num, \ |
517 | .min_mV = min_mVolts, \ | 862 | .min_mV = min_mVolts, \ |
518 | .max_mV = max_mVolts, \ | 863 | .max_mV = max_mVolts, \ |
519 | .remap = remap_conf, \ | ||
520 | .desc = { \ | 864 | .desc = { \ |
521 | .name = #label, \ | 865 | .name = #label, \ |
522 | .id = TWL6030_REG_##label, \ | 866 | .id = TWL6030_REG_##label, \ |
@@ -527,9 +871,23 @@ static struct regulator_ops twl6030_fixed_resource = { | |||
527 | }, \ | 871 | }, \ |
528 | } | 872 | } |
529 | 873 | ||
874 | #define TWL6025_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts, num) { \ | ||
875 | .base = offset, \ | ||
876 | .id = num, \ | ||
877 | .min_mV = min_mVolts, \ | ||
878 | .max_mV = max_mVolts, \ | ||
879 | .desc = { \ | ||
880 | .name = #label, \ | ||
881 | .id = TWL6025_REG_##label, \ | ||
882 | .n_voltages = ((max_mVolts - min_mVolts)/100) + 1, \ | ||
883 | .ops = &twl6030ldo_ops, \ | ||
884 | .type = REGULATOR_VOLTAGE, \ | ||
885 | .owner = THIS_MODULE, \ | ||
886 | }, \ | ||
887 | } | ||
530 | 888 | ||
531 | #define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \ | 889 | #define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \ |
532 | family) { \ | 890 | family, operations) { \ |
533 | .base = offset, \ | 891 | .base = offset, \ |
534 | .id = num, \ | 892 | .id = num, \ |
535 | .min_mV = mVolts, \ | 893 | .min_mV = mVolts, \ |
@@ -539,17 +897,16 @@ static struct regulator_ops twl6030_fixed_resource = { | |||
539 | .name = #label, \ | 897 | .name = #label, \ |
540 | .id = family##_REG_##label, \ | 898 | .id = family##_REG_##label, \ |
541 | .n_voltages = 1, \ | 899 | .n_voltages = 1, \ |
542 | .ops = &twlfixed_ops, \ | 900 | .ops = &operations, \ |
543 | .type = REGULATOR_VOLTAGE, \ | 901 | .type = REGULATOR_VOLTAGE, \ |
544 | .owner = THIS_MODULE, \ | 902 | .owner = THIS_MODULE, \ |
545 | }, \ | 903 | }, \ |
546 | } | 904 | } |
547 | 905 | ||
548 | #define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay, remap_conf) { \ | 906 | #define TWL6030_FIXED_RESOURCE(label, offset, num, turnon_delay) { \ |
549 | .base = offset, \ | 907 | .base = offset, \ |
550 | .id = num, \ | 908 | .id = num, \ |
551 | .delay = turnon_delay, \ | 909 | .delay = turnon_delay, \ |
552 | .remap = remap_conf, \ | ||
553 | .desc = { \ | 910 | .desc = { \ |
554 | .name = #label, \ | 911 | .name = #label, \ |
555 | .id = TWL6030_REG_##label, \ | 912 | .id = TWL6030_REG_##label, \ |
@@ -559,6 +916,21 @@ static struct regulator_ops twl6030_fixed_resource = { | |||
559 | }, \ | 916 | }, \ |
560 | } | 917 | } |
561 | 918 | ||
919 | #define TWL6025_ADJUSTABLE_SMPS(label, offset, num) { \ | ||
920 | .base = offset, \ | ||
921 | .id = num, \ | ||
922 | .min_mV = 600, \ | ||
923 | .max_mV = 2100, \ | ||
924 | .desc = { \ | ||
925 | .name = #label, \ | ||
926 | .id = TWL6025_REG_##label, \ | ||
927 | .n_voltages = 63, \ | ||
928 | .ops = &twlsmps_ops, \ | ||
929 | .type = REGULATOR_VOLTAGE, \ | ||
930 | .owner = THIS_MODULE, \ | ||
931 | }, \ | ||
932 | } | ||
933 | |||
562 | /* | 934 | /* |
563 | * We list regulators here if systems need some level of | 935 | * We list regulators here if systems need some level of |
564 | * software control over them after boot. | 936 | * software control over them after boot. |
@@ -589,19 +961,52 @@ static struct twlreg_info twl_regs[] = { | |||
589 | /* 6030 REG with base as PMC Slave Misc : 0x0030 */ | 961 | /* 6030 REG with base as PMC Slave Misc : 0x0030 */ |
590 | /* Turnon-delay and remap configuration values for 6030 are not | 962 | /* Turnon-delay and remap configuration values for 6030 are not |
591 | verified since the specification is not public */ | 963 | verified since the specification is not public */ |
592 | TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1, 0x21), | 964 | TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300, 1), |
593 | TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2, 0x21), | 965 | TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300, 2), |
594 | TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3, 0x21), | 966 | TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300, 3), |
595 | TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4, 0x21), | 967 | TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300, 4), |
596 | TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5, 0x21), | 968 | TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300, 5), |
597 | TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7, 0x21), | 969 | TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300, 7), |
598 | TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0, 0x21), | 970 | TWL6030_FIXED_LDO(VANA, 0x50, 2100, 15, 0), |
599 | TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0, 0x21), | 971 | TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 16, 0), |
600 | TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0, 0x21), | 972 | TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 17, 0), |
601 | TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0, 0x21), | 973 | TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 18, 0), |
602 | TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0, 0x21), | 974 | TWL6030_FIXED_RESOURCE(CLK32KG, 0x8C, 48, 0), |
975 | |||
976 | /* The 6025 regulators are renamed compared to the 6030 versions */ | ||
977 | TWL6025_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300, 1), | ||
978 | TWL6025_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300, 2), | ||
979 | TWL6025_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300, 3), | ||
980 | TWL6025_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300, 4), | ||
981 | TWL6025_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300, 5), | ||
982 | TWL6025_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300, 7), | ||
983 | TWL6025_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300, 16), | ||
984 | TWL6025_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300, 17), | ||
985 | TWL6025_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300, 18), | ||
986 | |||
987 | TWL6025_ADJUSTABLE_SMPS(SMPS3, 0x34, 1), | ||
988 | TWL6025_ADJUSTABLE_SMPS(SMPS4, 0x10, 2), | ||
989 | TWL6025_ADJUSTABLE_SMPS(VIO, 0x16, 3), | ||
603 | }; | 990 | }; |
604 | 991 | ||
992 | static u8 twl_get_smps_offset(void) | ||
993 | { | ||
994 | u8 value; | ||
995 | |||
996 | twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value, | ||
997 | TWL6030_SMPS_OFFSET); | ||
998 | return value; | ||
999 | } | ||
1000 | |||
1001 | static u8 twl_get_smps_mult(void) | ||
1002 | { | ||
1003 | u8 value; | ||
1004 | |||
1005 | twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value, | ||
1006 | TWL6030_SMPS_MULT); | ||
1007 | return value; | ||
1008 | } | ||
1009 | |||
605 | static int __devinit twlreg_probe(struct platform_device *pdev) | 1010 | static int __devinit twlreg_probe(struct platform_device *pdev) |
606 | { | 1011 | { |
607 | int i; | 1012 | int i; |
@@ -623,6 +1028,9 @@ static int __devinit twlreg_probe(struct platform_device *pdev) | |||
623 | if (!initdata) | 1028 | if (!initdata) |
624 | return -EINVAL; | 1029 | return -EINVAL; |
625 | 1030 | ||
1031 | /* copy the features into regulator data */ | ||
1032 | info->features = (unsigned long)initdata->driver_data; | ||
1033 | |||
626 | /* Constrain board-specific capabilities according to what | 1034 | /* Constrain board-specific capabilities according to what |
627 | * this driver and the chip itself can actually do. | 1035 | * this driver and the chip itself can actually do. |
628 | */ | 1036 | */ |
@@ -645,6 +1053,27 @@ static int __devinit twlreg_probe(struct platform_device *pdev) | |||
645 | break; | 1053 | break; |
646 | } | 1054 | } |
647 | 1055 | ||
1056 | switch (pdev->id) { | ||
1057 | case TWL6025_REG_SMPS3: | ||
1058 | if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS3) | ||
1059 | info->flags |= SMPS_EXTENDED_EN; | ||
1060 | if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS3) | ||
1061 | info->flags |= SMPS_OFFSET_EN; | ||
1062 | break; | ||
1063 | case TWL6025_REG_SMPS4: | ||
1064 | if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS4) | ||
1065 | info->flags |= SMPS_EXTENDED_EN; | ||
1066 | if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS4) | ||
1067 | info->flags |= SMPS_OFFSET_EN; | ||
1068 | break; | ||
1069 | case TWL6025_REG_VIO: | ||
1070 | if (twl_get_smps_mult() & SMPS_MULTOFFSET_VIO) | ||
1071 | info->flags |= SMPS_EXTENDED_EN; | ||
1072 | if (twl_get_smps_offset() & SMPS_MULTOFFSET_VIO) | ||
1073 | info->flags |= SMPS_OFFSET_EN; | ||
1074 | break; | ||
1075 | } | ||
1076 | |||
648 | rdev = regulator_register(&info->desc, &pdev->dev, initdata, info); | 1077 | rdev = regulator_register(&info->desc, &pdev->dev, initdata, info); |
649 | if (IS_ERR(rdev)) { | 1078 | if (IS_ERR(rdev)) { |
650 | dev_err(&pdev->dev, "can't register %s, %ld\n", | 1079 | dev_err(&pdev->dev, "can't register %s, %ld\n", |
@@ -653,7 +1082,8 @@ static int __devinit twlreg_probe(struct platform_device *pdev) | |||
653 | } | 1082 | } |
654 | platform_set_drvdata(pdev, rdev); | 1083 | platform_set_drvdata(pdev, rdev); |
655 | 1084 | ||
656 | twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP, | 1085 | if (twl_class_is_4030()) |
1086 | twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP, | ||
657 | info->remap); | 1087 | info->remap); |
658 | 1088 | ||
659 | /* NOTE: many regulators support short-circuit IRQs (presentable | 1089 | /* NOTE: many regulators support short-circuit IRQs (presentable |
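In twlreg_probe() above, the driver reads the SMPS_OFFSET and SMPS_MULT registers (twl_get_smps_offset()/twl_get_smps_mult()) and, per TWL6025 SMPS rail, turns the matching bit into the SMPS_OFFSET_EN / SMPS_EXTENDED_EN flags that the voltage-table switch keys off. The shape of that mapping as a standalone sketch; the per-rail bit values below are made up for illustration, only the flag names mirror the driver:

#include <stdio.h>

#define SMPS_OFFSET_EN		(1 << 0)	/* flag names as in the driver */
#define SMPS_EXTENDED_EN	(1 << 1)

/* hypothetical per-rail bits; the real SMPS_MULTOFFSET_* values live in the header */
#define BIT_SMPS3		(1 << 6)
#define BIT_SMPS4		(1 << 0)
#define BIT_VIO			(1 << 1)

/* Derive one rail's flags from the raw OFFSET and MULT register contents. */
static unsigned int rail_flags(unsigned char offset_reg, unsigned char mult_reg,
			       unsigned char rail_bit)
{
	unsigned int flags = 0;

	if (mult_reg & rail_bit)
		flags |= SMPS_EXTENDED_EN;	/* rail uses the extended voltage table */
	if (offset_reg & rail_bit)
		flags |= SMPS_OFFSET_EN;	/* rail uses the offset voltage table */
	return flags;
}

int main(void)
{
	unsigned char offset = BIT_SMPS3 | BIT_VIO;	/* example register contents */
	unsigned char mult = BIT_SMPS4;

	printf("SMPS3 flags 0x%x, SMPS4 flags 0x%x, VIO flags 0x%x\n",
	       rail_flags(offset, mult, BIT_SMPS3),
	       rail_flags(offset, mult, BIT_SMPS4),
	       rail_flags(offset, mult, BIT_VIO));
	return 0;
}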
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c index e93453b1b978..a0982e809851 100644 --- a/drivers/regulator/wm831x-dcdc.c +++ b/drivers/regulator/wm831x-dcdc.c | |||
@@ -600,7 +600,6 @@ err: | |||
600 | static __devexit int wm831x_buckv_remove(struct platform_device *pdev) | 600 | static __devexit int wm831x_buckv_remove(struct platform_device *pdev) |
601 | { | 601 | { |
602 | struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); | 602 | struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); |
603 | struct wm831x *wm831x = dcdc->wm831x; | ||
604 | 603 | ||
605 | platform_set_drvdata(pdev, NULL); | 604 | platform_set_drvdata(pdev, NULL); |
606 | 605 | ||
@@ -776,7 +775,6 @@ err: | |||
776 | static __devexit int wm831x_buckp_remove(struct platform_device *pdev) | 775 | static __devexit int wm831x_buckp_remove(struct platform_device *pdev) |
777 | { | 776 | { |
778 | struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); | 777 | struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); |
779 | struct wm831x *wm831x = dcdc->wm831x; | ||
780 | 778 | ||
781 | platform_set_drvdata(pdev, NULL); | 779 | platform_set_drvdata(pdev, NULL); |
782 | 780 | ||
diff --git a/drivers/regulator/wm8400-regulator.c b/drivers/regulator/wm8400-regulator.c index b42d01cef35a..0f12c70bebc9 100644 --- a/drivers/regulator/wm8400-regulator.c +++ b/drivers/regulator/wm8400-regulator.c | |||
@@ -55,7 +55,7 @@ static int wm8400_ldo_list_voltage(struct regulator_dev *dev, | |||
55 | return 1600000 + ((selector - 14) * 100000); | 55 | return 1600000 + ((selector - 14) * 100000); |
56 | } | 56 | } |
57 | 57 | ||
58 | static int wm8400_ldo_get_voltage(struct regulator_dev *dev) | 58 | static int wm8400_ldo_get_voltage_sel(struct regulator_dev *dev) |
59 | { | 59 | { |
60 | struct wm8400 *wm8400 = rdev_get_drvdata(dev); | 60 | struct wm8400 *wm8400 = rdev_get_drvdata(dev); |
61 | u16 val; | 61 | u16 val; |
@@ -63,7 +63,7 @@ static int wm8400_ldo_get_voltage(struct regulator_dev *dev) | |||
63 | val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev)); | 63 | val = wm8400_reg_read(wm8400, WM8400_LDO1_CONTROL + rdev_get_id(dev)); |
64 | val &= WM8400_LDO1_VSEL_MASK; | 64 | val &= WM8400_LDO1_VSEL_MASK; |
65 | 65 | ||
66 | return wm8400_ldo_list_voltage(dev, val); | 66 | return val; |
67 | } | 67 | } |
68 | 68 | ||
69 | static int wm8400_ldo_set_voltage(struct regulator_dev *dev, | 69 | static int wm8400_ldo_set_voltage(struct regulator_dev *dev, |
@@ -104,7 +104,7 @@ static struct regulator_ops wm8400_ldo_ops = { | |||
104 | .enable = wm8400_ldo_enable, | 104 | .enable = wm8400_ldo_enable, |
105 | .disable = wm8400_ldo_disable, | 105 | .disable = wm8400_ldo_disable, |
106 | .list_voltage = wm8400_ldo_list_voltage, | 106 | .list_voltage = wm8400_ldo_list_voltage, |
107 | .get_voltage = wm8400_ldo_get_voltage, | 107 | .get_voltage_sel = wm8400_ldo_get_voltage_sel, |
108 | .set_voltage = wm8400_ldo_set_voltage, | 108 | .set_voltage = wm8400_ldo_set_voltage, |
109 | }; | 109 | }; |
110 | 110 | ||
@@ -145,7 +145,7 @@ static int wm8400_dcdc_list_voltage(struct regulator_dev *dev, | |||
145 | return 850000 + (selector * 25000); | 145 | return 850000 + (selector * 25000); |
146 | } | 146 | } |
147 | 147 | ||
148 | static int wm8400_dcdc_get_voltage(struct regulator_dev *dev) | 148 | static int wm8400_dcdc_get_voltage_sel(struct regulator_dev *dev) |
149 | { | 149 | { |
150 | struct wm8400 *wm8400 = rdev_get_drvdata(dev); | 150 | struct wm8400 *wm8400 = rdev_get_drvdata(dev); |
151 | u16 val; | 151 | u16 val; |
@@ -154,7 +154,7 @@ static int wm8400_dcdc_get_voltage(struct regulator_dev *dev) | |||
154 | val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset); | 154 | val = wm8400_reg_read(wm8400, WM8400_DCDC1_CONTROL_1 + offset); |
155 | val &= WM8400_DC1_VSEL_MASK; | 155 | val &= WM8400_DC1_VSEL_MASK; |
156 | 156 | ||
157 | return 850000 + (25000 * val); | 157 | return val; |
158 | } | 158 | } |
159 | 159 | ||
160 | static int wm8400_dcdc_set_voltage(struct regulator_dev *dev, | 160 | static int wm8400_dcdc_set_voltage(struct regulator_dev *dev, |
@@ -261,7 +261,7 @@ static struct regulator_ops wm8400_dcdc_ops = { | |||
261 | .enable = wm8400_dcdc_enable, | 261 | .enable = wm8400_dcdc_enable, |
262 | .disable = wm8400_dcdc_disable, | 262 | .disable = wm8400_dcdc_disable, |
263 | .list_voltage = wm8400_dcdc_list_voltage, | 263 | .list_voltage = wm8400_dcdc_list_voltage, |
264 | .get_voltage = wm8400_dcdc_get_voltage, | 264 | .get_voltage_sel = wm8400_dcdc_get_voltage_sel, |
265 | .set_voltage = wm8400_dcdc_set_voltage, | 265 | .set_voltage = wm8400_dcdc_set_voltage, |
266 | .get_mode = wm8400_dcdc_get_mode, | 266 | .get_mode = wm8400_dcdc_get_mode, |
267 | .set_mode = wm8400_dcdc_set_mode, | 267 | .set_mode = wm8400_dcdc_set_mode, |
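The wm8400 hunks above convert both voltage-read callbacks from get_voltage to get_voltage_sel: the driver now returns the raw VSEL field and the regulator core turns it into microvolts through the existing list_voltage callback (for the DCDCs, 850000 + selector * 25000, as shown above). A rough standalone sketch of that composition; the function names are illustrative stand-ins, not the core's real entry points:

#include <stdio.h>

/* mapping copied from wm8400_dcdc_list_voltage() above */
static int dcdc_list_voltage(unsigned selector)
{
	return 850000 + (int)selector * 25000;
}

/* stand-in for the hardware read: return only the VSEL bitfield */
static unsigned dcdc_get_voltage_sel(void)
{
	return 12;			/* pretend the register held selector 12 */
}

/* roughly what the regulator core now does on the driver's behalf */
static int dcdc_get_voltage(void)
{
	return dcdc_list_voltage(dcdc_get_voltage_sel());
}

int main(void)
{
	printf("%d uV\n", dcdc_get_voltage());	/* 850000 + 12 * 25000 = 1150000 */
	return 0;
}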
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index b8f4e9e66cd5..f822e13dc04b 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig | |||
@@ -125,6 +125,16 @@ comment "I2C RTC drivers" | |||
125 | 125 | ||
126 | if I2C | 126 | if I2C |
127 | 127 | ||
128 | config RTC_DRV_88PM860X | ||
129 | tristate "Marvell 88PM860x" | ||
130 | depends on RTC_CLASS && I2C && MFD_88PM860X | ||
131 | help | ||
132 | If you say yes here you get support for the RTC function in | ||
133 | Marvell 88PM860x chips. | ||
134 | |||
135 | This driver can also be built as a module. If so, the module | ||
136 | will be called rtc-88pm860x. | ||
137 | |||
128 | config RTC_DRV_DS1307 | 138 | config RTC_DRV_DS1307 |
129 | tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025" | 139 | tristate "Dallas/Maxim DS1307/37/38/39/40, ST M41T00, EPSON RX-8025" |
130 | help | 140 | help |
@@ -351,12 +361,39 @@ config RTC_DRV_RX8025 | |||
351 | This driver can also be built as a module. If so, the module | 361 | This driver can also be built as a module. If so, the module |
352 | will be called rtc-rx8025. | 362 | will be called rtc-rx8025. |
353 | 363 | ||
364 | config RTC_DRV_EM3027 | ||
365 | tristate "EM Microelectronic EM3027" | ||
366 | help | ||
367 | If you say yes here you get support for the EM | ||
368 | Microelectronic EM3027 RTC chips. | ||
369 | |||
370 | This driver can also be built as a module. If so, the module | ||
371 | will be called rtc-em3027. | ||
372 | |||
373 | config RTC_DRV_RV3029C2 | ||
374 | tristate "Micro Crystal RTC" | ||
375 | help | ||
376 | If you say yes here you get support for the Micro Crystal | ||
377 | RV3029-C2 RTC chips. | ||
378 | |||
379 | This driver can also be built as a module. If so, the module | ||
380 | will be called rtc-rv3029c2. | ||
381 | |||
354 | endif # I2C | 382 | endif # I2C |
355 | 383 | ||
356 | comment "SPI RTC drivers" | 384 | comment "SPI RTC drivers" |
357 | 385 | ||
358 | if SPI_MASTER | 386 | if SPI_MASTER |
359 | 387 | ||
388 | config RTC_DRV_M41T93 | ||
389 | tristate "ST M41T93" | ||
390 | help | ||
391 | If you say yes here you will get support for the | ||
392 | ST M41T93 SPI RTC chip. | ||
393 | |||
394 | This driver can also be built as a module. If so, the module | ||
395 | will be called rtc-m41t93. | ||
396 | |||
360 | config RTC_DRV_M41T94 | 397 | config RTC_DRV_M41T94 |
361 | tristate "ST M41T94" | 398 | tristate "ST M41T94" |
362 | help | 399 | help |
@@ -645,6 +682,14 @@ config RTC_DRV_WM8350 | |||
645 | This driver can also be built as a module. If so, the module | 682 | This driver can also be built as a module. If so, the module |
646 | will be called "rtc-wm8350". | 683 | will be called "rtc-wm8350". |
647 | 684 | ||
685 | config RTC_DRV_SPEAR | ||
686 | tristate "SPEAR ST RTC" | ||
687 | depends on PLAT_SPEAR | ||
688 | default y | ||
689 | help | ||
690 | If you say Y here you will get support for the RTC found on | ||
691 | ST SPEAr platforms. | ||
692 | |||
648 | config RTC_DRV_PCF50633 | 693 | config RTC_DRV_PCF50633 |
649 | depends on MFD_PCF50633 | 694 | depends on MFD_PCF50633 |
650 | tristate "NXP PCF50633 RTC" | 695 | tristate "NXP PCF50633 RTC" |
@@ -874,6 +919,13 @@ config RTC_DRV_PXA | |||
874 | This RTC driver uses PXA RTC registers available since pxa27x | 919 | This RTC driver uses PXA RTC registers available since pxa27x |
875 | series (RDxR, RYxR) instead of legacy RCNR, RTAR. | 920 | series (RDxR, RYxR) instead of legacy RCNR, RTAR. |
876 | 921 | ||
922 | config RTC_DRV_VT8500 | ||
923 | tristate "VIA/WonderMedia 85xx SoC RTC" | ||
924 | depends on ARCH_VT8500 | ||
925 | help | ||
926 | If you say Y here you will get access to the real time clock | ||
927 | built into your VIA VT8500 SoC or its relatives. | ||
928 | |||
877 | 929 | ||
878 | config RTC_DRV_SUN4V | 930 | config RTC_DRV_SUN4V |
879 | bool "SUN4V Hypervisor RTC" | 931 | bool "SUN4V Hypervisor RTC" |
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile index 9574748d1c73..213d725f16d4 100644 --- a/drivers/rtc/Makefile +++ b/drivers/rtc/Makefile | |||
@@ -15,6 +15,7 @@ rtc-core-$(CONFIG_RTC_INTF_SYSFS) += rtc-sysfs.o | |||
15 | 15 | ||
16 | # Keep the list ordered. | 16 | # Keep the list ordered. |
17 | 17 | ||
18 | obj-$(CONFIG_RTC_DRV_88PM860X) += rtc-88pm860x.o | ||
18 | obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o | 19 | obj-$(CONFIG_RTC_DRV_AB3100) += rtc-ab3100.o |
19 | obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o | 20 | obj-$(CONFIG_RTC_DRV_AB8500) += rtc-ab8500.o |
20 | obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o | 21 | obj-$(CONFIG_RTC_DRV_AT32AP700X)+= rtc-at32ap700x.o |
@@ -43,6 +44,7 @@ obj-$(CONFIG_RTC_DRV_DS1742) += rtc-ds1742.o | |||
43 | obj-$(CONFIG_RTC_DRV_DS3232) += rtc-ds3232.o | 44 | obj-$(CONFIG_RTC_DRV_DS3232) += rtc-ds3232.o |
44 | obj-$(CONFIG_RTC_DRV_DS3234) += rtc-ds3234.o | 45 | obj-$(CONFIG_RTC_DRV_DS3234) += rtc-ds3234.o |
45 | obj-$(CONFIG_RTC_DRV_EFI) += rtc-efi.o | 46 | obj-$(CONFIG_RTC_DRV_EFI) += rtc-efi.o |
47 | obj-$(CONFIG_RTC_DRV_EM3027) += rtc-em3027.o | ||
46 | obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o | 48 | obj-$(CONFIG_RTC_DRV_EP93XX) += rtc-ep93xx.o |
47 | obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o | 49 | obj-$(CONFIG_RTC_DRV_FM3130) += rtc-fm3130.o |
48 | obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o | 50 | obj-$(CONFIG_RTC_DRV_GENERIC) += rtc-generic.o |
@@ -52,6 +54,7 @@ obj-$(CONFIG_RTC_DRV_ISL12022) += rtc-isl12022.o | |||
52 | obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o | 54 | obj-$(CONFIG_RTC_DRV_JZ4740) += rtc-jz4740.o |
53 | obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o | 55 | obj-$(CONFIG_RTC_DRV_LPC32XX) += rtc-lpc32xx.o |
54 | obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o | 56 | obj-$(CONFIG_RTC_DRV_M41T80) += rtc-m41t80.o |
57 | obj-$(CONFIG_RTC_DRV_M41T93) += rtc-m41t93.o | ||
55 | obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o | 58 | obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o |
56 | obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o | 59 | obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o |
57 | obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o | 60 | obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o |
@@ -81,12 +84,14 @@ obj-$(CONFIG_RTC_DRV_RP5C01) += rtc-rp5c01.o | |||
81 | obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o | 84 | obj-$(CONFIG_RTC_DRV_RS5C313) += rtc-rs5c313.o |
82 | obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o | 85 | obj-$(CONFIG_RTC_DRV_RS5C348) += rtc-rs5c348.o |
83 | obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o | 86 | obj-$(CONFIG_RTC_DRV_RS5C372) += rtc-rs5c372.o |
87 | obj-$(CONFIG_RTC_DRV_RV3029C2) += rtc-rv3029c2.o | ||
84 | obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o | 88 | obj-$(CONFIG_RTC_DRV_RX8025) += rtc-rx8025.o |
85 | obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o | 89 | obj-$(CONFIG_RTC_DRV_RX8581) += rtc-rx8581.o |
86 | obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o | 90 | obj-$(CONFIG_RTC_DRV_S35390A) += rtc-s35390a.o |
87 | obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o | 91 | obj-$(CONFIG_RTC_DRV_S3C) += rtc-s3c.o |
88 | obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o | 92 | obj-$(CONFIG_RTC_DRV_SA1100) += rtc-sa1100.o |
89 | obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o | 93 | obj-$(CONFIG_RTC_DRV_SH) += rtc-sh.o |
94 | obj-$(CONFIG_RTC_DRV_SPEAR) += rtc-spear.o | ||
90 | obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o | 95 | obj-$(CONFIG_RTC_DRV_STARFIRE) += rtc-starfire.o |
91 | obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o | 96 | obj-$(CONFIG_RTC_DRV_STK17TA8) += rtc-stk17ta8.o |
92 | obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o | 97 | obj-$(CONFIG_RTC_DRV_STMP) += rtc-stmp3xxx.o |
@@ -98,6 +103,7 @@ obj-$(CONFIG_RTC_DRV_TWL4030) += rtc-twl.o | |||
98 | obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o | 103 | obj-$(CONFIG_RTC_DRV_TX4939) += rtc-tx4939.o |
99 | obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o | 104 | obj-$(CONFIG_RTC_DRV_V3020) += rtc-v3020.o |
100 | obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o | 105 | obj-$(CONFIG_RTC_DRV_VR41XX) += rtc-vr41xx.o |
106 | obj-$(CONFIG_RTC_DRV_VT8500) += rtc-vt8500.o | ||
101 | obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o | 107 | obj-$(CONFIG_RTC_DRV_WM831X) += rtc-wm831x.o |
102 | obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o | 108 | obj-$(CONFIG_RTC_DRV_WM8350) += rtc-wm8350.o |
103 | obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o | 109 | obj-$(CONFIG_RTC_DRV_X1205) += rtc-x1205.o |
diff --git a/drivers/rtc/rtc-88pm860x.c b/drivers/rtc/rtc-88pm860x.c new file mode 100644 index 000000000000..64b847b7f970 --- /dev/null +++ b/drivers/rtc/rtc-88pm860x.c | |||
@@ -0,0 +1,427 @@ | |||
1 | /* | ||
2 | * Real Time Clock driver for Marvell 88PM860x PMIC | ||
3 | * | ||
4 | * Copyright (c) 2010 Marvell International Ltd. | ||
5 | * Author: Haojian Zhuang <haojian.zhuang@marvell.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/module.h> | ||
14 | #include <linux/platform_device.h> | ||
15 | #include <linux/slab.h> | ||
16 | #include <linux/mutex.h> | ||
17 | #include <linux/rtc.h> | ||
18 | #include <linux/delay.h> | ||
19 | #include <linux/mfd/core.h> | ||
20 | #include <linux/mfd/88pm860x.h> | ||
21 | |||
22 | #define VRTC_CALIBRATION | ||
23 | |||
24 | struct pm860x_rtc_info { | ||
25 | struct pm860x_chip *chip; | ||
26 | struct i2c_client *i2c; | ||
27 | struct rtc_device *rtc_dev; | ||
28 | struct device *dev; | ||
29 | struct delayed_work calib_work; | ||
30 | |||
31 | int irq; | ||
32 | int vrtc; | ||
33 | int (*sync)(unsigned int ticks); | ||
34 | }; | ||
35 | |||
36 | #define REG_VRTC_MEAS1 0x7D | ||
37 | |||
38 | #define REG0_ADDR 0xB0 | ||
39 | #define REG1_ADDR 0xB2 | ||
40 | #define REG2_ADDR 0xB4 | ||
41 | #define REG3_ADDR 0xB6 | ||
42 | |||
43 | #define REG0_DATA 0xB1 | ||
44 | #define REG1_DATA 0xB3 | ||
45 | #define REG2_DATA 0xB5 | ||
46 | #define REG3_DATA 0xB7 | ||
47 | |||
48 | /* bit definitions of Measurement Enable Register 2 (0x51) */ | ||
49 | #define MEAS2_VRTC (1 << 0) | ||
50 | |||
51 | /* bit definitions of RTC Register 1 (0xA0) */ | ||
52 | #define ALARM_EN (1 << 3) | ||
53 | #define ALARM_WAKEUP (1 << 4) | ||
54 | #define ALARM (1 << 5) | ||
55 | #define RTC1_USE_XO (1 << 7) | ||
56 | |||
57 | #define VRTC_CALIB_INTERVAL (HZ * 60 * 10) /* 10 minutes */ | ||
58 | |||
59 | static irqreturn_t rtc_update_handler(int irq, void *data) | ||
60 | { | ||
61 | struct pm860x_rtc_info *info = (struct pm860x_rtc_info *)data; | ||
62 | int mask; | ||
63 | |||
64 | mask = ALARM | ALARM_WAKEUP; | ||
65 | pm860x_set_bits(info->i2c, PM8607_RTC1, mask | ALARM_EN, mask); | ||
66 | rtc_update_irq(info->rtc_dev, 1, RTC_AF); | ||
67 | return IRQ_HANDLED; | ||
68 | } | ||
69 | |||
70 | static int pm860x_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
71 | { | ||
72 | struct pm860x_rtc_info *info = dev_get_drvdata(dev); | ||
73 | |||
74 | if (enabled) | ||
75 | pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM, ALARM); | ||
76 | else | ||
77 | pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM, 0); | ||
78 | return 0; | ||
79 | } | ||
80 | |||
81 | /* | ||
82 | * Calculate the next alarm time given the requested alarm time mask | ||
83 | * and the current time. | ||
84 | */ | ||
85 | static void rtc_next_alarm_time(struct rtc_time *next, struct rtc_time *now, | ||
86 | struct rtc_time *alrm) | ||
87 | { | ||
88 | unsigned long next_time; | ||
89 | unsigned long now_time; | ||
90 | |||
91 | next->tm_year = now->tm_year; | ||
92 | next->tm_mon = now->tm_mon; | ||
93 | next->tm_mday = now->tm_mday; | ||
94 | next->tm_hour = alrm->tm_hour; | ||
95 | next->tm_min = alrm->tm_min; | ||
96 | next->tm_sec = alrm->tm_sec; | ||
97 | |||
98 | rtc_tm_to_time(now, &now_time); | ||
99 | rtc_tm_to_time(next, &next_time); | ||
100 | |||
101 | if (next_time < now_time) { | ||
102 | /* Advance one day */ | ||
103 | next_time += 60 * 60 * 24; | ||
104 | rtc_time_to_tm(next_time, next); | ||
105 | } | ||
106 | } | ||
107 | |||
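rtc_next_alarm_time() above implements a daily alarm: it keeps today's date, substitutes the requested hour/minute/second, and if that moment has already passed it pushes the alarm one day forward. The same rollover rule in plain seconds arithmetic, as a self-contained sketch (the seconds-since-midnight simplification stands in for rtc_tm_to_time()/rtc_time_to_tm()):

#include <stdio.h>

#define SECS_PER_DAY (60 * 60 * 24)

/* 'now' is seconds since midnight of some reference day; the driver works on
 * full epoch seconds, but the rollover rule is identical. */
static long next_alarm(long now, long alarm_time_of_day)
{
	long today_midnight = now - (now % SECS_PER_DAY);
	long next = today_midnight + alarm_time_of_day;

	if (next < now)			/* requested time already passed today... */
		next += SECS_PER_DAY;	/* ...so fire tomorrow instead */
	return next;
}

int main(void)
{
	long now = 10 * 3600 + 30 * 60;				/* 10:30 */
	printf("alarm at 11:00 fires in %ld s\n", next_alarm(now, 11 * 3600) - now);
	printf("alarm at 09:00 fires in %ld s\n", next_alarm(now, 9 * 3600) - now);
	return 0;
}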
108 | static int pm860x_rtc_read_time(struct device *dev, struct rtc_time *tm) | ||
109 | { | ||
110 | struct pm860x_rtc_info *info = dev_get_drvdata(dev); | ||
111 | unsigned char buf[8]; | ||
112 | unsigned long ticks, base, data; | ||
113 | |||
114 | pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf); | ||
115 | dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1], | ||
116 | buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); | ||
117 | base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7]; | ||
118 | |||
119 | /* load 32-bit read-only counter */ | ||
120 | pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf); | ||
121 | data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; | ||
122 | ticks = base + data; | ||
123 | dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", | ||
124 | base, data, ticks); | ||
125 | |||
126 | rtc_time_to_tm(ticks, tm); | ||
127 | |||
128 | return 0; | ||
129 | } | ||
130 | |||
131 | static int pm860x_rtc_set_time(struct device *dev, struct rtc_time *tm) | ||
132 | { | ||
133 | struct pm860x_rtc_info *info = dev_get_drvdata(dev); | ||
134 | unsigned char buf[4]; | ||
135 | unsigned long ticks, base, data; | ||
136 | |||
137 | if ((tm->tm_year < 70) || (tm->tm_year > 138)) { | ||
138 | dev_dbg(info->dev, "Set time %d out of range. " | ||
139 | "Please set time between 1970 and 2038.\n", | ||
140 | 1900 + tm->tm_year); | ||
141 | return -EINVAL; | ||
142 | } | ||
143 | rtc_tm_to_time(tm, &ticks); | ||
144 | |||
145 | /* load 32-bit read-only counter */ | ||
146 | pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf); | ||
147 | data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; | ||
148 | base = ticks - data; | ||
149 | dev_dbg(info->dev, "set base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", | ||
150 | base, data, ticks); | ||
151 | |||
152 | pm860x_page_reg_write(info->i2c, REG0_DATA, (base >> 24) & 0xFF); | ||
153 | pm860x_page_reg_write(info->i2c, REG1_DATA, (base >> 16) & 0xFF); | ||
154 | pm860x_page_reg_write(info->i2c, REG2_DATA, (base >> 8) & 0xFF); | ||
155 | pm860x_page_reg_write(info->i2c, REG3_DATA, base & 0xFF); | ||
156 | |||
157 | if (info->sync) | ||
158 | info->sync(ticks); | ||
159 | return 0; | ||
160 | } | ||
161 | |||
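The read and set paths above work against a split representation: the chip exposes a free-running, read-only 32-bit counter, and the driver keeps a software "base" in four scratch registers, so read_time returns base + counter while set_time stores base = requested_ticks - counter and never writes the counter itself. A minimal sketch of that bookkeeping, with the hardware counter and the scratch registers faked as plain variables:

#include <stdio.h>
#include <stdint.h>

static uint32_t hw_counter;	/* stand-in for the read-only RTC counter */
static uint32_t base_regs;	/* stand-in for the four 8-bit scratch registers */

static uint32_t rtc_read_time(void)
{
	return base_regs + hw_counter;		/* as pm860x_rtc_read_time() computes */
}

static void rtc_set_time(uint32_t ticks)
{
	base_regs = ticks - hw_counter;		/* as pm860x_rtc_set_time() stores */
}

int main(void)
{
	hw_counter = 5000;			/* counter has been running for a while */
	rtc_set_time(1309000000);		/* set some epoch time */
	hw_counter += 60;			/* one minute passes */
	printf("now: %u (expected 1309000060)\n", (unsigned)rtc_read_time());
	return 0;
}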
162 | static int pm860x_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) | ||
163 | { | ||
164 | struct pm860x_rtc_info *info = dev_get_drvdata(dev); | ||
165 | unsigned char buf[8]; | ||
166 | unsigned long ticks, base, data; | ||
167 | int ret; | ||
168 | |||
169 | pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf); | ||
170 | dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1], | ||
171 | buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); | ||
172 | base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7]; | ||
173 | |||
174 | pm860x_bulk_read(info->i2c, PM8607_RTC_EXPIRE1, 4, buf); | ||
175 | data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; | ||
176 | ticks = base + data; | ||
177 | dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", | ||
178 | base, data, ticks); | ||
179 | |||
180 | rtc_time_to_tm(ticks, &alrm->time); | ||
181 | ret = pm860x_reg_read(info->i2c, PM8607_RTC1); | ||
182 | alrm->enabled = (ret & ALARM_EN) ? 1 : 0; | ||
183 | alrm->pending = (ret & (ALARM | ALARM_WAKEUP)) ? 1 : 0; | ||
184 | return 0; | ||
185 | } | ||
186 | |||
187 | static int pm860x_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) | ||
188 | { | ||
189 | struct pm860x_rtc_info *info = dev_get_drvdata(dev); | ||
190 | struct rtc_time now_tm, alarm_tm; | ||
191 | unsigned long ticks, base, data; | ||
192 | unsigned char buf[8]; | ||
193 | int mask; | ||
194 | |||
195 | pm860x_set_bits(info->i2c, PM8607_RTC1, ALARM_EN, 0); | ||
196 | |||
197 | pm860x_page_bulk_read(info->i2c, REG0_ADDR, 8, buf); | ||
198 | dev_dbg(info->dev, "%x-%x-%x-%x-%x-%x-%x-%x\n", buf[0], buf[1], | ||
199 | buf[2], buf[3], buf[4], buf[5], buf[6], buf[7]); | ||
200 | base = (buf[1] << 24) | (buf[3] << 16) | (buf[5] << 8) | buf[7]; | ||
201 | |||
202 | /* load 32-bit read-only counter */ | ||
203 | pm860x_bulk_read(info->i2c, PM8607_RTC_COUNTER1, 4, buf); | ||
204 | data = (buf[3] << 24) | (buf[2] << 16) | (buf[1] << 8) | buf[0]; | ||
205 | ticks = base + data; | ||
206 | dev_dbg(info->dev, "get base:0x%lx, RO count:0x%lx, ticks:0x%lx\n", | ||
207 | base, data, ticks); | ||
208 | |||
209 | rtc_time_to_tm(ticks, &now_tm); | ||
210 | rtc_next_alarm_time(&alarm_tm, &now_tm, &alrm->time); | ||
211 | /* get new ticks for the alarm within the next 24 hours */ | ||
212 | rtc_tm_to_time(&alarm_tm, &ticks); | ||
213 | data = ticks - base; | ||
214 | |||
215 | buf[0] = data & 0xff; | ||
216 | buf[1] = (data >> 8) & 0xff; | ||
217 | buf[2] = (data >> 16) & 0xff; | ||
218 | buf[3] = (data >> 24) & 0xff; | ||
219 | pm860x_bulk_write(info->i2c, PM8607_RTC_EXPIRE1, 4, buf); | ||
220 | if (alrm->enabled) { | ||
221 | mask = ALARM | ALARM_WAKEUP | ALARM_EN; | ||
222 | pm860x_set_bits(info->i2c, PM8607_RTC1, mask, mask); | ||
223 | } else { | ||
224 | mask = ALARM | ALARM_WAKEUP | ALARM_EN; | ||
225 | pm860x_set_bits(info->i2c, PM8607_RTC1, mask, | ||
226 | ALARM | ALARM_WAKEUP); | ||
227 | } | ||
228 | return 0; | ||
229 | } | ||
230 | |||
231 | static const struct rtc_class_ops pm860x_rtc_ops = { | ||
232 | .read_time = pm860x_rtc_read_time, | ||
233 | .set_time = pm860x_rtc_set_time, | ||
234 | .read_alarm = pm860x_rtc_read_alarm, | ||
235 | .set_alarm = pm860x_rtc_set_alarm, | ||
236 | .alarm_irq_enable = pm860x_rtc_alarm_irq_enable, | ||
237 | }; | ||
238 | |||
239 | #ifdef VRTC_CALIBRATION | ||
240 | static void calibrate_vrtc_work(struct work_struct *work) | ||
241 | { | ||
242 | struct pm860x_rtc_info *info = container_of(work, | ||
243 | struct pm860x_rtc_info, calib_work.work); | ||
244 | unsigned char buf[2]; | ||
245 | unsigned int sum, data, mean, vrtc_set; | ||
246 | int i; | ||
247 | |||
248 | for (i = 0, sum = 0; i < 16; i++) { | ||
249 | msleep(100); | ||
250 | pm860x_bulk_read(info->i2c, REG_VRTC_MEAS1, 2, buf); | ||
251 | data = (buf[0] << 4) | buf[1]; | ||
252 | data = (data * 5400) >> 12; /* convert to mv */ | ||
253 | sum += data; | ||
254 | } | ||
255 | mean = sum >> 4; | ||
256 | vrtc_set = 2700 + (info->vrtc & 0x3) * 200; | ||
257 | dev_dbg(info->dev, "mean:%d, vrtc_set:%d\n", mean, vrtc_set); | ||
258 | |||
259 | sum = pm860x_reg_read(info->i2c, PM8607_RTC_MISC1); | ||
260 | data = sum & 0x3; | ||
261 | if ((mean + 200) < vrtc_set) { | ||
262 | /* try higher voltage */ | ||
263 | if (++data == 4) | ||
264 | goto out; | ||
265 | data = (sum & 0xf8) | (data & 0x3); | ||
266 | pm860x_reg_write(info->i2c, PM8607_RTC_MISC1, data); | ||
267 | } else if ((mean - 200) > vrtc_set) { | ||
268 | /* try lower voltage */ | ||
269 | if (data-- == 0) | ||
270 | goto out; | ||
271 | data = (sum & 0xf8) | (data & 0x3); | ||
272 | pm860x_reg_write(info->i2c, PM8607_RTC_MISC1, data); | ||
273 | } else | ||
274 | goto out; | ||
275 | dev_dbg(info->dev, "set 0x%x to RTC_MISC1\n", data); | ||
276 | /* trigger next calibration since VRTC is updated */ | ||
277 | schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL); | ||
278 | return; | ||
279 | out: | ||
280 | /* disable measurement */ | ||
281 | pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0); | ||
282 | dev_dbg(info->dev, "finish VRTC calibration\n"); | ||
283 | return; | ||
284 | } | ||
285 | #endif | ||
286 | |||
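The #ifdef block above calibrates the RTC supply: it averages 16 VRTC samples, converts each raw 12-bit reading to millivolts against a 5.4 V full scale, and compares the mean with the programmed target (2.7 V plus 200 mV per step of the 2-bit setting), stepping the setting up or down when the mean is more than 200 mV away. A standalone sketch of just the conversion and target arithmetic, reusing those constants:

#include <stdio.h>

/* 12-bit ADC code -> millivolts, 5400 mV full scale, as in calibrate_vrtc_work() */
static unsigned adc_to_mv(unsigned raw12)
{
	return (raw12 * 5400) >> 12;
}

/* target VRTC in mV for the 2-bit setting: <00>=2700, <01>=2900, <10>=3100, <11>=3300 */
static unsigned vrtc_target_mv(unsigned setting)
{
	return 2700 + (setting & 0x3) * 200;
}

int main(void)
{
	unsigned sum = 0;
	int i;

	for (i = 0; i < 16; i++)		/* pretend 16 identical mid-scale samples */
		sum += adc_to_mv(0x800);
	printf("mean %u mV, target(<01>) %u mV\n", sum >> 4, vrtc_target_mv(1));
	return 0;
}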
287 | static int __devinit pm860x_rtc_probe(struct platform_device *pdev) | ||
288 | { | ||
289 | struct pm860x_chip *chip = dev_get_drvdata(pdev->dev.parent); | ||
290 | struct pm860x_rtc_pdata *pdata = NULL; | ||
291 | struct pm860x_rtc_info *info; | ||
292 | struct rtc_time tm; | ||
293 | unsigned long ticks = 0; | ||
294 | int ret; | ||
295 | |||
296 | pdata = pdev->dev.platform_data; | ||
297 | if (pdata == NULL) | ||
298 | dev_warn(&pdev->dev, "No platform data!\n"); | ||
299 | |||
300 | info = kzalloc(sizeof(struct pm860x_rtc_info), GFP_KERNEL); | ||
301 | if (!info) | ||
302 | return -ENOMEM; | ||
303 | info->irq = platform_get_irq(pdev, 0); | ||
304 | if (info->irq < 0) { | ||
305 | dev_err(&pdev->dev, "No IRQ resource!\n"); | ||
306 | ret = -EINVAL; | ||
307 | goto out; | ||
308 | } | ||
309 | |||
310 | info->chip = chip; | ||
311 | info->i2c = (chip->id == CHIP_PM8607) ? chip->client : chip->companion; | ||
312 | info->dev = &pdev->dev; | ||
313 | dev_set_drvdata(&pdev->dev, info); | ||
314 | |||
315 | ret = request_threaded_irq(info->irq, NULL, rtc_update_handler, | ||
316 | IRQF_ONESHOT, "rtc", info); | ||
317 | if (ret < 0) { | ||
318 | dev_err(chip->dev, "Failed to request IRQ: #%d: %d\n", | ||
319 | info->irq, ret); | ||
320 | goto out; | ||
321 | } | ||
322 | |||
323 | /* set addresses of 32-bit base value for RTC time */ | ||
324 | pm860x_page_reg_write(info->i2c, REG0_ADDR, REG0_DATA); | ||
325 | pm860x_page_reg_write(info->i2c, REG1_ADDR, REG1_DATA); | ||
326 | pm860x_page_reg_write(info->i2c, REG2_ADDR, REG2_DATA); | ||
327 | pm860x_page_reg_write(info->i2c, REG3_ADDR, REG3_DATA); | ||
328 | |||
329 | ret = pm860x_rtc_read_time(&pdev->dev, &tm); | ||
330 | if (ret < 0) { | ||
331 | dev_err(&pdev->dev, "Failed to read initial time.\n"); | ||
332 | goto out_rtc; | ||
333 | } | ||
334 | if ((tm.tm_year < 70) || (tm.tm_year > 138)) { | ||
335 | tm.tm_year = 70; | ||
336 | tm.tm_mon = 0; | ||
337 | tm.tm_mday = 1; | ||
338 | tm.tm_hour = 0; | ||
339 | tm.tm_min = 0; | ||
340 | tm.tm_sec = 0; | ||
341 | ret = pm860x_rtc_set_time(&pdev->dev, &tm); | ||
342 | if (ret < 0) { | ||
343 | dev_err(&pdev->dev, "Failed to set initial time.\n"); | ||
344 | goto out_rtc; | ||
345 | } | ||
346 | } | ||
347 | rtc_tm_to_time(&tm, &ticks); | ||
348 | if (pdata && pdata->sync) { | ||
349 | pdata->sync(ticks); | ||
350 | info->sync = pdata->sync; | ||
351 | } | ||
352 | |||
353 | info->rtc_dev = rtc_device_register("88pm860x-rtc", &pdev->dev, | ||
354 | &pm860x_rtc_ops, THIS_MODULE); | ||
355 | ret = PTR_ERR(info->rtc_dev); | ||
356 | if (IS_ERR(info->rtc_dev)) { | ||
357 | dev_err(&pdev->dev, "Failed to register RTC device: %d\n", ret); | ||
358 | goto out_rtc; | ||
359 | } | ||
360 | |||
361 | /* | ||
362 | * Enable the internal XO instead of the internal 3.25MHz clock, since | ||
363 | * it keeps free-running in the PMIC power-down state. | ||
364 | */ | ||
365 | pm860x_set_bits(info->i2c, PM8607_RTC1, RTC1_USE_XO, RTC1_USE_XO); | ||
366 | |||
367 | #ifdef VRTC_CALIBRATION | ||
368 | /* <00> -- 2.7V, <01> -- 2.9V, <10> -- 3.1V, <11> -- 3.3V */ | ||
369 | if (pdata && pdata->vrtc) | ||
370 | info->vrtc = pdata->vrtc & 0x3; | ||
371 | else | ||
372 | info->vrtc = 1; | ||
373 | pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, MEAS2_VRTC); | ||
374 | |||
375 | /* calibrate VRTC */ | ||
376 | INIT_DELAYED_WORK(&info->calib_work, calibrate_vrtc_work); | ||
377 | schedule_delayed_work(&info->calib_work, VRTC_CALIB_INTERVAL); | ||
378 | #endif /* VRTC_CALIBRATION */ | ||
379 | return 0; | ||
380 | out_rtc: | ||
381 | free_irq(info->irq, info); | ||
382 | out: | ||
383 | kfree(info); | ||
384 | return ret; | ||
385 | } | ||
386 | |||
387 | static int __devexit pm860x_rtc_remove(struct platform_device *pdev) | ||
388 | { | ||
389 | struct pm860x_rtc_info *info = platform_get_drvdata(pdev); | ||
390 | |||
391 | #ifdef VRTC_CALIBRATION | ||
392 | flush_scheduled_work(); | ||
393 | /* disable measurement */ | ||
394 | pm860x_set_bits(info->i2c, PM8607_MEAS_EN2, MEAS2_VRTC, 0); | ||
395 | #endif /* VRTC_CALIBRATION */ | ||
396 | |||
397 | platform_set_drvdata(pdev, NULL); | ||
398 | rtc_device_unregister(info->rtc_dev); | ||
399 | free_irq(info->irq, info); | ||
400 | kfree(info); | ||
401 | return 0; | ||
402 | } | ||
403 | |||
404 | static struct platform_driver pm860x_rtc_driver = { | ||
405 | .driver = { | ||
406 | .name = "88pm860x-rtc", | ||
407 | .owner = THIS_MODULE, | ||
408 | }, | ||
409 | .probe = pm860x_rtc_probe, | ||
410 | .remove = __devexit_p(pm860x_rtc_remove), | ||
411 | }; | ||
412 | |||
413 | static int __init pm860x_rtc_init(void) | ||
414 | { | ||
415 | return platform_driver_register(&pm860x_rtc_driver); | ||
416 | } | ||
417 | module_init(pm860x_rtc_init); | ||
418 | |||
419 | static void __exit pm860x_rtc_exit(void) | ||
420 | { | ||
421 | platform_driver_unregister(&pm860x_rtc_driver); | ||
422 | } | ||
423 | module_exit(pm860x_rtc_exit); | ||
424 | |||
425 | MODULE_DESCRIPTION("Marvell 88PM860x RTC driver"); | ||
426 | MODULE_AUTHOR("Haojian Zhuang <haojian.zhuang@marvell.com>"); | ||
427 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/rtc/rtc-em3027.c b/drivers/rtc/rtc-em3027.c new file mode 100644 index 000000000000..d8e1c2578553 --- /dev/null +++ b/drivers/rtc/rtc-em3027.c | |||
@@ -0,0 +1,161 @@ | |||
1 | /* | ||
2 | * An rtc/i2c driver for the EM Microelectronic EM3027 | ||
3 | * Copyright 2011 CompuLab, Ltd. | ||
4 | * | ||
5 | * Author: Mike Rapoport <mike@compulab.co.il> | ||
6 | * | ||
7 | * Based on rtc-ds1672.c by Alessandro Zummo <a.zummo@towertech.it> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify | ||
10 | * it under the terms of the GNU General Public License version 2 as | ||
11 | * published by the Free Software Foundation. | ||
12 | */ | ||
13 | |||
14 | #include <linux/i2c.h> | ||
15 | #include <linux/rtc.h> | ||
16 | #include <linux/bcd.h> | ||
17 | |||
18 | /* Registers */ | ||
19 | #define EM3027_REG_ON_OFF_CTRL 0x00 | ||
20 | #define EM3027_REG_IRQ_CTRL 0x01 | ||
21 | #define EM3027_REG_IRQ_FLAGS 0x02 | ||
22 | #define EM3027_REG_STATUS 0x03 | ||
23 | #define EM3027_REG_RST_CTRL 0x04 | ||
24 | |||
25 | #define EM3027_REG_WATCH_SEC 0x08 | ||
26 | #define EM3027_REG_WATCH_MIN 0x09 | ||
27 | #define EM3027_REG_WATCH_HOUR 0x0a | ||
28 | #define EM3027_REG_WATCH_DATE 0x0b | ||
29 | #define EM3027_REG_WATCH_DAY 0x0c | ||
30 | #define EM3027_REG_WATCH_MON 0x0d | ||
31 | #define EM3027_REG_WATCH_YEAR 0x0e | ||
32 | |||
33 | #define EM3027_REG_ALARM_SEC 0x10 | ||
34 | #define EM3027_REG_ALARM_MIN 0x11 | ||
35 | #define EM3027_REG_ALARM_HOUR 0x12 | ||
36 | #define EM3027_REG_ALARM_DATE 0x13 | ||
37 | #define EM3027_REG_ALARM_DAY 0x14 | ||
38 | #define EM3027_REG_ALARM_MON 0x15 | ||
39 | #define EM3027_REG_ALARM_YEAR 0x16 | ||
40 | |||
41 | static struct i2c_driver em3027_driver; | ||
42 | |||
43 | static int em3027_get_time(struct device *dev, struct rtc_time *tm) | ||
44 | { | ||
45 | struct i2c_client *client = to_i2c_client(dev); | ||
46 | |||
47 | unsigned char addr = EM3027_REG_WATCH_SEC; | ||
48 | unsigned char buf[7]; | ||
49 | |||
50 | struct i2c_msg msgs[] = { | ||
51 | {client->addr, 0, 1, &addr}, /* setup read addr */ | ||
52 | {client->addr, I2C_M_RD, 7, buf}, /* read time/date */ | ||
53 | }; | ||
54 | |||
55 | /* read time/date registers */ | ||
56 | if ((i2c_transfer(client->adapter, &msgs[0], 2)) != 2) { | ||
57 | dev_err(&client->dev, "%s: read error\n", __func__); | ||
58 | return -EIO; | ||
59 | } | ||
60 | |||
61 | tm->tm_sec = bcd2bin(buf[0]); | ||
62 | tm->tm_min = bcd2bin(buf[1]); | ||
63 | tm->tm_hour = bcd2bin(buf[2]); | ||
64 | tm->tm_mday = bcd2bin(buf[3]); | ||
65 | tm->tm_wday = bcd2bin(buf[4]); | ||
66 | tm->tm_mon = bcd2bin(buf[5]); | ||
67 | tm->tm_year = bcd2bin(buf[6]) + 100; | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static int em3027_set_time(struct device *dev, struct rtc_time *tm) | ||
73 | { | ||
74 | struct i2c_client *client = to_i2c_client(dev); | ||
75 | unsigned char buf[8]; | ||
76 | |||
77 | struct i2c_msg msg = { | ||
78 | client->addr, 0, 8, buf, /* write time/date */ | ||
79 | }; | ||
80 | |||
81 | buf[0] = EM3027_REG_WATCH_SEC; | ||
82 | buf[1] = bin2bcd(tm->tm_sec); | ||
83 | buf[2] = bin2bcd(tm->tm_min); | ||
84 | buf[3] = bin2bcd(tm->tm_hour); | ||
85 | buf[4] = bin2bcd(tm->tm_mday); | ||
86 | buf[5] = bin2bcd(tm->tm_wday); | ||
87 | buf[6] = bin2bcd(tm->tm_mon); | ||
88 | buf[7] = bin2bcd(tm->tm_year % 100); | ||
89 | |||
90 | /* write time/date registers */ | ||
91 | if ((i2c_transfer(client->adapter, &msg, 1)) != 1) { | ||
92 | dev_err(&client->dev, "%s: write error\n", __func__); | ||
93 | return -EIO; | ||
94 | } | ||
95 | |||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | static const struct rtc_class_ops em3027_rtc_ops = { | ||
100 | .read_time = em3027_get_time, | ||
101 | .set_time = em3027_set_time, | ||
102 | }; | ||
103 | |||
104 | static int em3027_probe(struct i2c_client *client, | ||
105 | const struct i2c_device_id *id) | ||
106 | { | ||
107 | struct rtc_device *rtc; | ||
108 | |||
109 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) | ||
110 | return -ENODEV; | ||
111 | |||
112 | rtc = rtc_device_register(em3027_driver.driver.name, &client->dev, | ||
113 | &em3027_rtc_ops, THIS_MODULE); | ||
114 | if (IS_ERR(rtc)) | ||
115 | return PTR_ERR(rtc); | ||
116 | |||
117 | i2c_set_clientdata(client, rtc); | ||
118 | |||
119 | return 0; | ||
120 | } | ||
121 | |||
122 | static int em3027_remove(struct i2c_client *client) | ||
123 | { | ||
124 | struct rtc_device *rtc = i2c_get_clientdata(client); | ||
125 | |||
126 | if (rtc) | ||
127 | rtc_device_unregister(rtc); | ||
128 | |||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static struct i2c_device_id em3027_id[] = { | ||
133 | { "em3027", 0 }, | ||
134 | { } | ||
135 | }; | ||
136 | |||
137 | static struct i2c_driver em3027_driver = { | ||
138 | .driver = { | ||
139 | .name = "rtc-em3027", | ||
140 | }, | ||
141 | .probe = &em3027_probe, | ||
142 | .remove = &em3027_remove, | ||
143 | .id_table = em3027_id, | ||
144 | }; | ||
145 | |||
146 | static int __init em3027_init(void) | ||
147 | { | ||
148 | return i2c_add_driver(&em3027_driver); | ||
149 | } | ||
150 | |||
151 | static void __exit em3027_exit(void) | ||
152 | { | ||
153 | i2c_del_driver(&em3027_driver); | ||
154 | } | ||
155 | |||
156 | MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>"); | ||
157 | MODULE_DESCRIPTION("EM Microelectronic EM3027 RTC driver"); | ||
158 | MODULE_LICENSE("GPL"); | ||
159 | |||
160 | module_init(em3027_init); | ||
161 | module_exit(em3027_exit); | ||
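em3027_get_time() above reads the seven clock registers in one burst starting at the seconds register and converts each BCD byte to binary; the year register holds only two digits, so the driver adds 100 to the years-since-1900 value (i.e. it assumes 20xx dates). A standalone sketch of that decode, with a fabricated register dump for illustration:

#include <stdio.h>

/* local equivalent of the kernel's bcd2bin() */
static unsigned bcd2bin(unsigned char val)
{
	return (val >> 4) * 10 + (val & 0x0f);
}

int main(void)
{
	/* pretend burst read, SEC..YEAR order as in em3027_get_time(): 12:34:56 on 2011-06-25 */
	unsigned char buf[7] = { 0x56, 0x34, 0x12, 0x25, 0x06, 0x06, 0x11 };
	unsigned year = 1900 + bcd2bin(buf[6]) + 100;	/* same +100 as the driver */

	printf("%02u:%02u:%02u  %u-%02u-%02u\n",
	       bcd2bin(buf[2]), bcd2bin(buf[1]), bcd2bin(buf[0]),
	       year, bcd2bin(buf[5]), bcd2bin(buf[3]));
	return 0;
}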
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c new file mode 100644 index 000000000000..1a84b3e227d1 --- /dev/null +++ b/drivers/rtc/rtc-m41t93.c | |||
@@ -0,0 +1,225 @@ | |||
1 | /* | ||
2 | * | ||
3 | * Driver for ST M41T93 SPI RTC | ||
4 | * | ||
5 | * (c) 2010 Nikolaus Voss, Weinmann Medical GmbH | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License version 2 as | ||
9 | * published by the Free Software Foundation. | ||
10 | */ | ||
11 | |||
12 | #include <linux/bcd.h> | ||
13 | #include <linux/kernel.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/platform_device.h> | ||
16 | #include <linux/rtc.h> | ||
17 | #include <linux/spi/spi.h> | ||
18 | |||
19 | #define M41T93_REG_SSEC 0 | ||
20 | #define M41T93_REG_ST_SEC 1 | ||
21 | #define M41T93_REG_MIN 2 | ||
22 | #define M41T93_REG_CENT_HOUR 3 | ||
23 | #define M41T93_REG_WDAY 4 | ||
24 | #define M41T93_REG_DAY 5 | ||
25 | #define M41T93_REG_MON 6 | ||
26 | #define M41T93_REG_YEAR 7 | ||
27 | |||
28 | |||
29 | #define M41T93_REG_ALM_HOUR_HT 0xc | ||
30 | #define M41T93_REG_FLAGS 0xf | ||
31 | |||
32 | #define M41T93_FLAG_ST (1 << 7) | ||
33 | #define M41T93_FLAG_OF (1 << 2) | ||
34 | #define M41T93_FLAG_BL (1 << 4) | ||
35 | #define M41T93_FLAG_HT (1 << 6) | ||
36 | |||
37 | static inline int m41t93_set_reg(struct spi_device *spi, u8 addr, u8 data) | ||
38 | { | ||
39 | u8 buf[2]; | ||
40 | |||
41 | /* MSB must be '1' to write */ | ||
42 | buf[0] = addr | 0x80; | ||
43 | buf[1] = data; | ||
44 | |||
45 | return spi_write(spi, buf, sizeof(buf)); | ||
46 | } | ||
47 | |||
48 | static int m41t93_set_time(struct device *dev, struct rtc_time *tm) | ||
49 | { | ||
50 | struct spi_device *spi = to_spi_device(dev); | ||
51 | u8 buf[9] = {0x80}; /* write cmd + 8 data bytes */ | ||
52 | u8 * const data = &buf[1]; /* ptr to first data byte */ | ||
53 | |||
54 | dev_dbg(dev, "%s secs=%d, mins=%d, " | ||
55 | "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n", | ||
56 | "write", tm->tm_sec, tm->tm_min, | ||
57 | tm->tm_hour, tm->tm_mday, | ||
58 | tm->tm_mon, tm->tm_year, tm->tm_wday); | ||
59 | |||
60 | if (tm->tm_year < 100) { | ||
61 | dev_warn(&spi->dev, "unsupported date (before 2000-01-01).\n"); | ||
62 | return -EINVAL; | ||
63 | } | ||
64 | |||
65 | data[M41T93_REG_SSEC] = 0; | ||
66 | data[M41T93_REG_ST_SEC] = bin2bcd(tm->tm_sec); | ||
67 | data[M41T93_REG_MIN] = bin2bcd(tm->tm_min); | ||
68 | data[M41T93_REG_CENT_HOUR] = bin2bcd(tm->tm_hour) | | ||
69 | ((tm->tm_year/100-1) << 6); | ||
70 | data[M41T93_REG_DAY] = bin2bcd(tm->tm_mday); | ||
71 | data[M41T93_REG_WDAY] = bin2bcd(tm->tm_wday + 1); | ||
72 | data[M41T93_REG_MON] = bin2bcd(tm->tm_mon + 1); | ||
73 | data[M41T93_REG_YEAR] = bin2bcd(tm->tm_year % 100); | ||
74 | |||
75 | return spi_write(spi, buf, sizeof(buf)); | ||
76 | } | ||
77 | |||
78 | |||
79 | static int m41t93_get_time(struct device *dev, struct rtc_time *tm) | ||
80 | { | ||
81 | struct spi_device *spi = to_spi_device(dev); | ||
82 | const u8 start_addr = 0; | ||
83 | u8 buf[8]; | ||
84 | int century_after_1900; | ||
85 | int tmp; | ||
86 | int ret = 0; | ||
87 | |||
88 | /* Check status of clock. Two states must be considered: | ||
89 | 1. halt bit (HT) is set: the clock is running but update of readout | ||
90 | registers has been disabled due to power failure. This is the normal | ||
91 | case after poweron. Time is valid after resetting the HT bit. | ||
92 | 2. oscillator fail bit (OF) is set. The oscillator has been stopped | ||
93 | and time is invalid: | ||
94 | a) OF can be immediately reset. | ||
95 | b) OF cannot be immediately reset: oscillator has to be restarted. | ||
96 | */ | ||
97 | tmp = spi_w8r8(spi, M41T93_REG_ALM_HOUR_HT); | ||
98 | if (tmp < 0) | ||
99 | return tmp; | ||
100 | |||
101 | if (tmp & M41T93_FLAG_HT) { | ||
102 | dev_dbg(&spi->dev, "HT bit is set, reenable clock update.\n"); | ||
103 | m41t93_set_reg(spi, M41T93_REG_ALM_HOUR_HT, | ||
104 | tmp & ~M41T93_FLAG_HT); | ||
105 | } | ||
106 | |||
107 | tmp = spi_w8r8(spi, M41T93_REG_FLAGS); | ||
108 | if (tmp < 0) | ||
109 | return tmp; | ||
110 | |||
111 | if (tmp & M41T93_FLAG_OF) { | ||
112 | ret = -EINVAL; | ||
113 | dev_warn(&spi->dev, "OF bit is set, resetting.\n"); | ||
114 | m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF); | ||
115 | |||
116 | tmp = spi_w8r8(spi, M41T93_REG_FLAGS); | ||
117 | if (tmp < 0) | ||
118 | return tmp; | ||
119 | else if (tmp & M41T93_FLAG_OF) { | ||
120 | u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST; | ||
121 | |||
122 | dev_warn(&spi->dev, | ||
123 | "OF bit is still set, kickstarting clock.\n"); | ||
124 | m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc); | ||
125 | reset_osc &= ~M41T93_FLAG_ST; | ||
126 | m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc); | ||
127 | } | ||
128 | } | ||
129 | |||
130 | if (tmp & M41T93_FLAG_BL) | ||
131 | dev_warn(&spi->dev, "BL bit is set, replace battery.\n"); | ||
132 | |||
133 | /* read actual time/date */ | ||
134 | tmp = spi_write_then_read(spi, &start_addr, 1, buf, sizeof(buf)); | ||
135 | if (tmp < 0) | ||
136 | return tmp; | ||
137 | |||
138 | tm->tm_sec = bcd2bin(buf[M41T93_REG_ST_SEC]); | ||
139 | tm->tm_min = bcd2bin(buf[M41T93_REG_MIN]); | ||
140 | tm->tm_hour = bcd2bin(buf[M41T93_REG_CENT_HOUR] & 0x3f); | ||
141 | tm->tm_mday = bcd2bin(buf[M41T93_REG_DAY]); | ||
142 | tm->tm_mon = bcd2bin(buf[M41T93_REG_MON]) - 1; | ||
143 | tm->tm_wday = bcd2bin(buf[M41T93_REG_WDAY] & 0x0f) - 1; | ||
144 | |||
145 | century_after_1900 = (buf[M41T93_REG_CENT_HOUR] >> 6) + 1; | ||
146 | tm->tm_year = bcd2bin(buf[M41T93_REG_YEAR]) + century_after_1900 * 100; | ||
147 | |||
148 | dev_dbg(dev, "%s secs=%d, mins=%d, " | ||
149 | "hours=%d, mday=%d, mon=%d, year=%d, wday=%d\n", | ||
150 | "read", tm->tm_sec, tm->tm_min, | ||
151 | tm->tm_hour, tm->tm_mday, | ||
152 | tm->tm_mon, tm->tm_year, tm->tm_wday); | ||
153 | |||
154 | return ret < 0 ? ret : rtc_valid_tm(tm); | ||
155 | } | ||
156 | |||
157 | |||
158 | static const struct rtc_class_ops m41t93_rtc_ops = { | ||
159 | .read_time = m41t93_get_time, | ||
160 | .set_time = m41t93_set_time, | ||
161 | }; | ||
162 | |||
163 | static struct spi_driver m41t93_driver; | ||
164 | |||
165 | static int __devinit m41t93_probe(struct spi_device *spi) | ||
166 | { | ||
167 | struct rtc_device *rtc; | ||
168 | int res; | ||
169 | |||
170 | spi->bits_per_word = 8; | ||
171 | spi_setup(spi); | ||
172 | |||
173 | res = spi_w8r8(spi, M41T93_REG_WDAY); | ||
174 | if (res < 0 || (res & 0xf8) != 0) { | ||
175 | dev_err(&spi->dev, "not found 0x%x.\n", res); | ||
176 | return -ENODEV; | ||
177 | } | ||
178 | |||
179 | rtc = rtc_device_register(m41t93_driver.driver.name, | ||
180 | &spi->dev, &m41t93_rtc_ops, THIS_MODULE); | ||
181 | if (IS_ERR(rtc)) | ||
182 | return PTR_ERR(rtc); | ||
183 | |||
184 | dev_set_drvdata(&spi->dev, rtc); | ||
185 | |||
186 | return 0; | ||
187 | } | ||
188 | |||
189 | |||
190 | static int __devexit m41t93_remove(struct spi_device *spi) | ||
191 | { | ||
192 | struct rtc_device *rtc = platform_get_drvdata(spi); | ||
193 | |||
194 | if (rtc) | ||
195 | rtc_device_unregister(rtc); | ||
196 | |||
197 | return 0; | ||
198 | } | ||
199 | |||
200 | static struct spi_driver m41t93_driver = { | ||
201 | .driver = { | ||
202 | .name = "rtc-m41t93", | ||
203 | .bus = &spi_bus_type, | ||
204 | .owner = THIS_MODULE, | ||
205 | }, | ||
206 | .probe = m41t93_probe, | ||
207 | .remove = __devexit_p(m41t93_remove), | ||
208 | }; | ||
209 | |||
210 | static __init int m41t93_init(void) | ||
211 | { | ||
212 | return spi_register_driver(&m41t93_driver); | ||
213 | } | ||
214 | module_init(m41t93_init); | ||
215 | |||
216 | static __exit void m41t93_exit(void) | ||
217 | { | ||
218 | spi_unregister_driver(&m41t93_driver); | ||
219 | } | ||
220 | module_exit(m41t93_exit); | ||
221 | |||
222 | MODULE_AUTHOR("Nikolaus Voss <n.voss@weinmann.de>"); | ||
223 | MODULE_DESCRIPTION("Driver for ST M41T93 SPI RTC"); | ||
224 | MODULE_LICENSE("GPL"); | ||
225 | MODULE_ALIAS("spi:rtc-m41t93"); | ||
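m41t93_set_time() and m41t93_get_time() above share one register between two payloads: M41T93_REG_CENT_HOUR carries the BCD hour in its low six bits and a 2-bit century counter in bits 7:6, written as (tm_year/100 - 1) with tm_year counted from 1900 and read back as century_after_1900 = field + 1. A small self-contained sketch of that packing and unpacking (the local BCD helpers mirror the kernel's bin2bcd/bcd2bin):

#include <stdio.h>

static unsigned char bin2bcd(unsigned v) { return (unsigned char)(((v / 10) << 4) | (v % 10)); }
static unsigned bcd2bin(unsigned char v) { return (v >> 4) * 10 + (v & 0x0f); }

/* Pack/unpack of CENT_HOUR as done in m41t93_set_time()/m41t93_get_time():
 * low 6 bits = BCD hour, bits 7:6 = century counter (0 means the 20xx range). */
static unsigned char pack_cent_hour(int tm_hour, int tm_year /* years since 1900 */)
{
	return (unsigned char)(bin2bcd(tm_hour) | ((tm_year / 100 - 1) << 6));
}

static void unpack_cent_hour(unsigned char reg, unsigned char bcd_year,
			     int *tm_hour, int *tm_year)
{
	int century_after_1900 = (reg >> 6) + 1;

	*tm_hour = bcd2bin(reg & 0x3f);
	*tm_year = bcd2bin(bcd_year) + century_after_1900 * 100;
}

int main(void)
{
	int hour, year;
	unsigned char reg = pack_cent_hour(23, 111);	/* 23:xx in 2011 */

	unpack_cent_hour(reg, bin2bcd(11), &hour, &year);
	printf("hour %d, tm_year %d (%d AD)\n", hour, year, 1900 + year);
	return 0;
}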
diff --git a/drivers/rtc/rtc-mrst.c b/drivers/rtc/rtc-mrst.c index b2f096871a97..0cec5650d56a 100644 --- a/drivers/rtc/rtc-mrst.c +++ b/drivers/rtc/rtc-mrst.c | |||
@@ -380,7 +380,7 @@ cleanup1: | |||
380 | cleanup0: | 380 | cleanup0: |
381 | dev_set_drvdata(dev, NULL); | 381 | dev_set_drvdata(dev, NULL); |
382 | mrst_rtc.dev = NULL; | 382 | mrst_rtc.dev = NULL; |
383 | release_region(iomem->start, iomem->end + 1 - iomem->start); | 383 | release_mem_region(iomem->start, resource_size(iomem)); |
384 | dev_err(dev, "rtc-mrst: unable to initialise\n"); | 384 | dev_err(dev, "rtc-mrst: unable to initialise\n"); |
385 | return retval; | 385 | return retval; |
386 | } | 386 | } |
@@ -406,7 +406,7 @@ static void __devexit rtc_mrst_do_remove(struct device *dev) | |||
406 | mrst->rtc = NULL; | 406 | mrst->rtc = NULL; |
407 | 407 | ||
408 | iomem = mrst->iomem; | 408 | iomem = mrst->iomem; |
409 | release_region(iomem->start, iomem->end + 1 - iomem->start); | 409 | release_mem_region(iomem->start, resource_size(iomem)); |
410 | mrst->iomem = NULL; | 410 | mrst->iomem = NULL; |
411 | 411 | ||
412 | mrst->dev = NULL; | 412 | mrst->dev = NULL; |
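The two hunks above replace the hand-rolled length math with resource_size() and release the range as a memory region, presumably to match how it was requested. resource_size() is simply end - start + 1, because resource ranges are inclusive of their end address; a tiny local illustration (the struct and helper are re-declared here, outside the kernel):

#include <stdio.h>

struct resource { unsigned long start, end; };	/* trimmed-down local copy */

static unsigned long resource_size(const struct resource *r)
{
	return r->end - r->start + 1;		/* 'end' is inclusive */
}

int main(void)
{
	struct resource iomem = { .start = 0x1000, .end = 0x10ff };

	/* the old expression, iomem.end + 1 - iomem.start, gives the same 0x100 bytes */
	printf("size = 0x%lx\n", resource_size(&iomem));
	return 0;
}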
diff --git a/drivers/rtc/rtc-mxc.c b/drivers/rtc/rtc-mxc.c index d814417bee8c..39e41fbdf08b 100644 --- a/drivers/rtc/rtc-mxc.c +++ b/drivers/rtc/rtc-mxc.c | |||
@@ -55,12 +55,6 @@ static const u32 PIE_BIT_DEF[MAX_PIE_NUM][2] = { | |||
55 | { MAX_PIE_FREQ, RTC_SAM7_BIT }, | 55 | { MAX_PIE_FREQ, RTC_SAM7_BIT }, |
56 | }; | 56 | }; |
57 | 57 | ||
58 | /* Those are the bits from a classic RTC we want to mimic */ | ||
59 | #define RTC_IRQF 0x80 /* any of the following 3 is active */ | ||
60 | #define RTC_PF 0x40 /* Periodic interrupt */ | ||
61 | #define RTC_AF 0x20 /* Alarm interrupt */ | ||
62 | #define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */ | ||
63 | |||
64 | #define MXC_RTC_TIME 0 | 58 | #define MXC_RTC_TIME 0 |
65 | #define MXC_RTC_ALARM 1 | 59 | #define MXC_RTC_ALARM 1 |
66 | 60 | ||
diff --git a/drivers/rtc/rtc-pcf50633.c b/drivers/rtc/rtc-pcf50633.c index f90c574f9d05..0c423892923c 100644 --- a/drivers/rtc/rtc-pcf50633.c +++ b/drivers/rtc/rtc-pcf50633.c | |||
@@ -58,7 +58,6 @@ struct pcf50633_time { | |||
58 | 58 | ||
59 | struct pcf50633_rtc { | 59 | struct pcf50633_rtc { |
60 | int alarm_enabled; | 60 | int alarm_enabled; |
61 | int second_enabled; | ||
62 | int alarm_pending; | 61 | int alarm_pending; |
63 | 62 | ||
64 | struct pcf50633 *pcf; | 63 | struct pcf50633 *pcf; |
@@ -143,7 +142,7 @@ static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
143 | { | 142 | { |
144 | struct pcf50633_rtc *rtc; | 143 | struct pcf50633_rtc *rtc; |
145 | struct pcf50633_time pcf_tm; | 144 | struct pcf50633_time pcf_tm; |
146 | int second_masked, alarm_masked, ret = 0; | 145 | int alarm_masked, ret = 0; |
147 | 146 | ||
148 | rtc = dev_get_drvdata(dev); | 147 | rtc = dev_get_drvdata(dev); |
149 | 148 | ||
@@ -162,11 +161,8 @@ static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
162 | pcf_tm.time[PCF50633_TI_SEC]); | 161 | pcf_tm.time[PCF50633_TI_SEC]); |
163 | 162 | ||
164 | 163 | ||
165 | second_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_SECOND); | ||
166 | alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM); | 164 | alarm_masked = pcf50633_irq_mask_get(rtc->pcf, PCF50633_IRQ_ALARM); |
167 | 165 | ||
168 | if (!second_masked) | ||
169 | pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_SECOND); | ||
170 | if (!alarm_masked) | 166 | if (!alarm_masked) |
171 | pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM); | 167 | pcf50633_irq_mask(rtc->pcf, PCF50633_IRQ_ALARM); |
172 | 168 | ||
@@ -175,8 +171,6 @@ static int pcf50633_rtc_set_time(struct device *dev, struct rtc_time *tm) | |||
175 | PCF50633_TI_EXTENT, | 171 | PCF50633_TI_EXTENT, |
176 | &pcf_tm.time[0]); | 172 | &pcf_tm.time[0]); |
177 | 173 | ||
178 | if (!second_masked) | ||
179 | pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_SECOND); | ||
180 | if (!alarm_masked) | 174 | if (!alarm_masked) |
181 | pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM); | 175 | pcf50633_irq_unmask(rtc->pcf, PCF50633_IRQ_ALARM); |
182 | 176 | ||
@@ -250,15 +244,8 @@ static void pcf50633_rtc_irq(int irq, void *data) | |||
250 | { | 244 | { |
251 | struct pcf50633_rtc *rtc = data; | 245 | struct pcf50633_rtc *rtc = data; |
252 | 246 | ||
253 | switch (irq) { | 247 | rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF); |
254 | case PCF50633_IRQ_ALARM: | 248 | rtc->alarm_pending = 1; |
255 | rtc_update_irq(rtc->rtc_dev, 1, RTC_AF | RTC_IRQF); | ||
256 | rtc->alarm_pending = 1; | ||
257 | break; | ||
258 | case PCF50633_IRQ_SECOND: | ||
259 | rtc_update_irq(rtc->rtc_dev, 1, RTC_UF | RTC_IRQF); | ||
260 | break; | ||
261 | } | ||
262 | } | 249 | } |
263 | 250 | ||
264 | static int __devinit pcf50633_rtc_probe(struct platform_device *pdev) | 251 | static int __devinit pcf50633_rtc_probe(struct platform_device *pdev) |
@@ -282,9 +269,6 @@ static int __devinit pcf50633_rtc_probe(struct platform_device *pdev) | |||
282 | 269 | ||
283 | pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM, | 270 | pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_ALARM, |
284 | pcf50633_rtc_irq, rtc); | 271 | pcf50633_rtc_irq, rtc); |
285 | pcf50633_register_irq(rtc->pcf, PCF50633_IRQ_SECOND, | ||
286 | pcf50633_rtc_irq, rtc); | ||
287 | |||
288 | return 0; | 272 | return 0; |
289 | } | 273 | } |
290 | 274 | ||
@@ -295,7 +279,6 @@ static int __devexit pcf50633_rtc_remove(struct platform_device *pdev) | |||
295 | rtc = platform_get_drvdata(pdev); | 279 | rtc = platform_get_drvdata(pdev); |
296 | 280 | ||
297 | pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_ALARM); | 281 | pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_ALARM); |
298 | pcf50633_free_irq(rtc->pcf, PCF50633_IRQ_SECOND); | ||
299 | 282 | ||
300 | rtc_device_unregister(rtc->rtc_dev); | 283 | rtc_device_unregister(rtc->rtc_dev); |
301 | kfree(rtc); | 284 | kfree(rtc); |
diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c new file mode 100644 index 000000000000..ea09ff211dc6 --- /dev/null +++ b/drivers/rtc/rtc-rv3029c2.c | |||
@@ -0,0 +1,454 @@ | |||
1 | /* | ||
2 | * Micro Crystal RV-3029C2 rtc class driver | ||
3 | * | ||
4 | * Author: Gregory Hermant <gregory.hermant@calao-systems.com> | ||
5 | * | ||
6 | * based on previously existing rtc class drivers | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or modify | ||
9 | * it under the terms of the GNU General Public License version 2 as | ||
10 | * published by the Free Software Foundation. | ||
11 | * | ||
12 | * NOTE: Currently this driver only supports the bare minimum: reading | ||
13 | * and writing the RTC time and alarms. The extra features provided by this | ||
14 | * chip (trickle charger, EEPROM, temperature compensation) are unavailable. | ||
15 | */ | ||
16 | |||
17 | #include <linux/module.h> | ||
18 | #include <linux/i2c.h> | ||
19 | #include <linux/bcd.h> | ||
20 | #include <linux/rtc.h> | ||
21 | |||
22 | /* Register map */ | ||
23 | /* control section */ | ||
24 | #define RV3029C2_ONOFF_CTRL 0x00 | ||
25 | #define RV3029C2_IRQ_CTRL 0x01 | ||
26 | #define RV3029C2_IRQ_CTRL_AIE (1 << 0) | ||
27 | #define RV3029C2_IRQ_FLAGS 0x02 | ||
28 | #define RV3029C2_IRQ_FLAGS_AF (1 << 0) | ||
29 | #define RV3029C2_STATUS 0x03 | ||
30 | #define RV3029C2_STATUS_VLOW1 (1 << 2) | ||
31 | #define RV3029C2_STATUS_VLOW2 (1 << 3) | ||
32 | #define RV3029C2_STATUS_SR (1 << 4) | ||
33 | #define RV3029C2_STATUS_PON (1 << 5) | ||
34 | #define RV3029C2_STATUS_EEBUSY (1 << 7) | ||
35 | #define RV3029C2_RST_CTRL 0x04 | ||
36 | #define RV3029C2_CONTROL_SECTION_LEN 0x05 | ||
37 | |||
38 | /* watch section */ | ||
39 | #define RV3029C2_W_SEC 0x08 | ||
40 | #define RV3029C2_W_MINUTES 0x09 | ||
41 | #define RV3029C2_W_HOURS 0x0A | ||
42 | #define RV3029C2_REG_HR_12_24 (1<<6) /* 24h/12h mode */ | ||
43 | #define RV3029C2_REG_HR_PM (1<<5) /* PM/AM bit in 12h mode */ | ||
44 | #define RV3029C2_W_DATE 0x0B | ||
45 | #define RV3029C2_W_DAYS 0x0C | ||
46 | #define RV3029C2_W_MONTHS 0x0D | ||
47 | #define RV3029C2_W_YEARS 0x0E | ||
48 | #define RV3029C2_WATCH_SECTION_LEN 0x07 | ||
49 | |||
50 | /* alarm section */ | ||
51 | #define RV3029C2_A_SC 0x10 | ||
52 | #define RV3029C2_A_MN 0x11 | ||
53 | #define RV3029C2_A_HR 0x12 | ||
54 | #define RV3029C2_A_DT 0x13 | ||
55 | #define RV3029C2_A_DW 0x14 | ||
56 | #define RV3029C2_A_MO 0x15 | ||
57 | #define RV3029C2_A_YR 0x16 | ||
58 | #define RV3029C2_ALARM_SECTION_LEN 0x07 | ||
59 | |||
60 | /* timer section */ | ||
61 | #define RV3029C2_TIMER_LOW 0x18 | ||
62 | #define RV3029C2_TIMER_HIGH 0x19 | ||
63 | |||
64 | /* temperature section */ | ||
65 | #define RV3029C2_TEMP_PAGE 0x20 | ||
66 | |||
67 | /* eeprom data section */ | ||
68 | #define RV3029C2_E2P_EEDATA1 0x28 | ||
69 | #define RV3029C2_E2P_EEDATA2 0x29 | ||
70 | |||
71 | /* eeprom control section */ | ||
72 | #define RV3029C2_CONTROL_E2P_EECTRL 0x30 | ||
73 | #define RV3029C2_TRICKLE_1K (1<<0) /* 1K resistance */ | ||
74 | #define RV3029C2_TRICKLE_5K (1<<1) /* 5K resistance */ | ||
75 | #define RV3029C2_TRICKLE_20K (1<<2) /* 20K resistance */ | ||
76 | #define RV3029C2_TRICKLE_80K (1<<3) /* 80K resistance */ | ||
77 | #define RV3029C2_CONTROL_E2P_XTALOFFSET 0x31 | ||
78 | #define RV3029C2_CONTROL_E2P_QCOEF 0x32 | ||
79 | #define RV3029C2_CONTROL_E2P_TURNOVER 0x33 | ||
80 | |||
81 | /* user ram section */ | ||
82 | #define RV3029C2_USR1_RAM_PAGE 0x38 | ||
83 | #define RV3029C2_USR1_SECTION_LEN 0x04 | ||
84 | #define RV3029C2_USR2_RAM_PAGE 0x3C | ||
85 | #define RV3029C2_USR2_SECTION_LEN 0x04 | ||
86 | |||
87 | static int | ||
88 | rv3029c2_i2c_read_regs(struct i2c_client *client, u8 reg, u8 *buf, | ||
89 | unsigned len) | ||
90 | { | ||
91 | int ret; | ||
92 | |||
93 | if ((reg > RV3029C2_USR1_RAM_PAGE + 7) || | ||
94 | (reg + len > RV3029C2_USR1_RAM_PAGE + 8)) | ||
95 | return -EINVAL; | ||
96 | |||
97 | ret = i2c_smbus_read_i2c_block_data(client, reg, len, buf); | ||
98 | if (ret < 0) | ||
99 | return ret; | ||
100 | if (ret < len) | ||
101 | return -EIO; | ||
102 | return 0; | ||
103 | } | ||
104 | |||
105 | static int | ||
106 | rv3029c2_i2c_write_regs(struct i2c_client *client, u8 reg, u8 const buf[], | ||
107 | unsigned len) | ||
108 | { | ||
109 | if ((reg > RV3029C2_USR1_RAM_PAGE + 7) || | ||
110 | (reg + len > RV3029C2_USR1_RAM_PAGE + 8)) | ||
111 | return -EINVAL; | ||
112 | |||
113 | return i2c_smbus_write_i2c_block_data(client, reg, len, buf); | ||
114 | } | ||
115 | |||
116 | static int | ||
117 | rv3029c2_i2c_get_sr(struct i2c_client *client, u8 *buf) | ||
118 | { | ||
119 | int ret = rv3029c2_i2c_read_regs(client, RV3029C2_STATUS, buf, 1); | ||
120 | |||
121 | if (ret < 0) | ||
122 | return -EIO; | ||
123 | dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]); | ||
124 | return 0; | ||
125 | } | ||
126 | |||
127 | static int | ||
128 | rv3029c2_i2c_set_sr(struct i2c_client *client, u8 val) | ||
129 | { | ||
130 | u8 buf[1]; | ||
131 | int sr; | ||
132 | |||
133 | buf[0] = val; | ||
134 | sr = rv3029c2_i2c_write_regs(client, RV3029C2_STATUS, buf, 1); | ||
135 | dev_dbg(&client->dev, "status = 0x%.2x (%d)\n", buf[0], buf[0]); | ||
136 | if (sr < 0) | ||
137 | return -EIO; | ||
138 | return 0; | ||
139 | } | ||
140 | |||
141 | static int | ||
142 | rv3029c2_i2c_read_time(struct i2c_client *client, struct rtc_time *tm) | ||
143 | { | ||
144 | u8 buf[1]; | ||
145 | int ret; | ||
146 | u8 regs[RV3029C2_WATCH_SECTION_LEN] = { 0, }; | ||
147 | |||
148 | ret = rv3029c2_i2c_get_sr(client, buf); | ||
149 | if (ret < 0) { | ||
150 | dev_err(&client->dev, "%s: reading SR failed\n", __func__); | ||
151 | return -EIO; | ||
152 | } | ||
153 | |||
154 | ret = rv3029c2_i2c_read_regs(client, RV3029C2_W_SEC, regs, | ||
155 | RV3029C2_WATCH_SECTION_LEN); | ||
156 | if (ret < 0) { | ||
157 | dev_err(&client->dev, "%s: reading RTC section failed\n", | ||
158 | __func__); | ||
159 | return ret; | ||
160 | } | ||
161 | |||
162 | tm->tm_sec = bcd2bin(regs[RV3029C2_W_SEC-RV3029C2_W_SEC]); | ||
163 | tm->tm_min = bcd2bin(regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC]); | ||
164 | |||
165 | /* HR field has a more complex interpretation */ | ||
166 | { | ||
167 | const u8 _hr = regs[RV3029C2_W_HOURS-RV3029C2_W_SEC]; | ||
168 | if (_hr & RV3029C2_REG_HR_12_24) { | ||
169 | /* 12h format */ | ||
170 | tm->tm_hour = bcd2bin(_hr & 0x1f); | ||
171 | if (_hr & RV3029C2_REG_HR_PM) /* PM flag set */ | ||
172 | tm->tm_hour += 12; | ||
173 | } else /* 24h format */ | ||
174 | tm->tm_hour = bcd2bin(_hr & 0x3f); | ||
175 | } | ||
176 | |||
177 | tm->tm_mday = bcd2bin(regs[RV3029C2_W_DATE-RV3029C2_W_SEC]); | ||
178 | tm->tm_mon = bcd2bin(regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC]) - 1; | ||
179 | tm->tm_year = bcd2bin(regs[RV3029C2_W_YEARS-RV3029C2_W_SEC]) + 100; | ||
180 | tm->tm_wday = bcd2bin(regs[RV3029C2_W_DAYS-RV3029C2_W_SEC]) - 1; | ||
181 | |||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static int rv3029c2_rtc_read_time(struct device *dev, struct rtc_time *tm) | ||
186 | { | ||
187 | return rv3029c2_i2c_read_time(to_i2c_client(dev), tm); | ||
188 | } | ||
189 | |||
190 | static int | ||
191 | rv3029c2_i2c_read_alarm(struct i2c_client *client, struct rtc_wkalrm *alarm) | ||
192 | { | ||
193 | struct rtc_time *const tm = &alarm->time; | ||
194 | int ret; | ||
195 | u8 regs[8]; | ||
196 | |||
197 | ret = rv3029c2_i2c_get_sr(client, regs); | ||
198 | if (ret < 0) { | ||
199 | dev_err(&client->dev, "%s: reading SR failed\n", __func__); | ||
200 | return -EIO; | ||
201 | } | ||
202 | |||
203 | ret = rv3029c2_i2c_read_regs(client, RV3029C2_A_SC, regs, | ||
204 | RV3029C2_ALARM_SECTION_LEN); | ||
205 | |||
206 | if (ret < 0) { | ||
207 | dev_err(&client->dev, "%s: reading alarm section failed\n", | ||
208 | __func__); | ||
209 | return ret; | ||
210 | } | ||
211 | |||
212 | tm->tm_sec = bcd2bin(regs[RV3029C2_A_SC-RV3029C2_A_SC] & 0x7f); | ||
213 | tm->tm_min = bcd2bin(regs[RV3029C2_A_MN-RV3029C2_A_SC] & 0x7f); | ||
214 | tm->tm_hour = bcd2bin(regs[RV3029C2_A_HR-RV3029C2_A_SC] & 0x3f); | ||
215 | tm->tm_mday = bcd2bin(regs[RV3029C2_A_DT-RV3029C2_A_SC] & 0x3f); | ||
216 | tm->tm_mon = bcd2bin(regs[RV3029C2_A_MO-RV3029C2_A_SC] & 0x1f) - 1; | ||
217 | tm->tm_year = bcd2bin(regs[RV3029C2_A_YR-RV3029C2_A_SC] & 0x7f) + 100; | ||
218 | tm->tm_wday = bcd2bin(regs[RV3029C2_A_DW-RV3029C2_A_SC] & 0x07) - 1; | ||
219 | |||
220 | return 0; | ||
221 | } | ||
222 | |||
223 | static int | ||
224 | rv3029c2_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) | ||
225 | { | ||
226 | return rv3029c2_i2c_read_alarm(to_i2c_client(dev), alarm); | ||
227 | } | ||
228 | |||
229 | static int rv3029c2_rtc_i2c_alarm_set_irq(struct i2c_client *client, | ||
230 | int enable) | ||
231 | { | ||
232 | int ret; | ||
233 | u8 buf[1]; | ||
234 | |||
235 | /* read-modify-write the AIE bit in the IRQ control register */ | ||
236 | ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_CTRL, buf, 1); | ||
237 | if (ret < 0) { | ||
238 | dev_err(&client->dev, "can't read INT reg\n"); | ||
239 | return ret; | ||
240 | } | ||
241 | if (enable) | ||
242 | buf[0] |= RV3029C2_IRQ_CTRL_AIE; | ||
243 | else | ||
244 | buf[0] &= ~RV3029C2_IRQ_CTRL_AIE; | ||
245 | |||
246 | ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_CTRL, buf, 1); | ||
247 | if (ret < 0) { | ||
248 | dev_err(&client->dev, "can't set INT reg\n"); | ||
249 | return ret; | ||
250 | } | ||
251 | |||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client, | ||
256 | struct rtc_wkalrm *alarm) | ||
257 | { | ||
258 | struct rtc_time *const tm = &alarm->time; | ||
259 | int ret; | ||
260 | u8 regs[8]; | ||
261 | |||
262 | /* | ||
263 | * The clock has an 8 bit wide bcd-coded register (they never learn) | ||
264 | * for the year. tm_year is an offset from 1900 and we are interested | ||
265 | * in the 2000-2099 range, so any value less than 100 is invalid. | ||
266 | */ | ||
267 | if (tm->tm_year < 100) | ||
268 | return -EINVAL; | ||
269 | |||
270 | ret = rv3029c2_i2c_get_sr(client, regs); | ||
271 | if (ret < 0) { | ||
272 | dev_err(&client->dev, "%s: reading SR failed\n", __func__); | ||
273 | return -EIO; | ||
274 | } | ||
275 | regs[RV3029C2_A_SC-RV3029C2_A_SC] = bin2bcd(tm->tm_sec & 0x7f); | ||
276 | regs[RV3029C2_A_MN-RV3029C2_A_SC] = bin2bcd(tm->tm_min & 0x7f); | ||
277 | regs[RV3029C2_A_HR-RV3029C2_A_SC] = bin2bcd(tm->tm_hour & 0x3f); | ||
278 | regs[RV3029C2_A_DT-RV3029C2_A_SC] = bin2bcd(tm->tm_mday & 0x3f); | ||
279 | regs[RV3029C2_A_MO-RV3029C2_A_SC] = bin2bcd(tm->tm_mon + 1); | ||
280 | regs[RV3029C2_A_DW-RV3029C2_A_SC] = bin2bcd(tm->tm_wday + 1); | ||
281 | regs[RV3029C2_A_YR-RV3029C2_A_SC] = bin2bcd(tm->tm_year - 100); | ||
282 | |||
283 | ret = rv3029c2_i2c_write_regs(client, RV3029C2_A_SC, regs, | ||
284 | RV3029C2_ALARM_SECTION_LEN); | ||
285 | if (ret < 0) | ||
286 | return ret; | ||
287 | |||
288 | if (alarm->enabled) { | ||
289 | u8 buf[1]; | ||
290 | |||
291 | /* clear AF flag */ | ||
292 | ret = rv3029c2_i2c_read_regs(client, RV3029C2_IRQ_FLAGS, | ||
293 | buf, 1); | ||
294 | if (ret < 0) { | ||
295 | dev_err(&client->dev, "can't read alarm flag\n"); | ||
296 | return ret; | ||
297 | } | ||
298 | buf[0] &= ~RV3029C2_IRQ_FLAGS_AF; | ||
299 | ret = rv3029c2_i2c_write_regs(client, RV3029C2_IRQ_FLAGS, | ||
300 | buf, 1); | ||
301 | if (ret < 0) { | ||
302 | dev_err(&client->dev, "can't set alarm flag\n"); | ||
303 | return ret; | ||
304 | } | ||
305 | /* enable AIE irq */ | ||
306 | ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1); | ||
307 | if (ret) | ||
308 | return ret; | ||
309 | |||
310 | dev_dbg(&client->dev, "alarm IRQ armed\n"); | ||
311 | } else { | ||
312 | /* disable AIE irq */ | ||
313 | ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0); | ||
314 | if (ret) | ||
315 | return ret; | ||
316 | |||
317 | dev_dbg(&client->dev, "alarm IRQ disabled\n"); | ||
318 | } | ||
319 | |||
320 | return 0; | ||
321 | } | ||
322 | |||
323 | static int rv3029c2_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) | ||
324 | { | ||
325 | return rv3029c2_rtc_i2c_set_alarm(to_i2c_client(dev), alarm); | ||
326 | } | ||
327 | |||
328 | static int | ||
329 | rv3029c2_i2c_set_time(struct i2c_client *client, struct rtc_time const *tm) | ||
330 | { | ||
331 | u8 regs[8]; | ||
332 | int ret; | ||
333 | |||
334 | /* | ||
335 | * The clock has an 8 bit wide bcd-coded register (they never learn) | ||
336 | * for the year. tm_year is an offset from 1900 and we are interested | ||
337 | * in the 2000-2099 range, so any value less than 100 is invalid. | ||
338 | */ | ||
339 | if (tm->tm_year < 100) | ||
340 | return -EINVAL; | ||
341 | |||
342 | regs[RV3029C2_W_SEC-RV3029C2_W_SEC] = bin2bcd(tm->tm_sec); | ||
343 | regs[RV3029C2_W_MINUTES-RV3029C2_W_SEC] = bin2bcd(tm->tm_min); | ||
344 | regs[RV3029C2_W_HOURS-RV3029C2_W_SEC] = bin2bcd(tm->tm_hour); | ||
345 | regs[RV3029C2_W_DATE-RV3029C2_W_SEC] = bin2bcd(tm->tm_mday); | ||
346 | regs[RV3029C2_W_MONTHS-RV3029C2_W_SEC] = bin2bcd(tm->tm_mon+1); | ||
347 | regs[RV3029C2_W_DAYS-RV3029C2_W_SEC] = bin2bcd((tm->tm_wday & 7)+1); | ||
348 | regs[RV3029C2_W_YEARS-RV3029C2_W_SEC] = bin2bcd(tm->tm_year - 100); | ||
349 | |||
350 | ret = rv3029c2_i2c_write_regs(client, RV3029C2_W_SEC, regs, | ||
351 | RV3029C2_WATCH_SECTION_LEN); | ||
352 | if (ret < 0) | ||
353 | return ret; | ||
354 | |||
355 | ret = rv3029c2_i2c_get_sr(client, regs); | ||
356 | if (ret < 0) { | ||
357 | dev_err(&client->dev, "%s: reading SR failed\n", __func__); | ||
358 | return ret; | ||
359 | } | ||
360 | /* clear PON bit */ | ||
361 | ret = rv3029c2_i2c_set_sr(client, (regs[0] & ~RV3029C2_STATUS_PON)); | ||
362 | if (ret < 0) { | ||
363 | dev_err(&client->dev, "%s: writing SR failed\n", __func__); | ||
364 | return ret; | ||
365 | } | ||
366 | |||
367 | return 0; | ||
368 | } | ||
369 | |||
370 | static int rv3029c2_rtc_set_time(struct device *dev, struct rtc_time *tm) | ||
371 | { | ||
372 | return rv3029c2_i2c_set_time(to_i2c_client(dev), tm); | ||
373 | } | ||
374 | |||
375 | static const struct rtc_class_ops rv3029c2_rtc_ops = { | ||
376 | .read_time = rv3029c2_rtc_read_time, | ||
377 | .set_time = rv3029c2_rtc_set_time, | ||
378 | .read_alarm = rv3029c2_rtc_read_alarm, | ||
379 | .set_alarm = rv3029c2_rtc_set_alarm, | ||
380 | }; | ||
381 | |||
382 | static struct i2c_device_id rv3029c2_id[] = { | ||
383 | { "rv3029c2", 0 }, | ||
384 | { } | ||
385 | }; | ||
386 | MODULE_DEVICE_TABLE(i2c, rv3029c2_id); | ||
387 | |||
388 | static int __devinit | ||
389 | rv3029c2_probe(struct i2c_client *client, const struct i2c_device_id *id) | ||
390 | { | ||
391 | struct rtc_device *rtc; | ||
392 | int rc = 0; | ||
393 | u8 buf[1]; | ||
394 | |||
395 | if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_EMUL)) | ||
396 | return -ENODEV; | ||
397 | |||
398 | rtc = rtc_device_register(client->name, | ||
399 | &client->dev, &rv3029c2_rtc_ops, | ||
400 | THIS_MODULE); | ||
401 | |||
402 | if (IS_ERR(rtc)) | ||
403 | return PTR_ERR(rtc); | ||
404 | |||
405 | i2c_set_clientdata(client, rtc); | ||
406 | |||
407 | rc = rv3029c2_i2c_get_sr(client, buf); | ||
408 | if (rc < 0) { | ||
409 | dev_err(&client->dev, "reading status failed\n"); | ||
410 | goto exit_unregister; | ||
411 | } | ||
412 | |||
413 | return 0; | ||
414 | |||
415 | exit_unregister: | ||
416 | rtc_device_unregister(rtc); | ||
417 | |||
418 | return rc; | ||
419 | } | ||
420 | |||
421 | static int __devexit rv3029c2_remove(struct i2c_client *client) | ||
422 | { | ||
423 | struct rtc_device *rtc = i2c_get_clientdata(client); | ||
424 | |||
425 | rtc_device_unregister(rtc); | ||
426 | |||
427 | return 0; | ||
428 | } | ||
429 | |||
430 | static struct i2c_driver rv3029c2_driver = { | ||
431 | .driver = { | ||
432 | .name = "rtc-rv3029c2", | ||
433 | }, | ||
434 | .probe = rv3029c2_probe, | ||
435 | .remove = __devexit_p(rv3029c2_remove), | ||
436 | .id_table = rv3029c2_id, | ||
437 | }; | ||
438 | |||
439 | static int __init rv3029c2_init(void) | ||
440 | { | ||
441 | return i2c_add_driver(&rv3029c2_driver); | ||
442 | } | ||
443 | |||
444 | static void __exit rv3029c2_exit(void) | ||
445 | { | ||
446 | i2c_del_driver(&rv3029c2_driver); | ||
447 | } | ||
448 | |||
449 | module_init(rv3029c2_init); | ||
450 | module_exit(rv3029c2_exit); | ||
451 | |||
452 | MODULE_AUTHOR("Gregory Hermant <gregory.hermant@calao-systems.com>"); | ||
453 | MODULE_DESCRIPTION("Micro Crystal RV3029C2 RTC driver"); | ||
454 | MODULE_LICENSE("GPL"); | ||
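For reference, a minimal board-support sketch (not part of this patch) of how a platform might instantiate the new rv3029c2 driver over I2C; the bus number and the 0x56 slave address are assumptions to be checked against the board schematic and the RV-3029-C2 datasheet:

#include <linux/i2c.h>
#include <linux/init.h>

static struct i2c_board_info board_rtc_info __initdata = {
	I2C_BOARD_INFO("rv3029c2", 0x56),	/* 0x56 is assumed, verify on the board */
};

static int __init board_rtc_register(void)
{
	/* bus 0 is a placeholder; use the adapter the RTC actually sits on */
	return i2c_register_board_info(0, &board_rtc_info, 1);
}
arch_initcall(board_rtc_register);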
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c new file mode 100644 index 000000000000..893bac2bb21b --- /dev/null +++ b/drivers/rtc/rtc-spear.c | |||
@@ -0,0 +1,534 @@ | |||
1 | /* | ||
2 | * drivers/rtc/rtc-spear.c | ||
3 | * | ||
4 | * Copyright (C) 2010 ST Microelectronics | ||
5 | * Rajeev Kumar<rajeev-dlh.kumar@st.com> | ||
6 | * | ||
7 | * This file is licensed under the terms of the GNU General Public | ||
8 | * License version 2. This program is licensed "as is" without any | ||
9 | * warranty of any kind, whether express or implied. | ||
10 | */ | ||
11 | |||
12 | #include <linux/bcd.h> | ||
13 | #include <linux/clk.h> | ||
14 | #include <linux/delay.h> | ||
15 | #include <linux/init.h> | ||
16 | #include <linux/io.h> | ||
17 | #include <linux/irq.h> | ||
18 | #include <linux/module.h> | ||
19 | #include <linux/platform_device.h> | ||
20 | #include <linux/rtc.h> | ||
21 | #include <linux/slab.h> | ||
22 | #include <linux/spinlock.h> | ||
23 | |||
24 | /* RTC registers */ | ||
25 | #define TIME_REG 0x00 | ||
26 | #define DATE_REG 0x04 | ||
27 | #define ALARM_TIME_REG 0x08 | ||
28 | #define ALARM_DATE_REG 0x0C | ||
29 | #define CTRL_REG 0x10 | ||
30 | #define STATUS_REG 0x14 | ||
31 | |||
32 | /* TIME_REG & ALARM_TIME_REG */ | ||
33 | #define SECONDS_UNITS (0xf<<0) /* seconds units position */ | ||
34 | #define SECONDS_TENS (0x7<<4) /* seconds tens position */ | ||
35 | #define MINUTES_UNITS (0xf<<8) /* minutes units position */ | ||
36 | #define MINUTES_TENS (0x7<<12) /* minutes tens position */ | ||
37 | #define HOURS_UNITS (0xf<<16) /* hours units position */ | ||
38 | #define HOURS_TENS (0x3<<20) /* hours tens position */ | ||
39 | |||
40 | /* DATE_REG & ALARM_DATE_REG */ | ||
41 | #define DAYS_UNITS (0xf<<0) /* days units position */ | ||
42 | #define DAYS_TENS (0x3<<4) /* days tens position */ | ||
43 | #define MONTHS_UNITS (0xf<<8) /* months units position */ | ||
44 | #define MONTHS_TENS (0x1<<12) /* months tens position */ | ||
45 | #define YEARS_UNITS (0xf<<16) /* years units position */ | ||
46 | #define YEARS_TENS (0xf<<20) /* years tens position */ | ||
47 | #define YEARS_HUNDREDS (0xf<<24) /* years hundreds position */ | ||
48 | #define YEARS_MILLENIUMS (0xf<<28) /* years millennium position */ | ||
49 | |||
50 | /* MASK SHIFT TIME_REG & ALARM_TIME_REG*/ | ||
51 | #define SECOND_SHIFT 0x00 /* seconds units */ | ||
52 | #define MINUTE_SHIFT 0x08 /* minutes units position */ | ||
53 | #define HOUR_SHIFT 0x10 /* hours units position */ | ||
54 | #define MDAY_SHIFT 0x00 /* Month day shift */ | ||
55 | #define MONTH_SHIFT 0x08 /* Month shift */ | ||
56 | #define YEAR_SHIFT 0x10 /* Year shift */ | ||
57 | |||
58 | #define SECOND_MASK 0x7F | ||
59 | #define MIN_MASK 0x7F | ||
60 | #define HOUR_MASK 0x3F | ||
61 | #define DAY_MASK 0x3F | ||
62 | #define MONTH_MASK 0x7F | ||
63 | #define YEAR_MASK 0xFFFF | ||
64 | |||
65 | /* date reg equal to time reg, for debug only */ | ||
66 | #define TIME_BYP (1<<9) | ||
67 | #define INT_ENABLE (1<<31) /* interrupt enable */ | ||
68 | |||
69 | /* STATUS_REG */ | ||
70 | #define CLK_UNCONNECTED (1<<0) | ||
71 | #define PEND_WR_TIME (1<<2) | ||
72 | #define PEND_WR_DATE (1<<3) | ||
73 | #define LOST_WR_TIME (1<<4) | ||
74 | #define LOST_WR_DATE (1<<5) | ||
75 | #define RTC_INT_MASK (1<<31) | ||
76 | #define STATUS_BUSY (PEND_WR_TIME | PEND_WR_DATE) | ||
77 | #define STATUS_FAIL (LOST_WR_TIME | LOST_WR_DATE) | ||
78 | |||
79 | struct spear_rtc_config { | ||
80 | struct clk *clk; | ||
81 | spinlock_t lock; | ||
82 | void __iomem *ioaddr; | ||
83 | }; | ||
84 | |||
85 | static inline void spear_rtc_clear_interrupt(struct spear_rtc_config *config) | ||
86 | { | ||
87 | unsigned int val; | ||
88 | unsigned long flags; | ||
89 | |||
90 | spin_lock_irqsave(&config->lock, flags); | ||
91 | val = readl(config->ioaddr + STATUS_REG); | ||
92 | val |= RTC_INT_MASK; | ||
93 | writel(val, config->ioaddr + STATUS_REG); | ||
94 | spin_unlock_irqrestore(&config->lock, flags); | ||
95 | } | ||
96 | |||
97 | static inline void spear_rtc_enable_interrupt(struct spear_rtc_config *config) | ||
98 | { | ||
99 | unsigned int val; | ||
100 | |||
101 | val = readl(config->ioaddr + CTRL_REG); | ||
102 | if (!(val & INT_ENABLE)) { | ||
103 | spear_rtc_clear_interrupt(config); | ||
104 | val |= INT_ENABLE; | ||
105 | writel(val, config->ioaddr + CTRL_REG); | ||
106 | } | ||
107 | } | ||
108 | |||
109 | static inline void spear_rtc_disable_interrupt(struct spear_rtc_config *config) | ||
110 | { | ||
111 | unsigned int val; | ||
112 | |||
113 | val = readl(config->ioaddr + CTRL_REG); | ||
114 | if (val & INT_ENABLE) { | ||
115 | val &= ~INT_ENABLE; | ||
116 | writel(val, config->ioaddr + CTRL_REG); | ||
117 | } | ||
118 | } | ||
119 | |||
120 | static inline int is_write_complete(struct spear_rtc_config *config) | ||
121 | { | ||
122 | int ret = 0; | ||
123 | unsigned long flags; | ||
124 | |||
125 | spin_lock_irqsave(&config->lock, flags); | ||
126 | if ((readl(config->ioaddr + STATUS_REG)) & STATUS_FAIL) | ||
127 | ret = -EIO; | ||
128 | spin_unlock_irqrestore(&config->lock, flags); | ||
129 | |||
130 | return ret; | ||
131 | } | ||
132 | |||
133 | static void rtc_wait_not_busy(struct spear_rtc_config *config) | ||
134 | { | ||
135 | int status, count = 0; | ||
136 | unsigned long flags; | ||
137 | |||
138 | /* Assuming BUSY may stay active for up to 80 msec */ | ||
139 | for (count = 0; count < 80; count++) { | ||
140 | spin_lock_irqsave(&config->lock, flags); | ||
141 | status = readl(config->ioaddr + STATUS_REG); | ||
142 | spin_unlock_irqrestore(&config->lock, flags); | ||
143 | if ((status & STATUS_BUSY) == 0) | ||
144 | break; | ||
145 | /* check status busy, after each msec */ | ||
146 | msleep(1); | ||
147 | } | ||
148 | } | ||
149 | |||
150 | static irqreturn_t spear_rtc_irq(int irq, void *dev_id) | ||
151 | { | ||
152 | struct rtc_device *rtc = (struct rtc_device *)dev_id; | ||
153 | struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev); | ||
154 | unsigned long flags, events = 0; | ||
155 | unsigned int irq_data; | ||
156 | |||
157 | spin_lock_irqsave(&config->lock, flags); | ||
158 | irq_data = readl(config->ioaddr + STATUS_REG); | ||
159 | spin_unlock_irqrestore(&config->lock, flags); | ||
160 | |||
161 | if ((irq_data & RTC_INT_MASK)) { | ||
162 | spear_rtc_clear_interrupt(config); | ||
163 | events = RTC_IRQF | RTC_AF; | ||
164 | rtc_update_irq(rtc, 1, events); | ||
165 | return IRQ_HANDLED; | ||
166 | } else | ||
167 | return IRQ_NONE; | ||
168 | |||
169 | } | ||
170 | |||
171 | static int tm2bcd(struct rtc_time *tm) | ||
172 | { | ||
173 | if (rtc_valid_tm(tm) != 0) | ||
174 | return -EINVAL; | ||
175 | tm->tm_sec = bin2bcd(tm->tm_sec); | ||
176 | tm->tm_min = bin2bcd(tm->tm_min); | ||
177 | tm->tm_hour = bin2bcd(tm->tm_hour); | ||
178 | tm->tm_mday = bin2bcd(tm->tm_mday); | ||
179 | tm->tm_mon = bin2bcd(tm->tm_mon + 1); | ||
180 | tm->tm_year = bin2bcd(tm->tm_year); | ||
181 | |||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static void bcd2tm(struct rtc_time *tm) | ||
186 | { | ||
187 | tm->tm_sec = bcd2bin(tm->tm_sec); | ||
188 | tm->tm_min = bcd2bin(tm->tm_min); | ||
189 | tm->tm_hour = bcd2bin(tm->tm_hour); | ||
190 | tm->tm_mday = bcd2bin(tm->tm_mday); | ||
191 | tm->tm_mon = bcd2bin(tm->tm_mon) - 1; | ||
192 | /* epoch == 1900 */ | ||
193 | tm->tm_year = bcd2bin(tm->tm_year); | ||
194 | } | ||
195 | |||
196 | /* | ||
197 | * spear_rtc_read_time - read the time | ||
198 | * @dev: rtc device in use | ||
199 | * @tm: holds date and time | ||
200 | * | ||
201 | * This function reads the current time and date. It returns 0 on | ||
202 | * success, otherwise a negative error code. | ||
203 | */ | ||
204 | static int spear_rtc_read_time(struct device *dev, struct rtc_time *tm) | ||
205 | { | ||
206 | struct platform_device *pdev = to_platform_device(dev); | ||
207 | struct rtc_device *rtc = platform_get_drvdata(pdev); | ||
208 | struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev); | ||
209 | unsigned int time, date; | ||
210 | |||
211 | /* we don't report wday/yday/isdst ... */ | ||
212 | rtc_wait_not_busy(config); | ||
213 | |||
214 | time = readl(config->ioaddr + TIME_REG); | ||
215 | date = readl(config->ioaddr + DATE_REG); | ||
216 | tm->tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK; | ||
217 | tm->tm_min = (time >> MINUTE_SHIFT) & MIN_MASK; | ||
218 | tm->tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK; | ||
219 | tm->tm_mday = (date >> MDAY_SHIFT) & DAY_MASK; | ||
220 | tm->tm_mon = (date >> MONTH_SHIFT) & MONTH_MASK; | ||
221 | tm->tm_year = (date >> YEAR_SHIFT) & YEAR_MASK; | ||
222 | |||
223 | bcd2tm(tm); | ||
224 | return 0; | ||
225 | } | ||
226 | |||
227 | /* | ||
228 | * spear_rtc_set_time - set the time | ||
229 | * @dev: rtc device in use | ||
230 | * @tm: holds date and time | ||
231 | * | ||
232 | * This function sets the time and date. It returns 0 on success, | ||
233 | * otherwise a negative error code. | ||
234 | */ | ||
235 | static int spear_rtc_set_time(struct device *dev, struct rtc_time *tm) | ||
236 | { | ||
237 | struct platform_device *pdev = to_platform_device(dev); | ||
238 | struct rtc_device *rtc = platform_get_drvdata(pdev); | ||
239 | struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev); | ||
240 | unsigned int time, date; int err = 0; | ||
241 | |||
242 | if (tm2bcd(tm) < 0) | ||
243 | return -EINVAL; | ||
244 | |||
245 | rtc_wait_not_busy(config); | ||
246 | time = (tm->tm_sec << SECOND_SHIFT) | (tm->tm_min << MINUTE_SHIFT) | | ||
247 | (tm->tm_hour << HOUR_SHIFT); | ||
248 | date = (tm->tm_mday << MDAY_SHIFT) | (tm->tm_mon << MONTH_SHIFT) | | ||
249 | (tm->tm_year << YEAR_SHIFT); | ||
250 | writel(time, config->ioaddr + TIME_REG); | ||
251 | writel(date, config->ioaddr + DATE_REG); | ||
252 | err = is_write_complete(config); | ||
253 | if (err < 0) | ||
254 | return err; | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | /* | ||
260 | * spear_rtc_read_alarm - read the alarm time | ||
261 | * @dev: rtc device in use | ||
262 | * @alm: holds alarm date and time | ||
263 | * | ||
264 | * This function reads the alarm time and date. It returns 0 on | ||
265 | * success, otherwise a negative error code. | ||
266 | */ | ||
267 | static int spear_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm) | ||
268 | { | ||
269 | struct platform_device *pdev = to_platform_device(dev); | ||
270 | struct rtc_device *rtc = platform_get_drvdata(pdev); | ||
271 | struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev); | ||
272 | unsigned int time, date; | ||
273 | |||
274 | rtc_wait_not_busy(config); | ||
275 | |||
276 | time = readl(config->ioaddr + ALARM_TIME_REG); | ||
277 | date = readl(config->ioaddr + ALARM_DATE_REG); | ||
278 | alm->time.tm_sec = (time >> SECOND_SHIFT) & SECOND_MASK; | ||
279 | alm->time.tm_min = (time >> MINUTE_SHIFT) & MIN_MASK; | ||
280 | alm->time.tm_hour = (time >> HOUR_SHIFT) & HOUR_MASK; | ||
281 | alm->time.tm_mday = (date >> MDAY_SHIFT) & DAY_MASK; | ||
282 | alm->time.tm_mon = (date >> MONTH_SHIFT) & MONTH_MASK; | ||
283 | alm->time.tm_year = (date >> YEAR_SHIFT) & YEAR_MASK; | ||
284 | |||
285 | bcd2tm(&alm->time); | ||
286 | alm->enabled = readl(config->ioaddr + CTRL_REG) & INT_ENABLE; | ||
287 | |||
288 | return 0; | ||
289 | } | ||
290 | |||
291 | /* | ||
292 | * spear_rtc_set_alarm - set the alarm time | ||
293 | * @dev: rtc device in use | ||
294 | * @alm: holds alarm date and time | ||
295 | * | ||
296 | * This function sets the alarm time and date. It returns 0 on | ||
297 | * success, otherwise a negative error code. | ||
298 | */ | ||
299 | static int spear_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm) | ||
300 | { | ||
301 | struct platform_device *pdev = to_platform_device(dev); | ||
302 | struct rtc_device *rtc = platform_get_drvdata(pdev); | ||
303 | struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev); | ||
304 | unsigned int time, date; int err = 0; | ||
305 | |||
306 | if (tm2bcd(&alm->time) < 0) | ||
307 | return -EINVAL; | ||
308 | |||
309 | rtc_wait_not_busy(config); | ||
310 | |||
311 | time = (alm->time.tm_sec << SECOND_SHIFT) | (alm->time.tm_min << | ||
312 | MINUTE_SHIFT) | (alm->time.tm_hour << HOUR_SHIFT); | ||
313 | date = (alm->time.tm_mday << MDAY_SHIFT) | (alm->time.tm_mon << | ||
314 | MONTH_SHIFT) | (alm->time.tm_year << YEAR_SHIFT); | ||
315 | |||
316 | writel(time, config->ioaddr + ALARM_TIME_REG); | ||
317 | writel(date, config->ioaddr + ALARM_DATE_REG); | ||
318 | err = is_write_complete(config); | ||
319 | if (err < 0) | ||
320 | return err; | ||
321 | |||
322 | if (alm->enabled) | ||
323 | spear_rtc_enable_interrupt(config); | ||
324 | else | ||
325 | spear_rtc_disable_interrupt(config); | ||
326 | |||
327 | return 0; | ||
328 | } | ||
329 | static struct rtc_class_ops spear_rtc_ops = { | ||
330 | .read_time = spear_rtc_read_time, | ||
331 | .set_time = spear_rtc_set_time, | ||
332 | .read_alarm = spear_rtc_read_alarm, | ||
333 | .set_alarm = spear_rtc_set_alarm, | ||
334 | }; | ||
335 | |||
336 | static int __devinit spear_rtc_probe(struct platform_device *pdev) | ||
337 | { | ||
338 | struct resource *res; | ||
339 | struct rtc_device *rtc; | ||
340 | struct spear_rtc_config *config; | ||
341 | int status = 0; | ||
342 | int irq; | ||
343 | |||
344 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
345 | if (!res) { | ||
346 | dev_err(&pdev->dev, "no resource defined\n"); | ||
347 | return -EBUSY; | ||
348 | } | ||
349 | if (!request_mem_region(res->start, resource_size(res), pdev->name)) { | ||
350 | dev_err(&pdev->dev, "rtc region already claimed\n"); | ||
351 | return -EBUSY; | ||
352 | } | ||
353 | |||
354 | config = kzalloc(sizeof(*config), GFP_KERNEL); | ||
355 | if (!config) { | ||
356 | dev_err(&pdev->dev, "out of memory\n"); | ||
357 | status = -ENOMEM; | ||
358 | goto err_release_region; | ||
359 | } | ||
360 | |||
361 | config->clk = clk_get(&pdev->dev, NULL); | ||
362 | if (IS_ERR(config->clk)) { | ||
363 | status = PTR_ERR(config->clk); | ||
364 | goto err_kfree; | ||
365 | } | ||
366 | |||
367 | status = clk_enable(config->clk); | ||
368 | if (status < 0) | ||
369 | goto err_clk_put; | ||
370 | |||
371 | config->ioaddr = ioremap(res->start, resource_size(res)); | ||
372 | if (!config->ioaddr) { | ||
373 | dev_err(&pdev->dev, "ioremap fail\n"); | ||
374 | status = -ENOMEM; | ||
375 | goto err_disable_clock; | ||
376 | } | ||
377 | |||
378 | spin_lock_init(&config->lock); | ||
379 | |||
380 | rtc = rtc_device_register(pdev->name, &pdev->dev, &spear_rtc_ops, | ||
381 | THIS_MODULE); | ||
382 | if (IS_ERR(rtc)) { | ||
383 | dev_err(&pdev->dev, "can't register RTC device, err %ld\n", | ||
384 | PTR_ERR(rtc)); | ||
385 | status = PTR_ERR(rtc); | ||
386 | goto err_iounmap; | ||
387 | } | ||
388 | |||
389 | platform_set_drvdata(pdev, rtc); | ||
390 | dev_set_drvdata(&rtc->dev, config); | ||
391 | |||
392 | /* alarm irqs */ | ||
393 | irq = platform_get_irq(pdev, 0); | ||
394 | if (irq < 0) { | ||
395 | dev_err(&pdev->dev, "no update irq?\n"); | ||
396 | status = irq; | ||
397 | goto err_clear_platdata; | ||
398 | } | ||
399 | |||
400 | status = request_irq(irq, spear_rtc_irq, 0, pdev->name, rtc); | ||
401 | if (status) { | ||
402 | dev_err(&pdev->dev, "Alarm interrupt IRQ%d already claimed\n", | ||
403 | irq); | ||
404 | goto err_clear_platdata; | ||
405 | } | ||
406 | |||
407 | if (!device_can_wakeup(&pdev->dev)) | ||
408 | device_init_wakeup(&pdev->dev, 1); | ||
409 | |||
410 | return 0; | ||
411 | |||
412 | err_clear_platdata: | ||
413 | platform_set_drvdata(pdev, NULL); | ||
414 | dev_set_drvdata(&rtc->dev, NULL); | ||
415 | rtc_device_unregister(rtc); | ||
416 | err_iounmap: | ||
417 | iounmap(config->ioaddr); | ||
418 | err_disable_clock: | ||
419 | clk_disable(config->clk); | ||
420 | err_clk_put: | ||
421 | clk_put(config->clk); | ||
422 | err_kfree: | ||
423 | kfree(config); | ||
424 | err_release_region: | ||
425 | release_mem_region(res->start, resource_size(res)); | ||
426 | |||
427 | return status; | ||
428 | } | ||
429 | |||
430 | static int __devexit spear_rtc_remove(struct platform_device *pdev) | ||
431 | { | ||
432 | struct rtc_device *rtc = platform_get_drvdata(pdev); | ||
433 | struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev); | ||
434 | int irq; | ||
435 | struct resource *res; | ||
436 | |||
437 | /* leave rtc running, but disable irqs */ | ||
438 | spear_rtc_disable_interrupt(config); | ||
439 | device_init_wakeup(&pdev->dev, 0); | ||
440 | irq = platform_get_irq(pdev, 0); | ||
441 | if (irq >= 0) | ||
442 | free_irq(irq, rtc); | ||
443 | clk_disable(config->clk); | ||
444 | clk_put(config->clk); | ||
445 | iounmap(config->ioaddr); | ||
446 | kfree(config); | ||
447 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
448 | if (res) | ||
449 | release_mem_region(res->start, resource_size(res)); | ||
450 | platform_set_drvdata(pdev, NULL); | ||
451 | dev_set_drvdata(&rtc->dev, NULL); | ||
452 | rtc_device_unregister(rtc); | ||
453 | |||
454 | return 0; | ||
455 | } | ||
456 | |||
457 | #ifdef CONFIG_PM | ||
458 | |||
459 | static int spear_rtc_suspend(struct platform_device *pdev, pm_message_t state) | ||
460 | { | ||
461 | struct rtc_device *rtc = platform_get_drvdata(pdev); | ||
462 | struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev); | ||
463 | int irq; | ||
464 | |||
465 | irq = platform_get_irq(pdev, 0); | ||
466 | if (device_may_wakeup(&pdev->dev)) | ||
467 | enable_irq_wake(irq); | ||
468 | else { | ||
469 | spear_rtc_disable_interrupt(config); | ||
470 | clk_disable(config->clk); | ||
471 | } | ||
472 | |||
473 | return 0; | ||
474 | } | ||
475 | |||
476 | static int spear_rtc_resume(struct platform_device *pdev) | ||
477 | { | ||
478 | struct rtc_device *rtc = platform_get_drvdata(pdev); | ||
479 | struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev); | ||
480 | int irq; | ||
481 | |||
482 | irq = platform_get_irq(pdev, 0); | ||
483 | |||
484 | if (device_may_wakeup(&pdev->dev)) | ||
485 | disable_irq_wake(irq); | ||
486 | else { | ||
487 | clk_enable(config->clk); | ||
488 | spear_rtc_enable_interrupt(config); | ||
489 | } | ||
490 | |||
491 | return 0; | ||
492 | } | ||
493 | |||
494 | #else | ||
495 | #define spear_rtc_suspend NULL | ||
496 | #define spear_rtc_resume NULL | ||
497 | #endif | ||
498 | |||
499 | static void spear_rtc_shutdown(struct platform_device *pdev) | ||
500 | { | ||
501 | struct rtc_device *rtc = platform_get_drvdata(pdev); | ||
502 | struct spear_rtc_config *config = dev_get_drvdata(&rtc->dev); | ||
503 | |||
504 | spear_rtc_disable_interrupt(config); | ||
505 | clk_disable(config->clk); | ||
506 | } | ||
507 | |||
508 | static struct platform_driver spear_rtc_driver = { | ||
509 | .probe = spear_rtc_probe, | ||
510 | .remove = __devexit_p(spear_rtc_remove), | ||
511 | .suspend = spear_rtc_suspend, | ||
512 | .resume = spear_rtc_resume, | ||
513 | .shutdown = spear_rtc_shutdown, | ||
514 | .driver = { | ||
515 | .name = "rtc-spear", | ||
516 | }, | ||
517 | }; | ||
518 | |||
519 | static int __init rtc_init(void) | ||
520 | { | ||
521 | return platform_driver_register(&spear_rtc_driver); | ||
522 | } | ||
523 | module_init(rtc_init); | ||
524 | |||
525 | static void __exit rtc_exit(void) | ||
526 | { | ||
527 | platform_driver_unregister(&spear_rtc_driver); | ||
528 | } | ||
529 | module_exit(rtc_exit); | ||
530 | |||
531 | MODULE_ALIAS("platform:rtc-spear"); | ||
532 | MODULE_AUTHOR("Rajeev Kumar <rajeev-dlh.kumar@st.com>"); | ||
533 | MODULE_DESCRIPTION("ST SPEAr Realtime Clock Driver (RTC)"); | ||
534 | MODULE_LICENSE("GPL"); | ||
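As an illustration only, a hypothetical machine-level registration that provides the resources rtc-spear's probe expects (one memory region and one alarm IRQ); the base address and IRQ number below are placeholders, not values taken from this patch, and the board must also provide a clock lookup that clk_get(&pdev->dev, NULL) can resolve:

#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>

static struct resource spear_rtc_resources[] = {
	{
		.start	= 0xfc900000,		/* placeholder base address */
		.end	= 0xfc900000 + 0xfff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= 36,			/* placeholder alarm IRQ */
		.end	= 36,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device spear_rtc_device = {
	.name		= "rtc-spear",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(spear_rtc_resources),
	.resource	= spear_rtc_resources,
};

/* typically called from the machine init code */
static void __init board_add_rtc(void)
{
	platform_device_register(&spear_rtc_device);
}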
diff --git a/drivers/rtc/rtc-vt8500.c b/drivers/rtc/rtc-vt8500.c new file mode 100644 index 000000000000..b8bc862903ae --- /dev/null +++ b/drivers/rtc/rtc-vt8500.c | |||
@@ -0,0 +1,366 @@ | |||
1 | /* | ||
2 | * drivers/rtc/rtc-vt8500.c | ||
3 | * | ||
4 | * Copyright (C) 2010 Alexey Charkov <alchark@gmail.com> | ||
5 | * | ||
6 | * Based on rtc-pxa.c | ||
7 | * | ||
8 | * This software is licensed under the terms of the GNU General Public | ||
9 | * License version 2, as published by the Free Software Foundation, and | ||
10 | * may be copied, distributed, and modified under those terms. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | */ | ||
17 | |||
18 | #include <linux/module.h> | ||
19 | #include <linux/rtc.h> | ||
20 | #include <linux/init.h> | ||
21 | #include <linux/interrupt.h> | ||
22 | #include <linux/io.h> | ||
23 | #include <linux/bcd.h> | ||
24 | #include <linux/platform_device.h> | ||
25 | #include <linux/slab.h> | ||
26 | |||
27 | /* | ||
28 | * Register definitions | ||
29 | */ | ||
30 | #define VT8500_RTC_TS 0x00 /* Time set */ | ||
31 | #define VT8500_RTC_DS 0x04 /* Date set */ | ||
32 | #define VT8500_RTC_AS 0x08 /* Alarm set */ | ||
33 | #define VT8500_RTC_CR 0x0c /* Control */ | ||
34 | #define VT8500_RTC_TR 0x10 /* Time read */ | ||
35 | #define VT8500_RTC_DR 0x14 /* Date read */ | ||
36 | #define VT8500_RTC_WS 0x18 /* Write status */ | ||
37 | #define VT8500_RTC_CL 0x20 /* Calibration */ | ||
38 | #define VT8500_RTC_IS 0x24 /* Interrupt status */ | ||
39 | #define VT8500_RTC_ST 0x28 /* Status */ | ||
40 | |||
41 | #define INVALID_TIME_BIT (1 << 31) | ||
42 | |||
43 | #define DATE_CENTURY_S 19 | ||
44 | #define DATE_YEAR_S 11 | ||
45 | #define DATE_YEAR_MASK (0xff << DATE_YEAR_S) | ||
46 | #define DATE_MONTH_S 6 | ||
47 | #define DATE_MONTH_MASK (0x1f << DATE_MONTH_S) | ||
48 | #define DATE_DAY_MASK 0x3f | ||
49 | |||
50 | #define TIME_DOW_S 20 | ||
51 | #define TIME_DOW_MASK (0x07 << TIME_DOW_S) | ||
52 | #define TIME_HOUR_S 14 | ||
53 | #define TIME_HOUR_MASK (0x3f << TIME_HOUR_S) | ||
54 | #define TIME_MIN_S 7 | ||
55 | #define TIME_MIN_MASK (0x7f << TIME_MIN_S) | ||
56 | #define TIME_SEC_MASK 0x7f | ||
57 | |||
58 | #define ALARM_DAY_S 20 | ||
59 | #define ALARM_DAY_MASK (0x3f << ALARM_DAY_S) | ||
60 | |||
61 | #define ALARM_DAY_BIT (1 << 29) | ||
62 | #define ALARM_HOUR_BIT (1 << 28) | ||
63 | #define ALARM_MIN_BIT (1 << 27) | ||
64 | #define ALARM_SEC_BIT (1 << 26) | ||
65 | |||
66 | #define ALARM_ENABLE_MASK (ALARM_DAY_BIT \ | ||
67 | | ALARM_HOUR_BIT \ | ||
68 | | ALARM_MIN_BIT \ | ||
69 | | ALARM_SEC_BIT) | ||
70 | |||
71 | #define VT8500_RTC_CR_ENABLE (1 << 0) /* Enable RTC */ | ||
72 | #define VT8500_RTC_CR_24H (1 << 1) /* 24h time format */ | ||
73 | #define VT8500_RTC_CR_SM_ENABLE (1 << 2) /* Enable periodic irqs */ | ||
74 | #define VT8500_RTC_CR_SM_SEC (1 << 3) /* 0: 1Hz/60, 1: 1Hz */ | ||
75 | #define VT8500_RTC_CR_CALIB (1 << 4) /* Enable calibration */ | ||
76 | |||
77 | struct vt8500_rtc { | ||
78 | void __iomem *regbase; | ||
79 | struct resource *res; | ||
80 | int irq_alarm; | ||
81 | int irq_hz; | ||
82 | struct rtc_device *rtc; | ||
83 | spinlock_t lock; /* Protects this structure */ | ||
84 | }; | ||
85 | |||
86 | static irqreturn_t vt8500_rtc_irq(int irq, void *dev_id) | ||
87 | { | ||
88 | struct vt8500_rtc *vt8500_rtc = dev_id; | ||
89 | u32 isr; | ||
90 | unsigned long events = 0; | ||
91 | |||
92 | spin_lock(&vt8500_rtc->lock); | ||
93 | |||
94 | /* clear interrupt sources */ | ||
95 | isr = readl(vt8500_rtc->regbase + VT8500_RTC_IS); | ||
96 | writel(isr, vt8500_rtc->regbase + VT8500_RTC_IS); | ||
97 | |||
98 | spin_unlock(&vt8500_rtc->lock); | ||
99 | |||
100 | if (isr & 1) | ||
101 | events |= RTC_AF | RTC_IRQF; | ||
102 | |||
103 | /* Only second/minute interrupts are supported */ | ||
104 | if (isr & 2) | ||
105 | events |= RTC_UF | RTC_IRQF; | ||
106 | |||
107 | rtc_update_irq(vt8500_rtc->rtc, 1, events); | ||
108 | |||
109 | return IRQ_HANDLED; | ||
110 | } | ||
111 | |||
112 | static int vt8500_rtc_read_time(struct device *dev, struct rtc_time *tm) | ||
113 | { | ||
114 | struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); | ||
115 | u32 date, time; | ||
116 | |||
117 | date = readl(vt8500_rtc->regbase + VT8500_RTC_DR); | ||
118 | time = readl(vt8500_rtc->regbase + VT8500_RTC_TR); | ||
119 | |||
120 | tm->tm_sec = bcd2bin(time & TIME_SEC_MASK); | ||
121 | tm->tm_min = bcd2bin((time & TIME_MIN_MASK) >> TIME_MIN_S); | ||
122 | tm->tm_hour = bcd2bin((time & TIME_HOUR_MASK) >> TIME_HOUR_S); | ||
123 | tm->tm_mday = bcd2bin(date & DATE_DAY_MASK); | ||
124 | tm->tm_mon = bcd2bin((date & DATE_MONTH_MASK) >> DATE_MONTH_S); | ||
125 | tm->tm_year = bcd2bin((date & DATE_YEAR_MASK) >> DATE_YEAR_S) | ||
126 | + ((date >> DATE_CENTURY_S) & 1 ? 200 : 100); | ||
127 | tm->tm_wday = (time & TIME_DOW_MASK) >> TIME_DOW_S; | ||
128 | |||
129 | return 0; | ||
130 | } | ||
131 | |||
132 | static int vt8500_rtc_set_time(struct device *dev, struct rtc_time *tm) | ||
133 | { | ||
134 | struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); | ||
135 | |||
136 | if (tm->tm_year < 100) { | ||
137 | dev_warn(dev, "Only years 2000-2199 are supported by the " | ||
138 | "hardware!\n"); | ||
139 | return -EINVAL; | ||
140 | } | ||
141 | |||
142 | writel((bin2bcd(tm->tm_year - 100) << DATE_YEAR_S) | ||
143 | | (bin2bcd(tm->tm_mon) << DATE_MONTH_S) | ||
144 | | (bin2bcd(tm->tm_mday)), | ||
145 | vt8500_rtc->regbase + VT8500_RTC_DS); | ||
146 | writel((bin2bcd(tm->tm_wday) << TIME_DOW_S) | ||
147 | | (bin2bcd(tm->tm_hour) << TIME_HOUR_S) | ||
148 | | (bin2bcd(tm->tm_min) << TIME_MIN_S) | ||
149 | | (bin2bcd(tm->tm_sec)), | ||
150 | vt8500_rtc->regbase + VT8500_RTC_TS); | ||
151 | |||
152 | return 0; | ||
153 | } | ||
154 | |||
155 | static int vt8500_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alrm) | ||
156 | { | ||
157 | struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); | ||
158 | u32 isr, alarm; | ||
159 | |||
160 | alarm = readl(vt8500_rtc->regbase + VT8500_RTC_AS); | ||
161 | isr = readl(vt8500_rtc->regbase + VT8500_RTC_IS); | ||
162 | |||
163 | alrm->time.tm_mday = bcd2bin((alarm & ALARM_DAY_MASK) >> ALARM_DAY_S); | ||
164 | alrm->time.tm_hour = bcd2bin((alarm & TIME_HOUR_MASK) >> TIME_HOUR_S); | ||
165 | alrm->time.tm_min = bcd2bin((alarm & TIME_MIN_MASK) >> TIME_MIN_S); | ||
166 | alrm->time.tm_sec = bcd2bin((alarm & TIME_SEC_MASK)); | ||
167 | |||
168 | alrm->enabled = (alarm & ALARM_ENABLE_MASK) ? 1 : 0; | ||
169 | |||
170 | alrm->pending = (isr & 1) ? 1 : 0; | ||
171 | return rtc_valid_tm(&alrm->time); | ||
172 | } | ||
173 | |||
174 | static int vt8500_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) | ||
175 | { | ||
176 | struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); | ||
177 | |||
178 | writel((alrm->enabled ? ALARM_ENABLE_MASK : 0) | ||
179 | | (bin2bcd(alrm->time.tm_mday) << ALARM_DAY_S) | ||
180 | | (bin2bcd(alrm->time.tm_hour) << TIME_HOUR_S) | ||
181 | | (bin2bcd(alrm->time.tm_min) << TIME_MIN_S) | ||
182 | | (bin2bcd(alrm->time.tm_sec)), | ||
183 | vt8500_rtc->regbase + VT8500_RTC_AS); | ||
184 | |||
185 | return 0; | ||
186 | } | ||
187 | |||
188 | static int vt8500_alarm_irq_enable(struct device *dev, unsigned int enabled) | ||
189 | { | ||
190 | struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); | ||
191 | unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_AS); | ||
192 | |||
193 | if (enabled) | ||
194 | tmp |= ALARM_ENABLE_MASK; | ||
195 | else | ||
196 | tmp &= ~ALARM_ENABLE_MASK; | ||
197 | |||
198 | writel(tmp, vt8500_rtc->regbase + VT8500_RTC_AS); | ||
199 | return 0; | ||
200 | } | ||
201 | |||
202 | static int vt8500_update_irq_enable(struct device *dev, unsigned int enabled) | ||
203 | { | ||
204 | struct vt8500_rtc *vt8500_rtc = dev_get_drvdata(dev); | ||
205 | unsigned long tmp = readl(vt8500_rtc->regbase + VT8500_RTC_CR); | ||
206 | |||
207 | if (enabled) | ||
208 | tmp |= VT8500_RTC_CR_SM_SEC | VT8500_RTC_CR_SM_ENABLE; | ||
209 | else | ||
210 | tmp &= ~VT8500_RTC_CR_SM_ENABLE; | ||
211 | |||
212 | writel(tmp, vt8500_rtc->regbase + VT8500_RTC_CR); | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | static const struct rtc_class_ops vt8500_rtc_ops = { | ||
217 | .read_time = vt8500_rtc_read_time, | ||
218 | .set_time = vt8500_rtc_set_time, | ||
219 | .read_alarm = vt8500_rtc_read_alarm, | ||
220 | .set_alarm = vt8500_rtc_set_alarm, | ||
221 | .alarm_irq_enable = vt8500_alarm_irq_enable, | ||
222 | .update_irq_enable = vt8500_update_irq_enable, | ||
223 | }; | ||
224 | |||
225 | static int __devinit vt8500_rtc_probe(struct platform_device *pdev) | ||
226 | { | ||
227 | struct vt8500_rtc *vt8500_rtc; | ||
228 | int ret; | ||
229 | |||
230 | vt8500_rtc = kzalloc(sizeof(struct vt8500_rtc), GFP_KERNEL); | ||
231 | if (!vt8500_rtc) | ||
232 | return -ENOMEM; | ||
233 | |||
234 | spin_lock_init(&vt8500_rtc->lock); | ||
235 | platform_set_drvdata(pdev, vt8500_rtc); | ||
236 | |||
237 | vt8500_rtc->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
238 | if (!vt8500_rtc->res) { | ||
239 | dev_err(&pdev->dev, "No I/O memory resource defined\n"); | ||
240 | ret = -ENXIO; | ||
241 | goto err_free; | ||
242 | } | ||
243 | |||
244 | vt8500_rtc->irq_alarm = platform_get_irq(pdev, 0); | ||
245 | if (vt8500_rtc->irq_alarm < 0) { | ||
246 | dev_err(&pdev->dev, "No alarm IRQ resource defined\n"); | ||
247 | ret = -ENXIO; | ||
248 | goto err_free; | ||
249 | } | ||
250 | |||
251 | vt8500_rtc->irq_hz = platform_get_irq(pdev, 1); | ||
252 | if (vt8500_rtc->irq_hz < 0) { | ||
253 | dev_err(&pdev->dev, "No 1Hz IRQ resource defined\n"); | ||
254 | ret = -ENXIO; | ||
255 | goto err_free; | ||
256 | } | ||
257 | |||
258 | vt8500_rtc->res = request_mem_region(vt8500_rtc->res->start, | ||
259 | resource_size(vt8500_rtc->res), | ||
260 | "vt8500-rtc"); | ||
261 | if (vt8500_rtc->res == NULL) { | ||
262 | dev_err(&pdev->dev, "failed to request I/O memory\n"); | ||
263 | ret = -EBUSY; | ||
264 | goto err_free; | ||
265 | } | ||
266 | |||
267 | vt8500_rtc->regbase = ioremap(vt8500_rtc->res->start, | ||
268 | resource_size(vt8500_rtc->res)); | ||
269 | if (!vt8500_rtc->regbase) { | ||
270 | dev_err(&pdev->dev, "Unable to map RTC I/O memory\n"); | ||
271 | ret = -EBUSY; | ||
272 | goto err_release; | ||
273 | } | ||
274 | |||
275 | /* Enable the second/minute interrupt generation and enable RTC */ | ||
276 | writel(VT8500_RTC_CR_ENABLE | VT8500_RTC_CR_24H | ||
277 | | VT8500_RTC_CR_SM_ENABLE | VT8500_RTC_CR_SM_SEC, | ||
278 | vt8500_rtc->regbase + VT8500_RTC_CR); | ||
279 | |||
280 | vt8500_rtc->rtc = rtc_device_register("vt8500-rtc", &pdev->dev, | ||
281 | &vt8500_rtc_ops, THIS_MODULE); | ||
282 | if (IS_ERR(vt8500_rtc->rtc)) { | ||
283 | ret = PTR_ERR(vt8500_rtc->rtc); | ||
284 | dev_err(&pdev->dev, | ||
285 | "Failed to register RTC device -> %d\n", ret); | ||
286 | goto err_unmap; | ||
287 | } | ||
288 | |||
289 | ret = request_irq(vt8500_rtc->irq_hz, vt8500_rtc_irq, 0, | ||
290 | "rtc 1Hz", vt8500_rtc); | ||
291 | if (ret < 0) { | ||
292 | dev_err(&pdev->dev, "can't get irq %i, err %d\n", | ||
293 | vt8500_rtc->irq_hz, ret); | ||
294 | goto err_unreg; | ||
295 | } | ||
296 | |||
297 | ret = request_irq(vt8500_rtc->irq_alarm, vt8500_rtc_irq, 0, | ||
298 | "rtc alarm", vt8500_rtc); | ||
299 | if (ret < 0) { | ||
300 | dev_err(&pdev->dev, "can't get irq %i, err %d\n", | ||
301 | vt8500_rtc->irq_alarm, ret); | ||
302 | goto err_free_hz; | ||
303 | } | ||
304 | |||
305 | return 0; | ||
306 | |||
307 | err_free_hz: | ||
308 | free_irq(vt8500_rtc->irq_hz, vt8500_rtc); | ||
309 | err_unreg: | ||
310 | rtc_device_unregister(vt8500_rtc->rtc); | ||
311 | err_unmap: | ||
312 | iounmap(vt8500_rtc->regbase); | ||
313 | err_release: | ||
314 | release_mem_region(vt8500_rtc->res->start, | ||
315 | resource_size(vt8500_rtc->res)); | ||
316 | err_free: | ||
317 | kfree(vt8500_rtc); | ||
318 | return ret; | ||
319 | } | ||
320 | |||
321 | static int __devexit vt8500_rtc_remove(struct platform_device *pdev) | ||
322 | { | ||
323 | struct vt8500_rtc *vt8500_rtc = platform_get_drvdata(pdev); | ||
324 | |||
325 | free_irq(vt8500_rtc->irq_alarm, vt8500_rtc); | ||
326 | free_irq(vt8500_rtc->irq_hz, vt8500_rtc); | ||
327 | |||
328 | rtc_device_unregister(vt8500_rtc->rtc); | ||
329 | |||
330 | /* Disable alarm matching */ | ||
331 | writel(0, vt8500_rtc->regbase + VT8500_RTC_IS); | ||
332 | iounmap(vt8500_rtc->regbase); | ||
333 | release_mem_region(vt8500_rtc->res->start, | ||
334 | resource_size(vt8500_rtc->res)); | ||
335 | |||
336 | kfree(vt8500_rtc); | ||
337 | platform_set_drvdata(pdev, NULL); | ||
338 | |||
339 | return 0; | ||
340 | } | ||
341 | |||
342 | static struct platform_driver vt8500_rtc_driver = { | ||
343 | .probe = vt8500_rtc_probe, | ||
344 | .remove = __devexit_p(vt8500_rtc_remove), | ||
345 | .driver = { | ||
346 | .name = "vt8500-rtc", | ||
347 | .owner = THIS_MODULE, | ||
348 | }, | ||
349 | }; | ||
350 | |||
351 | static int __init vt8500_rtc_init(void) | ||
352 | { | ||
353 | return platform_driver_register(&vt8500_rtc_driver); | ||
354 | } | ||
355 | module_init(vt8500_rtc_init); | ||
356 | |||
357 | static void __exit vt8500_rtc_exit(void) | ||
358 | { | ||
359 | platform_driver_unregister(&vt8500_rtc_driver); | ||
360 | } | ||
361 | module_exit(vt8500_rtc_exit); | ||
362 | |||
363 | MODULE_AUTHOR("Alexey Charkov <alchark@gmail.com>"); | ||
364 | MODULE_DESCRIPTION("VIA VT8500 SoC Realtime Clock Driver (RTC)"); | ||
365 | MODULE_LICENSE("GPL"); | ||
366 | MODULE_ALIAS("platform:vt8500-rtc"); | ||
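All three new drivers register through the RTC class, so a quick sanity check from userspace only needs the standard character device interface; a rough sketch, assuming the new device shows up as /dev/rtc0:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/rtc.h>

int main(void)
{
	struct rtc_time tm;
	int fd = open("/dev/rtc0", O_RDONLY);	/* device node is an assumption */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, RTC_RD_TIME, &tm) < 0) {
		perror("RTC_RD_TIME");
		close(fd);
		return 1;
	}
	printf("%04d-%02d-%02d %02d:%02d:%02d\n",
	       tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
	       tm.tm_hour, tm.tm_min, tm.tm_sec);
	close(fd);
	return 0;
}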
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c index 85dddb1e4126..46784b83c5c4 100644 --- a/drivers/s390/block/dasd_diag.c +++ b/drivers/s390/block/dasd_diag.c | |||
@@ -24,7 +24,7 @@ | |||
24 | #include <asm/debug.h> | 24 | #include <asm/debug.h> |
25 | #include <asm/ebcdic.h> | 25 | #include <asm/ebcdic.h> |
26 | #include <asm/io.h> | 26 | #include <asm/io.h> |
27 | #include <asm/s390_ext.h> | 27 | #include <asm/irq.h> |
28 | #include <asm/vtoc.h> | 28 | #include <asm/vtoc.h> |
29 | #include <asm/diag.h> | 29 | #include <asm/diag.h> |
30 | 30 | ||
@@ -642,7 +642,7 @@ dasd_diag_init(void) | |||
642 | } | 642 | } |
643 | ASCEBC(dasd_diag_discipline.ebcname, 4); | 643 | ASCEBC(dasd_diag_discipline.ebcname, 4); |
644 | 644 | ||
645 | ctl_set_bit(0, 9); | 645 | service_subclass_irq_register(); |
646 | register_external_interrupt(0x2603, dasd_ext_handler); | 646 | register_external_interrupt(0x2603, dasd_ext_handler); |
647 | dasd_diag_discipline_pointer = &dasd_diag_discipline; | 647 | dasd_diag_discipline_pointer = &dasd_diag_discipline; |
648 | return 0; | 648 | return 0; |
@@ -652,7 +652,7 @@ static void __exit | |||
652 | dasd_diag_cleanup(void) | 652 | dasd_diag_cleanup(void) |
653 | { | 653 | { |
654 | unregister_external_interrupt(0x2603, dasd_ext_handler); | 654 | unregister_external_interrupt(0x2603, dasd_ext_handler); |
655 | ctl_clear_bit(0, 9); | 655 | service_subclass_irq_unregister(); |
656 | dasd_diag_discipline_pointer = NULL; | 656 | dasd_diag_discipline_pointer = NULL; |
657 | } | 657 | } |
658 | 658 | ||
diff --git a/drivers/s390/char/sclp.c b/drivers/s390/char/sclp.c index b76c61f82485..eaa7e78186f9 100644 --- a/drivers/s390/char/sclp.c +++ b/drivers/s390/char/sclp.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/suspend.h> | 19 | #include <linux/suspend.h> |
20 | #include <linux/completion.h> | 20 | #include <linux/completion.h> |
21 | #include <linux/platform_device.h> | 21 | #include <linux/platform_device.h> |
22 | #include <asm/s390_ext.h> | ||
23 | #include <asm/types.h> | 22 | #include <asm/types.h> |
24 | #include <asm/irq.h> | 23 | #include <asm/irq.h> |
25 | 24 | ||
@@ -885,12 +884,12 @@ sclp_check_interface(void) | |||
885 | spin_unlock_irqrestore(&sclp_lock, flags); | 884 | spin_unlock_irqrestore(&sclp_lock, flags); |
886 | /* Enable service-signal interruption - needs to happen | 885 | /* Enable service-signal interruption - needs to happen |
887 | * with IRQs enabled. */ | 886 | * with IRQs enabled. */ |
888 | ctl_set_bit(0, 9); | 887 | service_subclass_irq_register(); |
889 | /* Wait for signal from interrupt or timeout */ | 888 | /* Wait for signal from interrupt or timeout */ |
890 | sclp_sync_wait(); | 889 | sclp_sync_wait(); |
891 | /* Disable service-signal interruption - needs to happen | 890 | /* Disable service-signal interruption - needs to happen |
892 | * with IRQs enabled. */ | 891 | * with IRQs enabled. */ |
893 | ctl_clear_bit(0,9); | 892 | service_subclass_irq_unregister(); |
894 | spin_lock_irqsave(&sclp_lock, flags); | 893 | spin_lock_irqsave(&sclp_lock, flags); |
895 | del_timer(&sclp_request_timer); | 894 | del_timer(&sclp_request_timer); |
896 | if (sclp_init_req.status == SCLP_REQ_DONE && | 895 | if (sclp_init_req.status == SCLP_REQ_DONE && |
@@ -1070,7 +1069,7 @@ sclp_init(void) | |||
1070 | spin_unlock_irqrestore(&sclp_lock, flags); | 1069 | spin_unlock_irqrestore(&sclp_lock, flags); |
1071 | /* Enable service-signal external interruption - needs to happen with | 1070 | /* Enable service-signal external interruption - needs to happen with |
1072 | * IRQs enabled. */ | 1071 | * IRQs enabled. */ |
1073 | ctl_set_bit(0, 9); | 1072 | service_subclass_irq_register(); |
1074 | sclp_init_mask(1); | 1073 | sclp_init_mask(1); |
1075 | return 0; | 1074 | return 0; |
1076 | 1075 | ||
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/kvm/kvm_virtio.c index 607998f0b7d8..aec60d55b10d 100644 --- a/drivers/s390/kvm/kvm_virtio.c +++ b/drivers/s390/kvm/kvm_virtio.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <asm/kvm_para.h> | 25 | #include <asm/kvm_para.h> |
26 | #include <asm/kvm_virtio.h> | 26 | #include <asm/kvm_virtio.h> |
27 | #include <asm/setup.h> | 27 | #include <asm/setup.h> |
28 | #include <asm/s390_ext.h> | ||
29 | #include <asm/irq.h> | 28 | #include <asm/irq.h> |
30 | 29 | ||
31 | #define VIRTIO_SUBCODE_64 0x0D00 | 30 | #define VIRTIO_SUBCODE_64 0x0D00 |
@@ -441,7 +440,7 @@ static int __init kvm_devices_init(void) | |||
441 | 440 | ||
442 | INIT_WORK(&hotplug_work, hotplug_devices); | 441 | INIT_WORK(&hotplug_work, hotplug_devices); |
443 | 442 | ||
444 | ctl_set_bit(0, 9); | 443 | service_subclass_irq_register(); |
445 | register_external_interrupt(0x2603, kvm_extint_handler); | 444 | register_external_interrupt(0x2603, kvm_extint_handler); |
446 | 445 | ||
447 | scan_devices(); | 446 | scan_devices(); |
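
The dasd_diag, sclp and kvm_virtio hunks above all replace raw ctl_set_bit(0, 9)/ctl_clear_bit(0, 9) calls with service_subclass_irq_register()/service_subclass_irq_unregister(). The point is that control-register 0 bit 9 (the service-signal external-interrupt subclass) is now shared by several independent users, so it has to be reference counted instead of being cleared by whichever user exits first. The sketch below is only a plausible shape for such a wrapper, not the actual arch/s390 implementation; ctl_set_bit()/ctl_clear_bit() are the existing s390 control-register helpers, and the example_* names and ext_sc_lock are invented here.

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(ext_sc_lock);
    static int ext_sc_refcount;

    void example_service_subclass_irq_register(void)
    {
        spin_lock(&ext_sc_lock);
        if (!ext_sc_refcount++)
            ctl_set_bit(0, 9);      /* first user enables the subclass */
        spin_unlock(&ext_sc_lock);
    }

    void example_service_subclass_irq_unregister(void)
    {
        spin_lock(&ext_sc_lock);
        if (!--ext_sc_refcount)
            ctl_clear_bit(0, 9);    /* last user disables it again */
        spin_unlock(&ext_sc_lock);
    }

With this, a driver such as sclp can enable and disable the subclass around its own use without knocking out dasd_diag or kvm_virtio.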
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c index 4ff26521d75f..3382475dc22d 100644 --- a/drivers/scsi/aacraid/linit.c +++ b/drivers/scsi/aacraid/linit.c | |||
@@ -59,7 +59,6 @@ | |||
59 | #ifndef AAC_DRIVER_BRANCH | 59 | #ifndef AAC_DRIVER_BRANCH |
60 | #define AAC_DRIVER_BRANCH "" | 60 | #define AAC_DRIVER_BRANCH "" |
61 | #endif | 61 | #endif |
62 | #define AAC_DRIVER_BUILD_DATE __DATE__ " " __TIME__ | ||
63 | #define AAC_DRIVERNAME "aacraid" | 62 | #define AAC_DRIVERNAME "aacraid" |
64 | 63 | ||
65 | #ifdef AAC_DRIVER_BUILD | 64 | #ifdef AAC_DRIVER_BUILD |
@@ -67,7 +66,7 @@ | |||
67 | #define str(x) _str(x) | 66 | #define str(x) _str(x) |
68 | #define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH | 67 | #define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION "[" str(AAC_DRIVER_BUILD) "]" AAC_DRIVER_BRANCH |
69 | #else | 68 | #else |
70 | #define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH " " AAC_DRIVER_BUILD_DATE | 69 | #define AAC_DRIVER_FULL_VERSION AAC_DRIVER_VERSION AAC_DRIVER_BRANCH |
71 | #endif | 70 | #endif |
72 | 71 | ||
73 | MODULE_AUTHOR("Red Hat Inc and Adaptec"); | 72 | MODULE_AUTHOR("Red Hat Inc and Adaptec"); |
diff --git a/drivers/scsi/in2000.c b/drivers/scsi/in2000.c index 92109b126391..112f1bec7756 100644 --- a/drivers/scsi/in2000.c +++ b/drivers/scsi/in2000.c | |||
@@ -2227,7 +2227,7 @@ static int in2000_proc_info(struct Scsi_Host *instance, char *buf, char **start, | |||
2227 | bp = buf; | 2227 | bp = buf; |
2228 | *bp = '\0'; | 2228 | *bp = '\0'; |
2229 | if (hd->proc & PR_VERSION) { | 2229 | if (hd->proc & PR_VERSION) { |
2230 | sprintf(tbuf, "\nVersion %s - %s. Compiled %s %s", IN2000_VERSION, IN2000_DATE, __DATE__, __TIME__); | 2230 | sprintf(tbuf, "\nVersion %s - %s.", IN2000_VERSION, IN2000_DATE); |
2231 | strcat(bp, tbuf); | 2231 | strcat(bp, tbuf); |
2232 | } | 2232 | } |
2233 | if (hd->proc & PR_INFO) { | 2233 | if (hd->proc & PR_INFO) { |
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c index 7f636b118287..fca6a8953070 100644 --- a/drivers/scsi/pmcraid.c +++ b/drivers/scsi/pmcraid.c | |||
@@ -4252,8 +4252,8 @@ static ssize_t pmcraid_show_drv_version( | |||
4252 | char *buf | 4252 | char *buf |
4253 | ) | 4253 | ) |
4254 | { | 4254 | { |
4255 | return snprintf(buf, PAGE_SIZE, "version: %s, build date: %s\n", | 4255 | return snprintf(buf, PAGE_SIZE, "version: %s\n", |
4256 | PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE); | 4256 | PMCRAID_DRIVER_VERSION); |
4257 | } | 4257 | } |
4258 | 4258 | ||
4259 | static struct device_attribute pmcraid_driver_version_attr = { | 4259 | static struct device_attribute pmcraid_driver_version_attr = { |
@@ -6096,9 +6096,8 @@ static int __init pmcraid_init(void) | |||
6096 | dev_t dev; | 6096 | dev_t dev; |
6097 | int error; | 6097 | int error; |
6098 | 6098 | ||
6099 | pmcraid_info("%s Device Driver version: %s %s\n", | 6099 | pmcraid_info("%s Device Driver version: %s\n", |
6100 | PMCRAID_DRIVER_NAME, | 6100 | PMCRAID_DRIVER_NAME, PMCRAID_DRIVER_VERSION); |
6101 | PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE); | ||
6102 | 6101 | ||
6103 | error = alloc_chrdev_region(&dev, 0, | 6102 | error = alloc_chrdev_region(&dev, 0, |
6104 | PMCRAID_MAX_ADAPTERS, | 6103 | PMCRAID_MAX_ADAPTERS, |
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h index 34e4c915002e..f920baf3ff24 100644 --- a/drivers/scsi/pmcraid.h +++ b/drivers/scsi/pmcraid.h | |||
@@ -43,7 +43,6 @@ | |||
43 | #define PMCRAID_DRIVER_NAME "PMC MaxRAID" | 43 | #define PMCRAID_DRIVER_NAME "PMC MaxRAID" |
44 | #define PMCRAID_DEVFILE "pmcsas" | 44 | #define PMCRAID_DEVFILE "pmcsas" |
45 | #define PMCRAID_DRIVER_VERSION "1.0.3" | 45 | #define PMCRAID_DRIVER_VERSION "1.0.3" |
46 | #define PMCRAID_DRIVER_DATE __DATE__ | ||
47 | 46 | ||
48 | #define PMCRAID_FW_VERSION_1 0x002 | 47 | #define PMCRAID_FW_VERSION_1 0x002 |
49 | 48 | ||
diff --git a/drivers/scsi/wd33c93.c b/drivers/scsi/wd33c93.c index 97ae716134d0..c0ee4ea28a19 100644 --- a/drivers/scsi/wd33c93.c +++ b/drivers/scsi/wd33c93.c | |||
@@ -2051,8 +2051,7 @@ wd33c93_init(struct Scsi_Host *instance, const wd33c93_regs regs, | |||
2051 | for (i = 0; i < MAX_SETUP_ARGS; i++) | 2051 | for (i = 0; i < MAX_SETUP_ARGS; i++) |
2052 | printk("%s,", setup_args[i]); | 2052 | printk("%s,", setup_args[i]); |
2053 | printk("\n"); | 2053 | printk("\n"); |
2054 | printk(" Version %s - %s, Compiled %s at %s\n", | 2054 | printk(" Version %s - %s\n", WD33C93_VERSION, WD33C93_DATE); |
2055 | WD33C93_VERSION, WD33C93_DATE, __DATE__, __TIME__); | ||
2056 | } | 2055 | } |
2057 | 2056 | ||
2058 | int | 2057 | int |
@@ -2132,8 +2131,8 @@ wd33c93_proc_info(struct Scsi_Host *instance, char *buf, char **start, off_t off | |||
2132 | bp = buf; | 2131 | bp = buf; |
2133 | *bp = '\0'; | 2132 | *bp = '\0'; |
2134 | if (hd->proc & PR_VERSION) { | 2133 | if (hd->proc & PR_VERSION) { |
2135 | sprintf(tbuf, "\nVersion %s - %s. Compiled %s %s", | 2134 | sprintf(tbuf, "\nVersion %s - %s.", |
2136 | WD33C93_VERSION, WD33C93_DATE, __DATE__, __TIME__); | 2135 | WD33C93_VERSION, WD33C93_DATE); |
2137 | strcat(bp, tbuf); | 2136 | strcat(bp, tbuf); |
2138 | } | 2137 | } |
2139 | if (hd->proc & PR_INFO) { | 2138 | if (hd->proc & PR_INFO) { |
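
The aacraid, in2000, pmcraid and wd33c93 hunks (and the rioinit, cyclades and nozomi ones further down) all drop __DATE__/__TIME__ from version banners, so rebuilding unchanged source produces identical binaries and the banner carries only information that is stable across builds. A minimal sketch of the resulting pattern; the EXAMPLE_* macros and example_init() are hypothetical, not taken from any of these drivers.

    #include <linux/kernel.h>
    #include <linux/module.h>

    #define EXAMPLE_DRV_NAME    "exampledrv"
    #define EXAMPLE_DRV_VERSION "1.0.3"

    static int __init example_init(void)
    {
        /* version only -- no __DATE__/__TIME__, so the same source
         * always produces the same banner and the same module image */
        pr_info("%s driver version %s\n",
                EXAMPLE_DRV_NAME, EXAMPLE_DRV_VERSION);
        return 0;
    }

    module_init(example_init);
    MODULE_LICENSE("GPL");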
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig index fc14b8dea0d7..fbd96b29530d 100644 --- a/drivers/spi/Kconfig +++ b/drivers/spi/Kconfig | |||
@@ -271,8 +271,8 @@ config SPI_ORION | |||
271 | This enables using the SPI master controller on the Orion chips. | 271 | This enables using the SPI master controller on the Orion chips. |
272 | 272 | ||
273 | config SPI_PL022 | 273 | config SPI_PL022 |
274 | tristate "ARM AMBA PL022 SSP controller (EXPERIMENTAL)" | 274 | tristate "ARM AMBA PL022 SSP controller" |
275 | depends on ARM_AMBA && EXPERIMENTAL | 275 | depends on ARM_AMBA |
276 | default y if MACH_U300 | 276 | default y if MACH_U300 |
277 | default y if ARCH_REALVIEW | 277 | default y if ARCH_REALVIEW |
278 | default y if INTEGRATOR_IMPD1 | 278 | default y if INTEGRATOR_IMPD1 |
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c index 08de58e7f59f..6a9e58dd36c7 100644 --- a/drivers/spi/amba-pl022.c +++ b/drivers/spi/amba-pl022.c | |||
@@ -24,11 +24,6 @@ | |||
24 | * GNU General Public License for more details. | 24 | * GNU General Public License for more details. |
25 | */ | 25 | */ |
26 | 26 | ||
27 | /* | ||
28 | * TODO: | ||
29 | * - add timeout on polled transfers | ||
30 | */ | ||
31 | |||
32 | #include <linux/init.h> | 27 | #include <linux/init.h> |
33 | #include <linux/module.h> | 28 | #include <linux/module.h> |
34 | #include <linux/device.h> | 29 | #include <linux/device.h> |
@@ -287,6 +282,8 @@ | |||
287 | 282 | ||
288 | #define CLEAR_ALL_INTERRUPTS 0x3 | 283 | #define CLEAR_ALL_INTERRUPTS 0x3 |
289 | 284 | ||
285 | #define SPI_POLLING_TIMEOUT 1000 | ||
286 | |||
290 | 287 | ||
291 | /* | 288 | /* |
292 | * The type of reading going on on this chip | 289 | * The type of reading going on on this chip |
@@ -1063,7 +1060,7 @@ static int __init pl022_dma_probe(struct pl022 *pl022) | |||
1063 | pl022->master_info->dma_filter, | 1060 | pl022->master_info->dma_filter, |
1064 | pl022->master_info->dma_rx_param); | 1061 | pl022->master_info->dma_rx_param); |
1065 | if (!pl022->dma_rx_channel) { | 1062 | if (!pl022->dma_rx_channel) { |
1066 | dev_err(&pl022->adev->dev, "no RX DMA channel!\n"); | 1063 | dev_dbg(&pl022->adev->dev, "no RX DMA channel!\n"); |
1067 | goto err_no_rxchan; | 1064 | goto err_no_rxchan; |
1068 | } | 1065 | } |
1069 | 1066 | ||
@@ -1071,13 +1068,13 @@ static int __init pl022_dma_probe(struct pl022 *pl022) | |||
1071 | pl022->master_info->dma_filter, | 1068 | pl022->master_info->dma_filter, |
1072 | pl022->master_info->dma_tx_param); | 1069 | pl022->master_info->dma_tx_param); |
1073 | if (!pl022->dma_tx_channel) { | 1070 | if (!pl022->dma_tx_channel) { |
1074 | dev_err(&pl022->adev->dev, "no TX DMA channel!\n"); | 1071 | dev_dbg(&pl022->adev->dev, "no TX DMA channel!\n"); |
1075 | goto err_no_txchan; | 1072 | goto err_no_txchan; |
1076 | } | 1073 | } |
1077 | 1074 | ||
1078 | pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); | 1075 | pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); |
1079 | if (!pl022->dummypage) { | 1076 | if (!pl022->dummypage) { |
1080 | dev_err(&pl022->adev->dev, "no DMA dummypage!\n"); | 1077 | dev_dbg(&pl022->adev->dev, "no DMA dummypage!\n"); |
1081 | goto err_no_dummypage; | 1078 | goto err_no_dummypage; |
1082 | } | 1079 | } |
1083 | 1080 | ||
@@ -1093,6 +1090,8 @@ err_no_txchan: | |||
1093 | dma_release_channel(pl022->dma_rx_channel); | 1090 | dma_release_channel(pl022->dma_rx_channel); |
1094 | pl022->dma_rx_channel = NULL; | 1091 | pl022->dma_rx_channel = NULL; |
1095 | err_no_rxchan: | 1092 | err_no_rxchan: |
1093 | dev_err(&pl022->adev->dev, | ||
1094 | "Failed to work in dma mode, work without dma!\n"); | ||
1096 | return -ENODEV; | 1095 | return -ENODEV; |
1097 | } | 1096 | } |
1098 | 1097 | ||
@@ -1378,6 +1377,7 @@ static void do_polling_transfer(struct pl022 *pl022) | |||
1378 | struct spi_transfer *transfer = NULL; | 1377 | struct spi_transfer *transfer = NULL; |
1379 | struct spi_transfer *previous = NULL; | 1378 | struct spi_transfer *previous = NULL; |
1380 | struct chip_data *chip; | 1379 | struct chip_data *chip; |
1380 | unsigned long time, timeout; | ||
1381 | 1381 | ||
1382 | chip = pl022->cur_chip; | 1382 | chip = pl022->cur_chip; |
1383 | message = pl022->cur_msg; | 1383 | message = pl022->cur_msg; |
@@ -1415,9 +1415,19 @@ static void do_polling_transfer(struct pl022 *pl022) | |||
1415 | SSP_CR1(pl022->virtbase)); | 1415 | SSP_CR1(pl022->virtbase)); |
1416 | 1416 | ||
1417 | dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); | 1417 | dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); |
1418 | /* FIXME: insert a timeout so we don't hang here indefinitely */ | 1418 | |
1419 | while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) | 1419 | timeout = jiffies + msecs_to_jiffies(SPI_POLLING_TIMEOUT); |
1420 | while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) { | ||
1421 | time = jiffies; | ||
1420 | readwriter(pl022); | 1422 | readwriter(pl022); |
1423 | if (time_after(time, timeout)) { | ||
1424 | dev_warn(&pl022->adev->dev, | ||
1425 | "%s: timeout!\n", __func__); | ||
1426 | message->state = STATE_ERROR; | ||
1427 | goto out; | ||
1428 | } | ||
1429 | cpu_relax(); | ||
1430 | } | ||
1421 | 1431 | ||
1422 | /* Update total byte transferred */ | 1432 | /* Update total byte transferred */ |
1423 | message->actual_length += pl022->cur_transfer->len; | 1433 | message->actual_length += pl022->cur_transfer->len; |
@@ -1426,7 +1436,7 @@ static void do_polling_transfer(struct pl022 *pl022) | |||
1426 | /* Move to next transfer */ | 1436 | /* Move to next transfer */ |
1427 | message->state = next_transfer(pl022); | 1437 | message->state = next_transfer(pl022); |
1428 | } | 1438 | } |
1429 | 1439 | out: | |
1430 | /* Handle end of message */ | 1440 | /* Handle end of message */ |
1431 | if (message->state == STATE_DONE) | 1441 | if (message->state == STATE_DONE) |
1432 | message->status = 0; | 1442 | message->status = 0; |
@@ -2107,7 +2117,7 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id) | |||
2107 | if (platform_info->enable_dma) { | 2117 | if (platform_info->enable_dma) { |
2108 | status = pl022_dma_probe(pl022); | 2118 | status = pl022_dma_probe(pl022); |
2109 | if (status != 0) | 2119 | if (status != 0) |
2110 | goto err_no_dma; | 2120 | platform_info->enable_dma = 0; |
2111 | } | 2121 | } |
2112 | 2122 | ||
2113 | /* Initialize and start queue */ | 2123 | /* Initialize and start queue */ |
@@ -2143,7 +2153,6 @@ pl022_probe(struct amba_device *adev, const struct amba_id *id) | |||
2143 | err_init_queue: | 2153 | err_init_queue: |
2144 | destroy_queue(pl022); | 2154 | destroy_queue(pl022); |
2145 | pl022_dma_remove(pl022); | 2155 | pl022_dma_remove(pl022); |
2146 | err_no_dma: | ||
2147 | free_irq(adev->irq[0], pl022); | 2156 | free_irq(adev->irq[0], pl022); |
2148 | err_no_irq: | 2157 | err_no_irq: |
2149 | clk_put(pl022->clk); | 2158 | clk_put(pl022->clk); |
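
The do_polling_transfer() change above bounds the former endless busy-wait with a jiffies deadline (SPI_POLLING_TIMEOUT milliseconds). The generic shape of that pattern is shown below as a hypothetical helper; ready() is a caller-supplied predicate and example_poll_timeout() is not part of the driver.

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>
    #include <asm/processor.h>

    /* Poll a condition for at most timeout_ms, then give up. */
    static int example_poll_timeout(bool (*ready)(void *data), void *data,
                                    unsigned int timeout_ms)
    {
        unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

        while (!ready(data)) {
            if (time_after(jiffies, deadline))
                return -ETIMEDOUT;  /* let the caller report the failure */
            cpu_relax();
        }
        return 0;
    }

In the hunk itself the deadline check wraps readwriter() directly and flags STATE_ERROR on the message instead of returning an errno, but the jiffies arithmetic is the same.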
diff --git a/drivers/spi/dw_spi.c b/drivers/spi/dw_spi.c index 871e337c917f..919fa9d9e16b 100644 --- a/drivers/spi/dw_spi.c +++ b/drivers/spi/dw_spi.c | |||
@@ -58,8 +58,6 @@ struct chip_data { | |||
58 | u8 bits_per_word; | 58 | u8 bits_per_word; |
59 | u16 clk_div; /* baud rate divider */ | 59 | u16 clk_div; /* baud rate divider */ |
60 | u32 speed_hz; /* baud rate */ | 60 | u32 speed_hz; /* baud rate */ |
61 | int (*write)(struct dw_spi *dws); | ||
62 | int (*read)(struct dw_spi *dws); | ||
63 | void (*cs_control)(u32 command); | 61 | void (*cs_control)(u32 command); |
64 | }; | 62 | }; |
65 | 63 | ||
@@ -162,107 +160,70 @@ static inline void mrst_spi_debugfs_remove(struct dw_spi *dws) | |||
162 | } | 160 | } |
163 | #endif /* CONFIG_DEBUG_FS */ | 161 | #endif /* CONFIG_DEBUG_FS */ |
164 | 162 | ||
165 | static void wait_till_not_busy(struct dw_spi *dws) | 163 | /* Return the max entries we can fill into tx fifo */ |
164 | static inline u32 tx_max(struct dw_spi *dws) | ||
166 | { | 165 | { |
167 | unsigned long end = jiffies + 1 + usecs_to_jiffies(5000); | 166 | u32 tx_left, tx_room, rxtx_gap; |
168 | 167 | ||
169 | while (time_before(jiffies, end)) { | 168 | tx_left = (dws->tx_end - dws->tx) / dws->n_bytes; |
170 | if (!(dw_readw(dws, sr) & SR_BUSY)) | 169 | tx_room = dws->fifo_len - dw_readw(dws, txflr); |
171 | return; | ||
172 | cpu_relax(); | ||
173 | } | ||
174 | dev_err(&dws->master->dev, | ||
175 | "DW SPI: Status keeps busy for 5000us after a read/write!\n"); | ||
176 | } | ||
177 | |||
178 | static void flush(struct dw_spi *dws) | ||
179 | { | ||
180 | while (dw_readw(dws, sr) & SR_RF_NOT_EMPT) { | ||
181 | dw_readw(dws, dr); | ||
182 | cpu_relax(); | ||
183 | } | ||
184 | |||
185 | wait_till_not_busy(dws); | ||
186 | } | ||
187 | |||
188 | static int null_writer(struct dw_spi *dws) | ||
189 | { | ||
190 | u8 n_bytes = dws->n_bytes; | ||
191 | 170 | ||
192 | if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL) | 171 | /* |
193 | || (dws->tx == dws->tx_end)) | 172 | * Another concern is about the tx/rx mismatch, we |
194 | return 0; | 173 | * though to use (dws->fifo_len - rxflr - txflr) as |
194 | return 0; | 173 | * thought to use (dws->fifo_len - rxflr - txflr) as |
195 | dw_writew(dws, dr, 0); | 174 | * one maximum value for tx, but it doesn't cover the |
196 | dws->tx += n_bytes; | 175 | * data which is out of tx/rx fifo and inside the |
176 | * shift registers. So a control from sw point of | ||
177 | * view is taken. | ||
178 | */ | ||
179 | rxtx_gap = ((dws->rx_end - dws->rx) - (dws->tx_end - dws->tx)) | ||
180 | / dws->n_bytes; | ||
197 | 181 | ||
198 | wait_till_not_busy(dws); | 182 | return min3(tx_left, tx_room, (u32) (dws->fifo_len - rxtx_gap)); |
199 | return 1; | ||
200 | } | 183 | } |
201 | 184 | ||
202 | static int null_reader(struct dw_spi *dws) | 185 | /* Return the max entries we should read out of rx fifo */ |
186 | static inline u32 rx_max(struct dw_spi *dws) | ||
203 | { | 187 | { |
204 | u8 n_bytes = dws->n_bytes; | 188 | u32 rx_left = (dws->rx_end - dws->rx) / dws->n_bytes; |
205 | 189 | ||
206 | while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT) | 190 | return min(rx_left, (u32)dw_readw(dws, rxflr)); |
207 | && (dws->rx < dws->rx_end)) { | ||
208 | dw_readw(dws, dr); | ||
209 | dws->rx += n_bytes; | ||
210 | } | ||
211 | wait_till_not_busy(dws); | ||
212 | return dws->rx == dws->rx_end; | ||
213 | } | 191 | } |
214 | 192 | ||
215 | static int u8_writer(struct dw_spi *dws) | 193 | static void dw_writer(struct dw_spi *dws) |
216 | { | 194 | { |
217 | if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL) | 195 | u32 max = tx_max(dws); |
218 | || (dws->tx == dws->tx_end)) | 196 | u16 txw = 0; |
219 | return 0; | ||
220 | 197 | ||
221 | dw_writew(dws, dr, *(u8 *)(dws->tx)); | 198 | while (max--) { |
222 | ++dws->tx; | 199 | /* Set the tx word if the transfer's original "tx" is not null */ |
223 | 200 | if (dws->tx_end - dws->len) { | |
224 | wait_till_not_busy(dws); | 201 | if (dws->n_bytes == 1) |
225 | return 1; | 202 | txw = *(u8 *)(dws->tx); |
226 | } | 203 | else |
227 | 204 | txw = *(u16 *)(dws->tx); | |
228 | static int u8_reader(struct dw_spi *dws) | 205 | } |
229 | { | 206 | dw_writew(dws, dr, txw); |
230 | while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT) | 207 | dws->tx += dws->n_bytes; |
231 | && (dws->rx < dws->rx_end)) { | ||
232 | *(u8 *)(dws->rx) = dw_readw(dws, dr); | ||
233 | ++dws->rx; | ||
234 | } | 208 | } |
235 | |||
236 | wait_till_not_busy(dws); | ||
237 | return dws->rx == dws->rx_end; | ||
238 | } | 209 | } |
239 | 210 | ||
240 | static int u16_writer(struct dw_spi *dws) | 211 | static void dw_reader(struct dw_spi *dws) |
241 | { | 212 | { |
242 | if (!(dw_readw(dws, sr) & SR_TF_NOT_FULL) | 213 | u32 max = rx_max(dws); |
243 | || (dws->tx == dws->tx_end)) | 214 | u16 rxw; |
244 | return 0; | ||
245 | 215 | ||
246 | dw_writew(dws, dr, *(u16 *)(dws->tx)); | 216 | while (max--) { |
247 | dws->tx += 2; | 217 | rxw = dw_readw(dws, dr); |
248 | 218 | /* Care rx only if the transfer's original "rx" is not null */ | |
249 | wait_till_not_busy(dws); | 219 | if (dws->rx_end - dws->len) { |
250 | return 1; | 220 | if (dws->n_bytes == 1) |
251 | } | 221 | *(u8 *)(dws->rx) = rxw; |
252 | 222 | else | |
253 | static int u16_reader(struct dw_spi *dws) | 223 | *(u16 *)(dws->rx) = rxw; |
254 | { | 224 | } |
255 | u16 temp; | 225 | dws->rx += dws->n_bytes; |
256 | |||
257 | while ((dw_readw(dws, sr) & SR_RF_NOT_EMPT) | ||
258 | && (dws->rx < dws->rx_end)) { | ||
259 | temp = dw_readw(dws, dr); | ||
260 | *(u16 *)(dws->rx) = temp; | ||
261 | dws->rx += 2; | ||
262 | } | 226 | } |
263 | |||
264 | wait_till_not_busy(dws); | ||
265 | return dws->rx == dws->rx_end; | ||
266 | } | 227 | } |
267 | 228 | ||
268 | static void *next_transfer(struct dw_spi *dws) | 229 | static void *next_transfer(struct dw_spi *dws) |
@@ -334,8 +295,7 @@ static void giveback(struct dw_spi *dws) | |||
334 | 295 | ||
335 | static void int_error_stop(struct dw_spi *dws, const char *msg) | 296 | static void int_error_stop(struct dw_spi *dws, const char *msg) |
336 | { | 297 | { |
337 | /* Stop and reset hw */ | 298 | /* Stop the hw */ |
338 | flush(dws); | ||
339 | spi_enable_chip(dws, 0); | 299 | spi_enable_chip(dws, 0); |
340 | 300 | ||
341 | dev_err(&dws->master->dev, "%s\n", msg); | 301 | dev_err(&dws->master->dev, "%s\n", msg); |
@@ -362,35 +322,28 @@ EXPORT_SYMBOL_GPL(dw_spi_xfer_done); | |||
362 | 322 | ||
363 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) | 323 | static irqreturn_t interrupt_transfer(struct dw_spi *dws) |
364 | { | 324 | { |
365 | u16 irq_status, irq_mask = 0x3f; | 325 | u16 irq_status = dw_readw(dws, isr); |
366 | u32 int_level = dws->fifo_len / 2; | ||
367 | u32 left; | ||
368 | 326 | ||
369 | irq_status = dw_readw(dws, isr) & irq_mask; | ||
370 | /* Error handling */ | 327 | /* Error handling */ |
371 | if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) { | 328 | if (irq_status & (SPI_INT_TXOI | SPI_INT_RXOI | SPI_INT_RXUI)) { |
372 | dw_readw(dws, txoicr); | 329 | dw_readw(dws, txoicr); |
373 | dw_readw(dws, rxoicr); | 330 | dw_readw(dws, rxoicr); |
374 | dw_readw(dws, rxuicr); | 331 | dw_readw(dws, rxuicr); |
375 | int_error_stop(dws, "interrupt_transfer: fifo overrun"); | 332 | int_error_stop(dws, "interrupt_transfer: fifo overrun/underrun"); |
376 | return IRQ_HANDLED; | 333 | return IRQ_HANDLED; |
377 | } | 334 | } |
378 | 335 | ||
336 | dw_reader(dws); | ||
337 | if (dws->rx_end == dws->rx) { | ||
338 | spi_mask_intr(dws, SPI_INT_TXEI); | ||
339 | dw_spi_xfer_done(dws); | ||
340 | return IRQ_HANDLED; | ||
341 | } | ||
379 | if (irq_status & SPI_INT_TXEI) { | 342 | if (irq_status & SPI_INT_TXEI) { |
380 | spi_mask_intr(dws, SPI_INT_TXEI); | 343 | spi_mask_intr(dws, SPI_INT_TXEI); |
381 | 344 | dw_writer(dws); | |
382 | left = (dws->tx_end - dws->tx) / dws->n_bytes; | 345 | /* Enable TX irq always, it will be disabled when RX finished */ |
383 | left = (left > int_level) ? int_level : left; | 346 | spi_umask_intr(dws, SPI_INT_TXEI); |
384 | |||
385 | while (left--) | ||
386 | dws->write(dws); | ||
387 | dws->read(dws); | ||
388 | |||
389 | /* Re-enable the IRQ if there is still data left to tx */ | ||
390 | if (dws->tx_end > dws->tx) | ||
391 | spi_umask_intr(dws, SPI_INT_TXEI); | ||
392 | else | ||
393 | dw_spi_xfer_done(dws); | ||
394 | } | 347 | } |
395 | 348 | ||
396 | return IRQ_HANDLED; | 349 | return IRQ_HANDLED; |
@@ -399,15 +352,13 @@ static irqreturn_t interrupt_transfer(struct dw_spi *dws) | |||
399 | static irqreturn_t dw_spi_irq(int irq, void *dev_id) | 352 | static irqreturn_t dw_spi_irq(int irq, void *dev_id) |
400 | { | 353 | { |
401 | struct dw_spi *dws = dev_id; | 354 | struct dw_spi *dws = dev_id; |
402 | u16 irq_status, irq_mask = 0x3f; | 355 | u16 irq_status = dw_readw(dws, isr) & 0x3f; |
403 | 356 | ||
404 | irq_status = dw_readw(dws, isr) & irq_mask; | ||
405 | if (!irq_status) | 357 | if (!irq_status) |
406 | return IRQ_NONE; | 358 | return IRQ_NONE; |
407 | 359 | ||
408 | if (!dws->cur_msg) { | 360 | if (!dws->cur_msg) { |
409 | spi_mask_intr(dws, SPI_INT_TXEI); | 361 | spi_mask_intr(dws, SPI_INT_TXEI); |
410 | /* Never fail */ | ||
411 | return IRQ_HANDLED; | 362 | return IRQ_HANDLED; |
412 | } | 363 | } |
413 | 364 | ||
@@ -417,13 +368,11 @@ static irqreturn_t dw_spi_irq(int irq, void *dev_id) | |||
417 | /* Must be called inside pump_transfers() */ | 368 | /* Must be called inside pump_transfers() */ |
418 | static void poll_transfer(struct dw_spi *dws) | 369 | static void poll_transfer(struct dw_spi *dws) |
419 | { | 370 | { |
420 | while (dws->write(dws)) | 371 | do { |
421 | dws->read(dws); | 372 | dw_writer(dws); |
422 | /* | 373 | dw_reader(dws); |
423 | * There is a possibility that the last word of a transaction | 374 | cpu_relax(); |
424 | * will be lost if data is not ready. Re-read to solve this issue. | 375 | } while (dws->rx_end > dws->rx); |
425 | */ | ||
426 | dws->read(dws); | ||
427 | 376 | ||
428 | dw_spi_xfer_done(dws); | 377 | dw_spi_xfer_done(dws); |
429 | } | 378 | } |
@@ -483,8 +432,6 @@ static void pump_transfers(unsigned long data) | |||
483 | dws->tx_end = dws->tx + transfer->len; | 432 | dws->tx_end = dws->tx + transfer->len; |
484 | dws->rx = transfer->rx_buf; | 433 | dws->rx = transfer->rx_buf; |
485 | dws->rx_end = dws->rx + transfer->len; | 434 | dws->rx_end = dws->rx + transfer->len; |
486 | dws->write = dws->tx ? chip->write : null_writer; | ||
487 | dws->read = dws->rx ? chip->read : null_reader; | ||
488 | dws->cs_change = transfer->cs_change; | 435 | dws->cs_change = transfer->cs_change; |
489 | dws->len = dws->cur_transfer->len; | 436 | dws->len = dws->cur_transfer->len; |
490 | if (chip != dws->prev_chip) | 437 | if (chip != dws->prev_chip) |
@@ -518,20 +465,8 @@ static void pump_transfers(unsigned long data) | |||
518 | 465 | ||
519 | switch (bits) { | 466 | switch (bits) { |
520 | case 8: | 467 | case 8: |
521 | dws->n_bytes = 1; | ||
522 | dws->dma_width = 1; | ||
523 | dws->read = (dws->read != null_reader) ? | ||
524 | u8_reader : null_reader; | ||
525 | dws->write = (dws->write != null_writer) ? | ||
526 | u8_writer : null_writer; | ||
527 | break; | ||
528 | case 16: | 468 | case 16: |
529 | dws->n_bytes = 2; | 469 | dws->n_bytes = dws->dma_width = bits >> 3; |
530 | dws->dma_width = 2; | ||
531 | dws->read = (dws->read != null_reader) ? | ||
532 | u16_reader : null_reader; | ||
533 | dws->write = (dws->write != null_writer) ? | ||
534 | u16_writer : null_writer; | ||
535 | break; | 470 | break; |
536 | default: | 471 | default: |
537 | printk(KERN_ERR "MRST SPI0: unsupported bits:" | 472 | printk(KERN_ERR "MRST SPI0: unsupported bits:" |
@@ -575,7 +510,7 @@ static void pump_transfers(unsigned long data) | |||
575 | txint_level = dws->fifo_len / 2; | 510 | txint_level = dws->fifo_len / 2; |
576 | txint_level = (templen > txint_level) ? txint_level : templen; | 511 | txint_level = (templen > txint_level) ? txint_level : templen; |
577 | 512 | ||
578 | imask |= SPI_INT_TXEI; | 513 | imask |= SPI_INT_TXEI | SPI_INT_TXOI | SPI_INT_RXUI | SPI_INT_RXOI; |
579 | dws->transfer_handler = interrupt_transfer; | 514 | dws->transfer_handler = interrupt_transfer; |
580 | } | 515 | } |
581 | 516 | ||
@@ -733,13 +668,9 @@ static int dw_spi_setup(struct spi_device *spi) | |||
733 | if (spi->bits_per_word <= 8) { | 668 | if (spi->bits_per_word <= 8) { |
734 | chip->n_bytes = 1; | 669 | chip->n_bytes = 1; |
735 | chip->dma_width = 1; | 670 | chip->dma_width = 1; |
736 | chip->read = u8_reader; | ||
737 | chip->write = u8_writer; | ||
738 | } else if (spi->bits_per_word <= 16) { | 671 | } else if (spi->bits_per_word <= 16) { |
739 | chip->n_bytes = 2; | 672 | chip->n_bytes = 2; |
740 | chip->dma_width = 2; | 673 | chip->dma_width = 2; |
741 | chip->read = u16_reader; | ||
742 | chip->write = u16_writer; | ||
743 | } else { | 674 | } else { |
744 | /* Never take >16b case for MRST SPIC */ | 675 | /* Never take >16b case for MRST SPIC */ |
745 | dev_err(&spi->dev, "invalid wordsize\n"); | 676 | dev_err(&spi->dev, "invalid wordsize\n"); |
@@ -851,7 +782,6 @@ static void spi_hw_init(struct dw_spi *dws) | |||
851 | spi_enable_chip(dws, 0); | 782 | spi_enable_chip(dws, 0); |
852 | spi_mask_intr(dws, 0xff); | 783 | spi_mask_intr(dws, 0xff); |
853 | spi_enable_chip(dws, 1); | 784 | spi_enable_chip(dws, 1); |
854 | flush(dws); | ||
855 | 785 | ||
856 | /* | 786 | /* |
857 | * Try to detect the FIFO depth if not set by interface driver, | 787 | * Try to detect the FIFO depth if not set by interface driver, |
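
The rewritten dw_spi no longer busy-waits on SR_BUSY per word; dw_writer()/dw_reader() batch as many FIFO entries as are safe in one go. The subtle part is tx_max(): besides "what is left to send" and "room in the TX FIFO", it caps the burst by the data already in flight that the RX FIFO still has to absorb, so RX can never overflow. A standalone restatement of that arithmetic; the field names mirror the driver, but the struct and example_* functions here are purely illustrative.

    #include <stdint.h>

    struct fifo_example {
        uint32_t fifo_len;              /* FIFO depth, in entries */
        uint32_t txflr, rxflr;          /* current TX/RX fill levels */
        uint32_t tx_sent, tx_total;     /* bytes pushed / total to push */
        uint32_t rx_got, rx_total;      /* bytes drained / total to drain */
        uint32_t n_bytes;               /* bytes per FIFO entry: 1 or 2 */
    };

    static uint32_t min3u(uint32_t a, uint32_t b, uint32_t c)
    {
        uint32_t m = a < b ? a : b;
        return m < c ? m : c;
    }

    /* Entries we may push now without ever overflowing the RX side. */
    static uint32_t example_tx_max(const struct fifo_example *f)
    {
        uint32_t tx_left = (f->tx_total - f->tx_sent) / f->n_bytes;
        uint32_t tx_room = f->fifo_len - f->txflr;
        /* entries already written out but not yet read back on RX */
        uint32_t rxtx_gap = ((f->rx_total - f->rx_got) -
                             (f->tx_total - f->tx_sent)) / f->n_bytes;

        return min3u(tx_left, tx_room, f->fifo_len - rxtx_gap);
    }

    /* Entries actually sitting in the RX FIFO right now. */
    static uint32_t example_rx_max(const struct fifo_example *f)
    {
        uint32_t rx_left = (f->rx_total - f->rx_got) / f->n_bytes;

        return rx_left < f->rxflr ? rx_left : f->rxflr;
    }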
diff --git a/drivers/spi/dw_spi.h b/drivers/spi/dw_spi.h index b23e452adaf7..7a5e78d2a5cb 100644 --- a/drivers/spi/dw_spi.h +++ b/drivers/spi/dw_spi.h | |||
@@ -137,8 +137,6 @@ struct dw_spi { | |||
137 | u8 max_bits_per_word; /* maxim is 16b */ | 137 | u8 max_bits_per_word; /* maxim is 16b */ |
138 | u32 dma_width; | 138 | u32 dma_width; |
139 | int cs_change; | 139 | int cs_change; |
140 | int (*write)(struct dw_spi *dws); | ||
141 | int (*read)(struct dw_spi *dws); | ||
142 | irqreturn_t (*transfer_handler)(struct dw_spi *dws); | 140 | irqreturn_t (*transfer_handler)(struct dw_spi *dws); |
143 | void (*cs_control)(u32 command); | 141 | void (*cs_control)(u32 command); |
144 | 142 | ||
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 82b9a428c323..2e13a14bba3f 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c | |||
@@ -1047,8 +1047,8 @@ static u8 *buf; | |||
1047 | * spi_{async,sync}() calls with dma-safe buffers. | 1047 | * spi_{async,sync}() calls with dma-safe buffers. |
1048 | */ | 1048 | */ |
1049 | int spi_write_then_read(struct spi_device *spi, | 1049 | int spi_write_then_read(struct spi_device *spi, |
1050 | const u8 *txbuf, unsigned n_tx, | 1050 | const void *txbuf, unsigned n_tx, |
1051 | u8 *rxbuf, unsigned n_rx) | 1051 | void *rxbuf, unsigned n_rx) |
1052 | { | 1052 | { |
1053 | static DEFINE_MUTEX(lock); | 1053 | static DEFINE_MUTEX(lock); |
1054 | 1054 | ||
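
Widening spi_write_then_read() from u8 * to const void */void * removes the casts callers needed whenever the buffers were anything other than byte arrays. A hedged usage sketch follows; the register layout and the 0x80 "read" bit belong to an imaginary device, and example_read_reg16() is not a real API.

    #include <linux/spi/spi.h>
    #include <linux/types.h>

    /* Read a 16-bit register from a hypothetical SPI device: one command
     * byte out, two data bytes back. With the void * prototype the
     * 16-bit buffer needs no cast. */
    static int example_read_reg16(struct spi_device *spi, u8 reg, __le16 *val)
    {
        u8 cmd = 0x80 | reg;    /* assumed "read" bit for this chip */

        return spi_write_then_read(spi, &cmd, 1, val, sizeof(*val));
    }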
diff --git a/drivers/spi/spi_nuc900.c b/drivers/spi/spi_nuc900.c index d5be18b3078c..3cd15f690f16 100644 --- a/drivers/spi/spi_nuc900.c +++ b/drivers/spi/spi_nuc900.c | |||
@@ -463,7 +463,7 @@ static int __devexit nuc900_spi_remove(struct platform_device *dev) | |||
463 | 463 | ||
464 | platform_set_drvdata(dev, NULL); | 464 | platform_set_drvdata(dev, NULL); |
465 | 465 | ||
466 | spi_unregister_master(hw->master); | 466 | spi_bitbang_stop(&hw->bitbang); |
467 | 467 | ||
468 | clk_disable(hw->clk); | 468 | clk_disable(hw->clk); |
469 | clk_put(hw->clk); | 469 | clk_put(hw->clk); |
diff --git a/drivers/spi/spi_s3c24xx.c b/drivers/spi/spi_s3c24xx.c index 151a95e40653..1a5fcabfd565 100644 --- a/drivers/spi/spi_s3c24xx.c +++ b/drivers/spi/spi_s3c24xx.c | |||
@@ -668,7 +668,7 @@ static int __exit s3c24xx_spi_remove(struct platform_device *dev) | |||
668 | 668 | ||
669 | platform_set_drvdata(dev, NULL); | 669 | platform_set_drvdata(dev, NULL); |
670 | 670 | ||
671 | spi_unregister_master(hw->master); | 671 | spi_bitbang_stop(&hw->bitbang); |
672 | 672 | ||
673 | clk_disable(hw->clk); | 673 | clk_disable(hw->clk); |
674 | clk_put(hw->clk); | 674 | clk_put(hw->clk); |
diff --git a/drivers/spi/spi_sh.c b/drivers/spi/spi_sh.c index 869a07d375d6..9eedd71ad898 100644 --- a/drivers/spi/spi_sh.c +++ b/drivers/spi/spi_sh.c | |||
@@ -427,10 +427,10 @@ static int __devexit spi_sh_remove(struct platform_device *pdev) | |||
427 | { | 427 | { |
428 | struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev); | 428 | struct spi_sh_data *ss = dev_get_drvdata(&pdev->dev); |
429 | 429 | ||
430 | spi_unregister_master(ss->master); | ||
430 | destroy_workqueue(ss->workqueue); | 431 | destroy_workqueue(ss->workqueue); |
431 | free_irq(ss->irq, ss); | 432 | free_irq(ss->irq, ss); |
432 | iounmap(ss->addr); | 433 | iounmap(ss->addr); |
433 | spi_master_put(ss->master); | ||
434 | 434 | ||
435 | return 0; | 435 | return 0; |
436 | } | 436 | } |
diff --git a/drivers/spi/spi_tegra.c b/drivers/spi/spi_tegra.c index 891e5909038c..6c3aa6ecaade 100644 --- a/drivers/spi/spi_tegra.c +++ b/drivers/spi/spi_tegra.c | |||
@@ -578,6 +578,7 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev) | |||
578 | master = dev_get_drvdata(&pdev->dev); | 578 | master = dev_get_drvdata(&pdev->dev); |
579 | tspi = spi_master_get_devdata(master); | 579 | tspi = spi_master_get_devdata(master); |
580 | 580 | ||
581 | spi_unregister_master(master); | ||
581 | tegra_dma_free_channel(tspi->rx_dma); | 582 | tegra_dma_free_channel(tspi->rx_dma); |
582 | 583 | ||
583 | dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, | 584 | dma_free_coherent(&pdev->dev, sizeof(u32) * BB_LEN, |
@@ -586,7 +587,6 @@ static int __devexit spi_tegra_remove(struct platform_device *pdev) | |||
586 | clk_put(tspi->clk); | 587 | clk_put(tspi->clk); |
587 | iounmap(tspi->base); | 588 | iounmap(tspi->base); |
588 | 589 | ||
589 | spi_master_put(master); | ||
590 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | 590 | r = platform_get_resource(pdev, IORESOURCE_MEM, 0); |
591 | release_mem_region(r->start, (r->end - r->start) + 1); | 591 | release_mem_region(r->start, (r->end - r->start) + 1); |
592 | 592 | ||
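
The spi_sh and spi_tegra hunks reorder remove(): unregister the SPI master first, then tear down the workqueue, IRQ, DMA and clocks, and drop the trailing spi_master_put(), since the unregister path already releases that reference; the bitbang drivers (nuc900, s3c24xx) get the analogous fix by calling spi_bitbang_stop(). A sketch of the resulting shape, with hypothetical example_* names.

    #include <linux/clk.h>
    #include <linux/interrupt.h>
    #include <linux/platform_device.h>
    #include <linux/spi/spi.h>

    struct example_spi_priv {
        struct clk *clk;
        int irq;
    };

    static int example_spi_remove(struct platform_device *pdev)
    {
        struct spi_master *master = platform_get_drvdata(pdev);
        struct example_spi_priv *priv = spi_master_get_devdata(master);

        spi_unregister_master(master);  /* stop new transfers first ... */
        free_irq(priv->irq, priv);      /* ... then release the resources */
        clk_disable(priv->clk);
        clk_put(priv->clk);
        return 0;                       /* no spi_master_put(): the
                                         * reference went with unregister */
    }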
diff --git a/drivers/spi/xilinx_spi.c b/drivers/spi/xilinx_spi.c index c69c6f2c2c5c..4d2c75df886c 100644 --- a/drivers/spi/xilinx_spi.c +++ b/drivers/spi/xilinx_spi.c | |||
@@ -18,7 +18,6 @@ | |||
18 | #include <linux/interrupt.h> | 18 | #include <linux/interrupt.h> |
19 | #include <linux/of.h> | 19 | #include <linux/of.h> |
20 | #include <linux/platform_device.h> | 20 | #include <linux/platform_device.h> |
21 | #include <linux/mfd/core.h> | ||
22 | #include <linux/spi/spi.h> | 21 | #include <linux/spi/spi.h> |
23 | #include <linux/spi/spi_bitbang.h> | 22 | #include <linux/spi/spi_bitbang.h> |
24 | #include <linux/spi/xilinx_spi.h> | 23 | #include <linux/spi/xilinx_spi.h> |
@@ -471,7 +470,7 @@ static int __devinit xilinx_spi_probe(struct platform_device *dev) | |||
471 | struct spi_master *master; | 470 | struct spi_master *master; |
472 | u8 i; | 471 | u8 i; |
473 | 472 | ||
474 | pdata = mfd_get_data(dev); | 473 | pdata = dev->dev.platform_data; |
475 | if (pdata) { | 474 | if (pdata) { |
476 | num_cs = pdata->num_chipselect; | 475 | num_cs = pdata->num_chipselect; |
477 | little_endian = pdata->little_endian; | 476 | little_endian = pdata->little_endian; |
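
The xilinx_spi change (like the 88pm860x backlight and tmiofb ones below) drops the mfd_get_data()/mfd_cell indirection: the MFD core now hands a sub-device its platform data in the usual place, so the driver reads dev.platform_data directly and the <linux/mfd/core.h> include goes away. A minimal sketch of the probe-side pattern; struct example_pdata and example_probe() are invented for illustration.

    #include <linux/errno.h>
    #include <linux/platform_device.h>

    struct example_pdata {              /* illustrative only */
        unsigned int num_chipselect;
    };

    static int example_probe(struct platform_device *pdev)
    {
        struct example_pdata *pdata = pdev->dev.platform_data;

        if (!pdata)
            return -EINVAL;             /* nothing to configure from */

        /* ... use pdata->num_chipselect ... */
        return 0;
    }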
diff --git a/drivers/staging/generic_serial/rio/rioinit.c b/drivers/staging/generic_serial/rio/rioinit.c index 24a282bb89d4..fb62b383f1de 100644 --- a/drivers/staging/generic_serial/rio/rioinit.c +++ b/drivers/staging/generic_serial/rio/rioinit.c | |||
@@ -381,7 +381,7 @@ struct rioVersion *RIOVersid(void) | |||
381 | { | 381 | { |
382 | strlcpy(stVersion.version, "RIO driver for linux V1.0", | 382 | strlcpy(stVersion.version, "RIO driver for linux V1.0", |
383 | sizeof(stVersion.version)); | 383 | sizeof(stVersion.version)); |
384 | strlcpy(stVersion.buildDate, __DATE__, | 384 | strlcpy(stVersion.buildDate, "Aug 15 2010", |
385 | sizeof(stVersion.buildDate)); | 385 | sizeof(stVersion.buildDate)); |
386 | 386 | ||
387 | return &stVersion; | 387 | return &stVersion; |
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c index bfa05e801823..c0e8f2eeb886 100644 --- a/drivers/tty/cyclades.c +++ b/drivers/tty/cyclades.c | |||
@@ -4096,8 +4096,7 @@ static int __init cy_init(void) | |||
4096 | if (!cy_serial_driver) | 4096 | if (!cy_serial_driver) |
4097 | goto err; | 4097 | goto err; |
4098 | 4098 | ||
4099 | printk(KERN_INFO "Cyclades driver " CY_VERSION " (built %s %s)\n", | 4099 | printk(KERN_INFO "Cyclades driver " CY_VERSION "\n"); |
4100 | __DATE__, __TIME__); | ||
4101 | 4100 | ||
4102 | /* Initialize the tty_driver structure */ | 4101 | /* Initialize the tty_driver structure */ |
4103 | 4102 | ||
diff --git a/drivers/tty/nozomi.c b/drivers/tty/nozomi.c index b1aecc7bb32a..fd347ff34d07 100644 --- a/drivers/tty/nozomi.c +++ b/drivers/tty/nozomi.c | |||
@@ -61,8 +61,7 @@ | |||
61 | #include <linux/delay.h> | 61 | #include <linux/delay.h> |
62 | 62 | ||
63 | 63 | ||
64 | #define VERSION_STRING DRIVER_DESC " 2.1d (build date: " \ | 64 | #define VERSION_STRING DRIVER_DESC " 2.1d" |
65 | __DATE__ " " __TIME__ ")" | ||
66 | 65 | ||
67 | /* Macros definitions */ | 66 | /* Macros definitions */ |
68 | 67 | ||
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c index bea5c215460c..84db7321cce8 100644 --- a/drivers/tty/serial/m32r_sio.c +++ b/drivers/tty/serial/m32r_sio.c | |||
@@ -907,9 +907,10 @@ static int m32r_sio_request_port(struct uart_port *port) | |||
907 | return ret; | 907 | return ret; |
908 | } | 908 | } |
909 | 909 | ||
910 | static void m32r_sio_config_port(struct uart_port *port, int flags) | 910 | static void m32r_sio_config_port(struct uart_port *port, int unused) |
911 | { | 911 | { |
912 | struct uart_sio_port *up = (struct uart_sio_port *)port; | 912 | struct uart_sio_port *up = (struct uart_sio_port *)port; |
913 | unsigned long flags; | ||
913 | 914 | ||
914 | spin_lock_irqsave(&up->port.lock, flags); | 915 | spin_lock_irqsave(&up->port.lock, flags); |
915 | 916 | ||
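
The m32r_sio fix above is about the spinlock flags: the old code reused the int "flags" argument of config_port() for spin_lock_irqsave(), which both shadows the API parameter and has the wrong type; the saved interrupt state must live in a local unsigned long. In miniature (example_* names are hypothetical):

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(example_lock);
    static int example_shared_counter;

    static void example_config(int unused)  /* API flag argument, not reused */
    {
        unsigned long flags;                /* holds the saved IRQ state */

        spin_lock_irqsave(&example_lock, flags);
        example_shared_counter++;           /* data shared with an IRQ handler */
        spin_unlock_irqrestore(&example_lock, flags);
    }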
diff --git a/drivers/usb/otg/twl6030-usb.c b/drivers/usb/otg/twl6030-usb.c index 3f2e07011a48..cfb5aa72b196 100644 --- a/drivers/usb/otg/twl6030-usb.c +++ b/drivers/usb/otg/twl6030-usb.c | |||
@@ -100,6 +100,7 @@ struct twl6030_usb { | |||
100 | u8 linkstat; | 100 | u8 linkstat; |
101 | u8 asleep; | 101 | u8 asleep; |
102 | bool irq_enabled; | 102 | bool irq_enabled; |
103 | unsigned long features; | ||
103 | }; | 104 | }; |
104 | 105 | ||
105 | #define xceiv_to_twl(x) container_of((x), struct twl6030_usb, otg) | 106 | #define xceiv_to_twl(x) container_of((x), struct twl6030_usb, otg) |
@@ -204,6 +205,12 @@ static int twl6030_start_srp(struct otg_transceiver *x) | |||
204 | 205 | ||
205 | static int twl6030_usb_ldo_init(struct twl6030_usb *twl) | 206 | static int twl6030_usb_ldo_init(struct twl6030_usb *twl) |
206 | { | 207 | { |
208 | char *regulator_name; | ||
209 | |||
210 | if (twl->features & TWL6025_SUBCLASS) | ||
211 | regulator_name = "ldousb"; | ||
212 | else | ||
213 | regulator_name = "vusb"; | ||
207 | 214 | ||
208 | /* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */ | 215 | /* Set to OTG_REV 1.3 and turn on the ID_WAKEUP_COMP */ |
209 | twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_BACKUP_REG); | 216 | twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x1, TWL6030_BACKUP_REG); |
@@ -214,7 +221,7 @@ static int twl6030_usb_ldo_init(struct twl6030_usb *twl) | |||
214 | /* Program MISC2 register and set bit VUSB_IN_VBAT */ | 221 | /* Program MISC2 register and set bit VUSB_IN_VBAT */ |
215 | twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x10, TWL6030_MISC2); | 222 | twl6030_writeb(twl, TWL6030_MODULE_ID0 , 0x10, TWL6030_MISC2); |
216 | 223 | ||
217 | twl->usb3v3 = regulator_get(twl->dev, "vusb"); | 224 | twl->usb3v3 = regulator_get(twl->dev, regulator_name); |
218 | if (IS_ERR(twl->usb3v3)) | 225 | if (IS_ERR(twl->usb3v3)) |
219 | return -ENODEV; | 226 | return -ENODEV; |
220 | 227 | ||
@@ -409,6 +416,7 @@ static int __devinit twl6030_usb_probe(struct platform_device *pdev) | |||
409 | twl->dev = &pdev->dev; | 416 | twl->dev = &pdev->dev; |
410 | twl->irq1 = platform_get_irq(pdev, 0); | 417 | twl->irq1 = platform_get_irq(pdev, 0); |
411 | twl->irq2 = platform_get_irq(pdev, 1); | 418 | twl->irq2 = platform_get_irq(pdev, 1); |
419 | twl->features = pdata->features; | ||
412 | twl->otg.dev = twl->dev; | 420 | twl->otg.dev = twl->dev; |
413 | twl->otg.label = "twl6030"; | 421 | twl->otg.label = "twl6030"; |
414 | twl->otg.set_host = twl6030_set_host; | 422 | twl->otg.set_host = twl6030_set_host; |
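
The twl6030-usb change stores the pdata feature bits in the driver state and picks the USB LDO name from them: TWL6025-class parts use "ldousb", everything else keeps "vusb". The selection is just a conditional handed to regulator_get(); a compressed sketch, where EXAMPLE_TWL6025 stands in for the real TWL6025_SUBCLASS flag and example_get_usb_ldo() is hypothetical.

    #include <linux/device.h>
    #include <linux/regulator/consumer.h>

    #define EXAMPLE_TWL6025     (1 << 0)    /* stand-in feature bit */

    static struct regulator *example_get_usb_ldo(struct device *dev,
                                                 unsigned long features)
    {
        const char *name = (features & EXAMPLE_TWL6025) ? "ldousb" : "vusb";

        return regulator_get(dev, name);    /* caller checks IS_ERR() */
    }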
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c index c8b520e9a11a..c04b94da81f7 100644 --- a/drivers/video/backlight/88pm860x_bl.c +++ b/drivers/video/backlight/88pm860x_bl.c | |||
@@ -16,7 +16,6 @@ | |||
16 | #include <linux/fb.h> | 16 | #include <linux/fb.h> |
17 | #include <linux/i2c.h> | 17 | #include <linux/i2c.h> |
18 | #include <linux/backlight.h> | 18 | #include <linux/backlight.h> |
19 | #include <linux/mfd/core.h> | ||
20 | #include <linux/mfd/88pm860x.h> | 19 | #include <linux/mfd/88pm860x.h> |
21 | 20 | ||
22 | #define MAX_BRIGHTNESS (0xFF) | 21 | #define MAX_BRIGHTNESS (0xFF) |
@@ -168,7 +167,6 @@ static int pm860x_backlight_probe(struct platform_device *pdev) | |||
168 | struct pm860x_backlight_pdata *pdata = NULL; | 167 | struct pm860x_backlight_pdata *pdata = NULL; |
169 | struct pm860x_backlight_data *data; | 168 | struct pm860x_backlight_data *data; |
170 | struct backlight_device *bl; | 169 | struct backlight_device *bl; |
171 | struct mfd_cell *cell; | ||
172 | struct resource *res; | 170 | struct resource *res; |
173 | struct backlight_properties props; | 171 | struct backlight_properties props; |
174 | unsigned char value; | 172 | unsigned char value; |
@@ -181,10 +179,7 @@ static int pm860x_backlight_probe(struct platform_device *pdev) | |||
181 | return -EINVAL; | 179 | return -EINVAL; |
182 | } | 180 | } |
183 | 181 | ||
184 | cell = pdev->dev.platform_data; | 182 | pdata = pdev->dev.platform_data; |
185 | if (cell == NULL) | ||
186 | return -ENODEV; | ||
187 | pdata = cell->mfd_data; | ||
188 | if (pdata == NULL) { | 183 | if (pdata == NULL) { |
189 | dev_err(&pdev->dev, "platform data isn't assigned to " | 184 | dev_err(&pdev->dev, "platform data isn't assigned to " |
190 | "backlight\n"); | 185 | "backlight\n"); |
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c index ea39336addfb..f70bd63b0187 100644 --- a/drivers/video/mb862xx/mb862xxfbdrv.c +++ b/drivers/video/mb862xx/mb862xxfbdrv.c | |||
@@ -16,6 +16,7 @@ | |||
16 | 16 | ||
17 | #include <linux/fb.h> | 17 | #include <linux/fb.h> |
18 | #include <linux/delay.h> | 18 | #include <linux/delay.h> |
19 | #include <linux/uaccess.h> | ||
19 | #include <linux/init.h> | 20 | #include <linux/init.h> |
20 | #include <linux/interrupt.h> | 21 | #include <linux/interrupt.h> |
21 | #include <linux/pci.h> | 22 | #include <linux/pci.h> |
diff --git a/drivers/video/omap/Makefile b/drivers/video/omap/Makefile index 49226a1b909e..25db55696e14 100644 --- a/drivers/video/omap/Makefile +++ b/drivers/video/omap/Makefile | |||
@@ -30,7 +30,6 @@ objs-y$(CONFIG_MACH_OMAP_APOLLON) += lcd_apollon.o | |||
30 | objs-y$(CONFIG_MACH_OMAP_2430SDP) += lcd_2430sdp.o | 30 | objs-y$(CONFIG_MACH_OMAP_2430SDP) += lcd_2430sdp.o |
31 | objs-y$(CONFIG_MACH_OMAP_3430SDP) += lcd_2430sdp.o | 31 | objs-y$(CONFIG_MACH_OMAP_3430SDP) += lcd_2430sdp.o |
32 | objs-y$(CONFIG_MACH_OMAP_LDP) += lcd_ldp.o | 32 | objs-y$(CONFIG_MACH_OMAP_LDP) += lcd_ldp.o |
33 | objs-y$(CONFIG_MACH_OMAP2EVM) += lcd_omap2evm.o | ||
34 | objs-y$(CONFIG_MACH_OMAP3EVM) += lcd_omap3evm.o | 33 | objs-y$(CONFIG_MACH_OMAP3EVM) += lcd_omap3evm.o |
35 | objs-y$(CONFIG_MACH_OMAP3_BEAGLE) += lcd_omap3beagle.o | 34 | objs-y$(CONFIG_MACH_OMAP3_BEAGLE) += lcd_omap3beagle.o |
36 | objs-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o | 35 | objs-y$(CONFIG_FB_OMAP_LCD_MIPID) += lcd_mipid.o |
diff --git a/drivers/video/omap/lcd_omap2evm.c b/drivers/video/omap/lcd_omap2evm.c deleted file mode 100644 index 7e7a65c08452..000000000000 --- a/drivers/video/omap/lcd_omap2evm.c +++ /dev/null | |||
@@ -1,192 +0,0 @@ | |||
1 | /* | ||
2 | * LCD panel support for the MISTRAL OMAP2EVM board | ||
3 | * | ||
4 | * Author: Arun C <arunedarath@mistralsolutions.com> | ||
5 | * | ||
6 | * Derived from drivers/video/omap/lcd_omap3evm.c | ||
7 | * Derived from drivers/video/omap/lcd-apollon.c | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or modify it | ||
10 | * under the terms of the GNU General Public License as published by the | ||
11 | * Free Software Foundation; either version 2 of the License, or (at your | ||
12 | * option) any later version. | ||
13 | * | ||
14 | * This program is distributed in the hope that it will be useful, but | ||
15 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
17 | * General Public License for more details. | ||
18 | * | ||
19 | * You should have received a copy of the GNU General Public License along | ||
20 | * with this program; if not, write to the Free Software Foundation, Inc., | ||
21 | * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. | ||
22 | */ | ||
23 | |||
24 | #include <linux/module.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <linux/gpio.h> | ||
27 | #include <linux/i2c/twl.h> | ||
28 | |||
29 | #include <plat/mux.h> | ||
30 | #include <asm/mach-types.h> | ||
31 | |||
32 | #include "omapfb.h" | ||
33 | |||
34 | #define LCD_PANEL_ENABLE_GPIO 154 | ||
35 | #define LCD_PANEL_LR 128 | ||
36 | #define LCD_PANEL_UD 129 | ||
37 | #define LCD_PANEL_INI 152 | ||
38 | #define LCD_PANEL_QVGA 148 | ||
39 | #define LCD_PANEL_RESB 153 | ||
40 | |||
41 | #define TWL_LED_LEDEN 0x00 | ||
42 | #define TWL_PWMA_PWMAON 0x00 | ||
43 | #define TWL_PWMA_PWMAOFF 0x01 | ||
44 | |||
45 | static unsigned int bklight_level; | ||
46 | |||
47 | static int omap2evm_panel_init(struct lcd_panel *panel, | ||
48 | struct omapfb_device *fbdev) | ||
49 | { | ||
50 | gpio_request(LCD_PANEL_ENABLE_GPIO, "LCD enable"); | ||
51 | gpio_request(LCD_PANEL_LR, "LCD lr"); | ||
52 | gpio_request(LCD_PANEL_UD, "LCD ud"); | ||
53 | gpio_request(LCD_PANEL_INI, "LCD ini"); | ||
54 | gpio_request(LCD_PANEL_QVGA, "LCD qvga"); | ||
55 | gpio_request(LCD_PANEL_RESB, "LCD resb"); | ||
56 | |||
57 | gpio_direction_output(LCD_PANEL_ENABLE_GPIO, 1); | ||
58 | gpio_direction_output(LCD_PANEL_RESB, 1); | ||
59 | gpio_direction_output(LCD_PANEL_INI, 1); | ||
60 | gpio_direction_output(LCD_PANEL_QVGA, 0); | ||
61 | gpio_direction_output(LCD_PANEL_LR, 1); | ||
62 | gpio_direction_output(LCD_PANEL_UD, 1); | ||
63 | |||
64 | twl_i2c_write_u8(TWL4030_MODULE_LED, 0x11, TWL_LED_LEDEN); | ||
65 | twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x01, TWL_PWMA_PWMAON); | ||
66 | twl_i2c_write_u8(TWL4030_MODULE_PWMA, 0x02, TWL_PWMA_PWMAOFF); | ||
67 | bklight_level = 100; | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static void omap2evm_panel_cleanup(struct lcd_panel *panel) | ||
73 | { | ||
74 | gpio_free(LCD_PANEL_RESB); | ||
75 | gpio_free(LCD_PANEL_QVGA); | ||
76 | gpio_free(LCD_PANEL_INI); | ||
77 | gpio_free(LCD_PANEL_UD); | ||
78 | gpio_free(LCD_PANEL_LR); | ||
79 | gpio_free(LCD_PANEL_ENABLE_GPIO); | ||
80 | } | ||
81 | |||
82 | static int omap2evm_panel_enable(struct lcd_panel *panel) | ||
83 | { | ||
84 | gpio_set_value(LCD_PANEL_ENABLE_GPIO, 0); | ||
85 | return 0; | ||
86 | } | ||
87 | |||
88 | static void omap2evm_panel_disable(struct lcd_panel *panel) | ||
89 | { | ||
90 | gpio_set_value(LCD_PANEL_ENABLE_GPIO, 1); | ||
91 | } | ||
92 | |||
93 | static unsigned long omap2evm_panel_get_caps(struct lcd_panel *panel) | ||
94 | { | ||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | static int omap2evm_bklight_setlevel(struct lcd_panel *panel, | ||
99 | unsigned int level) | ||
100 | { | ||
101 | u8 c; | ||
102 | if ((level >= 0) && (level <= 100)) { | ||
103 | c = (125 * (100 - level)) / 100 + 2; | ||
104 | twl_i2c_write_u8(TWL4030_MODULE_PWMA, c, TWL_PWMA_PWMAOFF); | ||
105 | bklight_level = level; | ||
106 | } | ||
107 | return 0; | ||
108 | } | ||
109 | |||
110 | static unsigned int omap2evm_bklight_getlevel(struct lcd_panel *panel) | ||
111 | { | ||
112 | return bklight_level; | ||
113 | } | ||
114 | |||
115 | static unsigned int omap2evm_bklight_getmaxlevel(struct lcd_panel *panel) | ||
116 | { | ||
117 | return 100; | ||
118 | } | ||
119 | |||
120 | struct lcd_panel omap2evm_panel = { | ||
121 | .name = "omap2evm", | ||
122 | .config = OMAP_LCDC_PANEL_TFT | OMAP_LCDC_INV_VSYNC | | ||
123 | OMAP_LCDC_INV_HSYNC, | ||
124 | |||
125 | .bpp = 16, | ||
126 | .data_lines = 18, | ||
127 | .x_res = 480, | ||
128 | .y_res = 640, | ||
129 | .hsw = 3, | ||
130 | .hfp = 0, | ||
131 | .hbp = 28, | ||
132 | .vsw = 2, | ||
133 | .vfp = 1, | ||
134 | .vbp = 0, | ||
135 | |||
136 | .pixel_clock = 20000, | ||
137 | |||
138 | .init = omap2evm_panel_init, | ||
139 | .cleanup = omap2evm_panel_cleanup, | ||
140 | .enable = omap2evm_panel_enable, | ||
141 | .disable = omap2evm_panel_disable, | ||
142 | .get_caps = omap2evm_panel_get_caps, | ||
143 | .set_bklight_level = omap2evm_bklight_setlevel, | ||
144 | .get_bklight_level = omap2evm_bklight_getlevel, | ||
145 | .get_bklight_max = omap2evm_bklight_getmaxlevel, | ||
146 | }; | ||
147 | |||
148 | static int omap2evm_panel_probe(struct platform_device *pdev) | ||
149 | { | ||
150 | omapfb_register_panel(&omap2evm_panel); | ||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | static int omap2evm_panel_remove(struct platform_device *pdev) | ||
155 | { | ||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | static int omap2evm_panel_suspend(struct platform_device *pdev, | ||
160 | pm_message_t mesg) | ||
161 | { | ||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | static int omap2evm_panel_resume(struct platform_device *pdev) | ||
166 | { | ||
167 | return 0; | ||
168 | } | ||
169 | |||
170 | struct platform_driver omap2evm_panel_driver = { | ||
171 | .probe = omap2evm_panel_probe, | ||
172 | .remove = omap2evm_panel_remove, | ||
173 | .suspend = omap2evm_panel_suspend, | ||
174 | .resume = omap2evm_panel_resume, | ||
175 | .driver = { | ||
176 | .name = "omap2evm_lcd", | ||
177 | .owner = THIS_MODULE, | ||
178 | }, | ||
179 | }; | ||
180 | |||
181 | static int __init omap2evm_panel_drv_init(void) | ||
182 | { | ||
183 | return platform_driver_register(&omap2evm_panel_driver); | ||
184 | } | ||
185 | |||
186 | static void __exit omap2evm_panel_drv_exit(void) | ||
187 | { | ||
188 | platform_driver_unregister(&omap2evm_panel_driver); | ||
189 | } | ||
190 | |||
191 | module_init(omap2evm_panel_drv_init); | ||
192 | module_exit(omap2evm_panel_drv_exit); | ||
diff --git a/drivers/video/tmiofb.c b/drivers/video/tmiofb.c index 0c341d739604..cd1c4dcef8fd 100644 --- a/drivers/video/tmiofb.c +++ b/drivers/video/tmiofb.c | |||
@@ -250,7 +250,7 @@ static irqreturn_t tmiofb_irq(int irq, void *__info) | |||
250 | */ | 250 | */ |
251 | static int tmiofb_hw_stop(struct platform_device *dev) | 251 | static int tmiofb_hw_stop(struct platform_device *dev) |
252 | { | 252 | { |
253 | struct tmio_fb_data *data = mfd_get_data(dev); | 253 | struct tmio_fb_data *data = dev->dev.platform_data; |
254 | struct fb_info *info = platform_get_drvdata(dev); | 254 | struct fb_info *info = platform_get_drvdata(dev); |
255 | struct tmiofb_par *par = info->par; | 255 | struct tmiofb_par *par = info->par; |
256 | 256 | ||
@@ -311,7 +311,7 @@ static int tmiofb_hw_init(struct platform_device *dev) | |||
311 | */ | 311 | */ |
312 | static void tmiofb_hw_mode(struct platform_device *dev) | 312 | static void tmiofb_hw_mode(struct platform_device *dev) |
313 | { | 313 | { |
314 | struct tmio_fb_data *data = mfd_get_data(dev); | 314 | struct tmio_fb_data *data = dev->dev.platform_data; |
315 | struct fb_info *info = platform_get_drvdata(dev); | 315 | struct fb_info *info = platform_get_drvdata(dev); |
316 | struct fb_videomode *mode = info->mode; | 316 | struct fb_videomode *mode = info->mode; |
317 | struct tmiofb_par *par = info->par; | 317 | struct tmiofb_par *par = info->par; |
@@ -557,8 +557,7 @@ static int tmiofb_ioctl(struct fb_info *fbi, | |||
557 | static struct fb_videomode * | 557 | static struct fb_videomode * |
558 | tmiofb_find_mode(struct fb_info *info, struct fb_var_screeninfo *var) | 558 | tmiofb_find_mode(struct fb_info *info, struct fb_var_screeninfo *var) |
559 | { | 559 | { |
560 | struct tmio_fb_data *data = | 560 | struct tmio_fb_data *data = info->device->platform_data; |
561 | mfd_get_data(to_platform_device(info->device)); | ||
562 | struct fb_videomode *best = NULL; | 561 | struct fb_videomode *best = NULL; |
563 | int i; | 562 | int i; |
564 | 563 | ||
@@ -578,8 +577,7 @@ static int tmiofb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) | |||
578 | { | 577 | { |
579 | 578 | ||
580 | struct fb_videomode *mode; | 579 | struct fb_videomode *mode; |
581 | struct tmio_fb_data *data = | 580 | struct tmio_fb_data *data = info->device->platform_data; |
582 | mfd_get_data(to_platform_device(info->device)); | ||
583 | 581 | ||
584 | mode = tmiofb_find_mode(info, var); | 582 | mode = tmiofb_find_mode(info, var); |
585 | if (!mode || var->bits_per_pixel > 16) | 583 | if (!mode || var->bits_per_pixel > 16) |
@@ -680,7 +678,7 @@ static struct fb_ops tmiofb_ops = { | |||
680 | static int __devinit tmiofb_probe(struct platform_device *dev) | 678 | static int __devinit tmiofb_probe(struct platform_device *dev) |
681 | { | 679 | { |
682 | const struct mfd_cell *cell = mfd_get_cell(dev); | 680 | const struct mfd_cell *cell = mfd_get_cell(dev); |
683 | struct tmio_fb_data *data = mfd_get_data(dev); | 681 | struct tmio_fb_data *data = dev->dev.platform_data; |
684 | struct resource *ccr = platform_get_resource(dev, IORESOURCE_MEM, 1); | 682 | struct resource *ccr = platform_get_resource(dev, IORESOURCE_MEM, 1); |
685 | struct resource *lcr = platform_get_resource(dev, IORESOURCE_MEM, 0); | 683 | struct resource *lcr = platform_get_resource(dev, IORESOURCE_MEM, 0); |
686 | struct resource *vram = platform_get_resource(dev, IORESOURCE_MEM, 2); | 684 | struct resource *vram = platform_get_resource(dev, IORESOURCE_MEM, 2); |
diff --git a/drivers/video/via/via-gpio.c b/drivers/video/via/via-gpio.c index c2a0a1cfd3b3..ab5341814c74 100644 --- a/drivers/video/via/via-gpio.c +++ b/drivers/video/via/via-gpio.c | |||
@@ -145,7 +145,7 @@ static int via_gpio_get(struct gpio_chip *chip, unsigned int nr) | |||
145 | } | 145 | } |
146 | 146 | ||
147 | 147 | ||
148 | static struct viafb_gpio_cfg gpio_config = { | 148 | static struct viafb_gpio_cfg viafb_gpio_config = { |
149 | .gpio_chip = { | 149 | .gpio_chip = { |
150 | .label = "VIAFB onboard GPIO", | 150 | .label = "VIAFB onboard GPIO", |
151 | .owner = THIS_MODULE, | 151 | .owner = THIS_MODULE, |
@@ -183,8 +183,8 @@ static int viafb_gpio_resume(void *private) | |||
183 | { | 183 | { |
184 | int i; | 184 | int i; |
185 | 185 | ||
186 | for (i = 0; i < gpio_config.gpio_chip.ngpio; i += 2) | 186 | for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i += 2) |
187 | viafb_gpio_enable(gpio_config.active_gpios[i]); | 187 | viafb_gpio_enable(viafb_gpio_config.active_gpios[i]); |
188 | return 0; | 188 | return 0; |
189 | } | 189 | } |
190 | 190 | ||
@@ -201,9 +201,9 @@ int viafb_gpio_lookup(const char *name) | |||
201 | { | 201 | { |
202 | int i; | 202 | int i; |
203 | 203 | ||
204 | for (i = 0; i < gpio_config.gpio_chip.ngpio; i++) | 204 | for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i++) |
205 | if (!strcmp(name, gpio_config.active_gpios[i]->vg_name)) | 205 | if (!strcmp(name, viafb_gpio_config.active_gpios[i]->vg_name)) |
206 | return gpio_config.gpio_chip.base + i; | 206 | return viafb_gpio_config.gpio_chip.base + i; |
207 | return -1; | 207 | return -1; |
208 | } | 208 | } |
209 | EXPORT_SYMBOL_GPL(viafb_gpio_lookup); | 209 | EXPORT_SYMBOL_GPL(viafb_gpio_lookup); |
@@ -229,14 +229,15 @@ static __devinit int viafb_gpio_probe(struct platform_device *platdev) | |||
229 | for (gpio = viafb_all_gpios; | 229 | for (gpio = viafb_all_gpios; |
230 | gpio < viafb_all_gpios + VIAFB_NUM_GPIOS; gpio++) | 230 | gpio < viafb_all_gpios + VIAFB_NUM_GPIOS; gpio++) |
231 | if (gpio->vg_port_index == port_cfg[i].ioport_index) { | 231 | if (gpio->vg_port_index == port_cfg[i].ioport_index) { |
232 | gpio_config.active_gpios[ngpio] = gpio; | 232 | viafb_gpio_config.active_gpios[ngpio] = gpio; |
233 | gpio_config.gpio_names[ngpio] = gpio->vg_name; | 233 | viafb_gpio_config.gpio_names[ngpio] = |
234 | gpio->vg_name; | ||
234 | ngpio++; | 235 | ngpio++; |
235 | } | 236 | } |
236 | } | 237 | } |
237 | gpio_config.gpio_chip.ngpio = ngpio; | 238 | viafb_gpio_config.gpio_chip.ngpio = ngpio; |
238 | gpio_config.gpio_chip.names = gpio_config.gpio_names; | 239 | viafb_gpio_config.gpio_chip.names = viafb_gpio_config.gpio_names; |
239 | gpio_config.vdev = vdev; | 240 | viafb_gpio_config.vdev = vdev; |
240 | if (ngpio == 0) { | 241 | if (ngpio == 0) { |
241 | printk(KERN_INFO "viafb: no GPIOs configured\n"); | 242 | printk(KERN_INFO "viafb: no GPIOs configured\n"); |
242 | return 0; | 243 | return 0; |
@@ -245,18 +246,18 @@ static __devinit int viafb_gpio_probe(struct platform_device *platdev) | |||
245 | * Enable the ports. They come in pairs, with a single | 246 | * Enable the ports. They come in pairs, with a single |
246 | * enable bit for both. | 247 | * enable bit for both. |
247 | */ | 248 | */ |
248 | spin_lock_irqsave(&gpio_config.vdev->reg_lock, flags); | 249 | spin_lock_irqsave(&viafb_gpio_config.vdev->reg_lock, flags); |
249 | for (i = 0; i < ngpio; i += 2) | 250 | for (i = 0; i < ngpio; i += 2) |
250 | viafb_gpio_enable(gpio_config.active_gpios[i]); | 251 | viafb_gpio_enable(viafb_gpio_config.active_gpios[i]); |
251 | spin_unlock_irqrestore(&gpio_config.vdev->reg_lock, flags); | 252 | spin_unlock_irqrestore(&viafb_gpio_config.vdev->reg_lock, flags); |
252 | /* | 253 | /* |
253 | * Get registered. | 254 | * Get registered. |
254 | */ | 255 | */ |
255 | gpio_config.gpio_chip.base = -1; /* Dynamic */ | 256 | viafb_gpio_config.gpio_chip.base = -1; /* Dynamic */ |
256 | ret = gpiochip_add(&gpio_config.gpio_chip); | 257 | ret = gpiochip_add(&viafb_gpio_config.gpio_chip); |
257 | if (ret) { | 258 | if (ret) { |
258 | printk(KERN_ERR "viafb: failed to add gpios (%d)\n", ret); | 259 | printk(KERN_ERR "viafb: failed to add gpios (%d)\n", ret); |
259 | gpio_config.gpio_chip.ngpio = 0; | 260 | viafb_gpio_config.gpio_chip.ngpio = 0; |
260 | } | 261 | } |
261 | #ifdef CONFIG_PM | 262 | #ifdef CONFIG_PM |
262 | viafb_pm_register(&viafb_gpio_pm_hooks); | 263 | viafb_pm_register(&viafb_gpio_pm_hooks); |
@@ -277,8 +278,8 @@ static int viafb_gpio_remove(struct platform_device *platdev) | |||
277 | /* | 278 | /* |
278 | * Get unregistered. | 279 | * Get unregistered. |
279 | */ | 280 | */ |
280 | if (gpio_config.gpio_chip.ngpio > 0) { | 281 | if (viafb_gpio_config.gpio_chip.ngpio > 0) { |
281 | ret = gpiochip_remove(&gpio_config.gpio_chip); | 282 | ret = gpiochip_remove(&viafb_gpio_config.gpio_chip); |
282 | if (ret) { /* Somebody still using it? */ | 283 | if (ret) { /* Somebody still using it? */ |
283 | printk(KERN_ERR "Viafb: GPIO remove failed\n"); | 284 | printk(KERN_ERR "Viafb: GPIO remove failed\n"); |
284 | return ret; | 285 | return ret; |
@@ -287,11 +288,11 @@ static int viafb_gpio_remove(struct platform_device *platdev) | |||
287 | /* | 288 | /* |
288 | * Disable the ports. | 289 | * Disable the ports. |
289 | */ | 290 | */ |
290 | spin_lock_irqsave(&gpio_config.vdev->reg_lock, flags); | 291 | spin_lock_irqsave(&viafb_gpio_config.vdev->reg_lock, flags); |
291 | for (i = 0; i < gpio_config.gpio_chip.ngpio; i += 2) | 292 | for (i = 0; i < viafb_gpio_config.gpio_chip.ngpio; i += 2) |
292 | viafb_gpio_disable(gpio_config.active_gpios[i]); | 293 | viafb_gpio_disable(viafb_gpio_config.active_gpios[i]); |
293 | gpio_config.gpio_chip.ngpio = 0; | 294 | viafb_gpio_config.gpio_chip.ngpio = 0; |
294 | spin_unlock_irqrestore(&gpio_config.vdev->reg_lock, flags); | 295 | spin_unlock_irqrestore(&viafb_gpio_config.vdev->reg_lock, flags); |
295 | return ret; | 296 | return ret; |
296 | } | 297 | } |
297 | 298 | ||
diff --git a/drivers/w1/masters/Kconfig b/drivers/w1/masters/Kconfig index 7c608c5ccf84..00d615d7aa21 100644 --- a/drivers/w1/masters/Kconfig +++ b/drivers/w1/masters/Kconfig | |||
@@ -42,7 +42,7 @@ config W1_MASTER_MXC | |||
42 | 42 | ||
43 | config W1_MASTER_DS1WM | 43 | config W1_MASTER_DS1WM |
44 | tristate "Maxim DS1WM 1-wire busmaster" | 44 | tristate "Maxim DS1WM 1-wire busmaster" |
45 | depends on W1 && ARM && HAVE_CLK | 45 | depends on W1 |
46 | help | 46 | help |
47 | Say Y here to enable the DS1WM 1-wire driver, such as that | 47 | Say Y here to enable the DS1WM 1-wire driver, such as that |
48 | in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like | 48 | in HP iPAQ devices like h5xxx, h2200, and ASIC3-based like |
diff --git a/drivers/w1/masters/ds1wm.c b/drivers/w1/masters/ds1wm.c index 2f4fa02744a5..ad57593d224a 100644 --- a/drivers/w1/masters/ds1wm.c +++ b/drivers/w1/masters/ds1wm.c | |||
@@ -33,6 +33,7 @@ | |||
33 | #define DS1WM_INT 0x02 /* R/W interrupt status */ | 33 | #define DS1WM_INT 0x02 /* R/W interrupt status */ |
34 | #define DS1WM_INT_EN 0x03 /* R/W interrupt enable */ | 34 | #define DS1WM_INT_EN 0x03 /* R/W interrupt enable */ |
35 | #define DS1WM_CLKDIV 0x04 /* R/W 5 bits of divisor and pre-scale */ | 35 | #define DS1WM_CLKDIV 0x04 /* R/W 5 bits of divisor and pre-scale */ |
36 | #define DS1WM_CNTRL 0x05 /* R/W master control register (not used yet) */ | ||
36 | 37 | ||
37 | #define DS1WM_CMD_1W_RESET (1 << 0) /* force reset on 1-wire bus */ | 38 | #define DS1WM_CMD_1W_RESET (1 << 0) /* force reset on 1-wire bus */ |
38 | #define DS1WM_CMD_SRA (1 << 1) /* enable Search ROM accelerator mode */ | 39 | #define DS1WM_CMD_SRA (1 << 1) /* enable Search ROM accelerator mode */ |
@@ -56,6 +57,7 @@ | |||
56 | #define DS1WM_INTEN_ERSRF (1 << 5) /* enable rx shift register full int */ | 57 | #define DS1WM_INTEN_ERSRF (1 << 5) /* enable rx shift register full int */ |
57 | #define DS1WM_INTEN_DQO (1 << 6) /* enable direct bus driving ops */ | 58 | #define DS1WM_INTEN_DQO (1 << 6) /* enable direct bus driving ops */ |
58 | 59 | ||
60 | #define DS1WM_INTEN_NOT_IAS (~DS1WM_INTEN_IAS) /* all but INTR active state */ | ||
59 | 61 | ||
60 | #define DS1WM_TIMEOUT (HZ * 5) | 62 | #define DS1WM_TIMEOUT (HZ * 5) |
61 | 63 | ||
@@ -63,41 +65,50 @@ static struct { | |||
63 | unsigned long freq; | 65 | unsigned long freq; |
64 | unsigned long divisor; | 66 | unsigned long divisor; |
65 | } freq[] = { | 67 | } freq[] = { |
66 | { 4000000, 0x8 }, | 68 | { 1000000, 0x80 }, |
67 | { 5000000, 0x2 }, | 69 | { 2000000, 0x84 }, |
68 | { 6000000, 0x5 }, | 70 | { 3000000, 0x81 }, |
69 | { 7000000, 0x3 }, | 71 | { 4000000, 0x88 }, |
70 | { 8000000, 0xc }, | 72 | { 5000000, 0x82 }, |
71 | { 10000000, 0x6 }, | 73 | { 6000000, 0x85 }, |
72 | { 12000000, 0x9 }, | 74 | { 7000000, 0x83 }, |
73 | { 14000000, 0x7 }, | 75 | { 8000000, 0x8c }, |
74 | { 16000000, 0x10 }, | 76 | { 10000000, 0x86 }, |
75 | { 20000000, 0xa }, | 77 | { 12000000, 0x89 }, |
76 | { 24000000, 0xd }, | 78 | { 14000000, 0x87 }, |
77 | { 28000000, 0xb }, | 79 | { 16000000, 0x90 }, |
78 | { 32000000, 0x14 }, | 80 | { 20000000, 0x8a }, |
79 | { 40000000, 0xe }, | 81 | { 24000000, 0x8d }, |
80 | { 48000000, 0x11 }, | 82 | { 28000000, 0x8b }, |
81 | { 56000000, 0xf }, | 83 | { 32000000, 0x94 }, |
82 | { 64000000, 0x18 }, | 84 | { 40000000, 0x8e }, |
83 | { 80000000, 0x12 }, | 85 | { 48000000, 0x91 }, |
84 | { 96000000, 0x15 }, | 86 | { 56000000, 0x8f }, |
85 | { 112000000, 0x13 }, | 87 | { 64000000, 0x98 }, |
86 | { 128000000, 0x1c }, | 88 | { 80000000, 0x92 }, |
89 | { 96000000, 0x95 }, | ||
90 | { 112000000, 0x93 }, | ||
91 | { 128000000, 0x9c }, | ||
92 | /* you can continue this table, consult the OPERATION - CLOCK DIVISOR | ||
93 | section of the ds1wm spec sheet. */ | ||
87 | }; | 94 | }; |
88 | 95 | ||
89 | struct ds1wm_data { | 96 | struct ds1wm_data { |
90 | void __iomem *map; | 97 | void __iomem *map; |
91 | int bus_shift; /* # of shifts to calc register offsets */ | 98 | int bus_shift; /* # of shifts to calc register offsets */ |
92 | struct platform_device *pdev; | 99 | struct platform_device *pdev; |
93 | const struct mfd_cell *cell; | 100 | const struct mfd_cell *cell; |
94 | int irq; | 101 | int irq; |
95 | int active_high; | 102 | int slave_present; |
96 | int slave_present; | 103 | void *reset_complete; |
97 | void *reset_complete; | 104 | void *read_complete; |
98 | void *read_complete; | 105 | void *write_complete; |
99 | void *write_complete; | 106 | int read_error; |
100 | u8 read_byte; /* last byte received */ | 107 | /* last byte received */ |
108 | u8 read_byte; | ||
109 | /* byte to write that disables all interrupts, */ | ||
110 | /* considering active_state (IAS) (an optimization) */ | ||
111 | u8 int_en_reg_none; | ||
101 | }; | 112 | }; |
102 | 113 | ||
103 | static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, | 114 | static inline void ds1wm_write_register(struct ds1wm_data *ds1wm_data, u32 reg, |
@@ -115,23 +126,39 @@ static inline u8 ds1wm_read_register(struct ds1wm_data *ds1wm_data, u32 reg) | |||
115 | static irqreturn_t ds1wm_isr(int isr, void *data) | 126 | static irqreturn_t ds1wm_isr(int isr, void *data) |
116 | { | 127 | { |
117 | struct ds1wm_data *ds1wm_data = data; | 128 | struct ds1wm_data *ds1wm_data = data; |
118 | u8 intr = ds1wm_read_register(ds1wm_data, DS1WM_INT); | 129 | u8 intr; |
130 | u8 inten = ds1wm_read_register(ds1wm_data, DS1WM_INT_EN); | ||
131 | /* if no bits are set in int enable register (except the IAS) | ||
132 | then go no further; reading the regs below has side effects */ | ||
133 | if (!(inten & DS1WM_INTEN_NOT_IAS)) | ||
134 | return IRQ_NONE; | ||
119 | 135 | ||
120 | ds1wm_data->slave_present = (intr & DS1WM_INT_PDR) ? 0 : 1; | 136 | ds1wm_write_register(ds1wm_data, |
137 | DS1WM_INT_EN, ds1wm_data->int_en_reg_none); | ||
121 | 138 | ||
122 | if ((intr & DS1WM_INT_PD) && ds1wm_data->reset_complete) | 139 | /* this read action clears the INTR and certain flags in ds1wm */ |
123 | complete(ds1wm_data->reset_complete); | 140 | intr = ds1wm_read_register(ds1wm_data, DS1WM_INT); |
124 | 141 | ||
125 | if ((intr & DS1WM_INT_TSRE) && ds1wm_data->write_complete) | 142 | ds1wm_data->slave_present = (intr & DS1WM_INT_PDR) ? 0 : 1; |
126 | complete(ds1wm_data->write_complete); | ||
127 | 143 | ||
144 | if ((intr & DS1WM_INT_TSRE) && ds1wm_data->write_complete) { | ||
145 | inten &= ~DS1WM_INTEN_ETMT; | ||
146 | complete(ds1wm_data->write_complete); | ||
147 | } | ||
128 | if (intr & DS1WM_INT_RBF) { | 148 | if (intr & DS1WM_INT_RBF) { |
149 | /* this read clears the RBF flag */ | ||
129 | ds1wm_data->read_byte = ds1wm_read_register(ds1wm_data, | 150 | ds1wm_data->read_byte = ds1wm_read_register(ds1wm_data, |
130 | DS1WM_DATA); | 151 | DS1WM_DATA); |
152 | inten &= ~DS1WM_INTEN_ERBF; | ||
131 | if (ds1wm_data->read_complete) | 153 | if (ds1wm_data->read_complete) |
132 | complete(ds1wm_data->read_complete); | 154 | complete(ds1wm_data->read_complete); |
133 | } | 155 | } |
156 | if ((intr & DS1WM_INT_PD) && ds1wm_data->reset_complete) { | ||
157 | inten &= ~DS1WM_INTEN_EPD; | ||
158 | complete(ds1wm_data->reset_complete); | ||
159 | } | ||
134 | 160 | ||
161 | ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, inten); | ||
135 | return IRQ_HANDLED; | 162 | return IRQ_HANDLED; |
136 | } | 163 | } |
137 | 164 | ||
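The reworked ISR above only touches the chip when at least one real interrupt source is enabled: int_en_reg_none carries nothing but the IAS polarity bit, and DS1WM_INTEN_NOT_IAS masks that bit out, so a spurious or shared interrupt is answered with IRQ_NONE before the side-effecting read of DS1WM_INT. A minimal standalone sketch of that guard; the EPD and IAS bit positions are assumed here, since only ERSRF and DQO appear in this hunk.

#include <stdio.h>

#define DS1WM_INTEN_EPD      (1 << 0)   /* assumed bit position */
#define DS1WM_INTEN_IAS      (1 << 1)   /* assumed bit position */
#define DS1WM_INTEN_NOT_IAS  ((unsigned char)~DS1WM_INTEN_IAS)

/* same test as ds1wm_isr(): ignore the polarity bit, then ask whether
 * any real interrupt source is enabled at all */
static const char *isr_decision(unsigned char inten)
{
	return (inten & DS1WM_INTEN_NOT_IAS) ? "handle" : "IRQ_NONE";
}

int main(void)
{
	unsigned char idle  = DS1WM_INTEN_IAS;                   /* int_en_reg_none */
	unsigned char armed = DS1WM_INTEN_IAS | DS1WM_INTEN_EPD; /* reset in flight */

	printf("idle:  %s\n", isr_decision(idle));   /* -> IRQ_NONE */
	printf("armed: %s\n", isr_decision(armed));  /* -> handle */
	return 0;
}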
@@ -142,33 +169,19 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data) | |||
142 | 169 | ||
143 | ds1wm_data->reset_complete = &reset_done; | 170 | ds1wm_data->reset_complete = &reset_done; |
144 | 171 | ||
172 | /* enable Presence detect only */ | ||
145 | ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, DS1WM_INTEN_EPD | | 173 | ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, DS1WM_INTEN_EPD | |
146 | (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); | 174 | ds1wm_data->int_en_reg_none); |
147 | 175 | ||
148 | ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_1W_RESET); | 176 | ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_1W_RESET); |
149 | 177 | ||
150 | timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); | 178 | timeleft = wait_for_completion_timeout(&reset_done, DS1WM_TIMEOUT); |
151 | ds1wm_data->reset_complete = NULL; | 179 | ds1wm_data->reset_complete = NULL; |
152 | if (!timeleft) { | 180 | if (!timeleft) { |
153 | dev_err(&ds1wm_data->pdev->dev, "reset failed\n"); | 181 | dev_err(&ds1wm_data->pdev->dev, "reset failed, timed out\n"); |
154 | return 1; | 182 | return 1; |
155 | } | 183 | } |
156 | 184 | ||
157 | /* Wait for the end of the reset. According to the specs, the time | ||
158 | * from when the interrupt is asserted to the end of the reset is: | ||
159 | * tRSTH - tPDH - tPDL - tPDI | ||
160 | * 625 us - 60 us - 240 us - 100 ns = 324.9 us | ||
161 | * | ||
162 | * We'll wait a bit longer just to be sure. | ||
163 | * Was udelay(500), but if it is going to busywait the cpu that long, | ||
164 | * might as well come back later. | ||
165 | */ | ||
166 | msleep(1); | ||
167 | |||
168 | ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, | ||
169 | DS1WM_INTEN_ERBF | DS1WM_INTEN_ETMT | DS1WM_INTEN_EPD | | ||
170 | (ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0)); | ||
171 | |||
172 | if (!ds1wm_data->slave_present) { | 185 | if (!ds1wm_data->slave_present) { |
173 | dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); | 186 | dev_dbg(&ds1wm_data->pdev->dev, "reset: no devices found\n"); |
174 | return 1; | 187 | return 1; |
@@ -179,26 +192,47 @@ static int ds1wm_reset(struct ds1wm_data *ds1wm_data) | |||
179 | 192 | ||
180 | static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) | 193 | static int ds1wm_write(struct ds1wm_data *ds1wm_data, u8 data) |
181 | { | 194 | { |
195 | unsigned long timeleft; | ||
182 | DECLARE_COMPLETION_ONSTACK(write_done); | 196 | DECLARE_COMPLETION_ONSTACK(write_done); |
183 | ds1wm_data->write_complete = &write_done; | 197 | ds1wm_data->write_complete = &write_done; |
184 | 198 | ||
199 | ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, | ||
200 | ds1wm_data->int_en_reg_none | DS1WM_INTEN_ETMT); | ||
201 | |||
185 | ds1wm_write_register(ds1wm_data, DS1WM_DATA, data); | 202 | ds1wm_write_register(ds1wm_data, DS1WM_DATA, data); |
186 | 203 | ||
187 | wait_for_completion_timeout(&write_done, DS1WM_TIMEOUT); | 204 | timeleft = wait_for_completion_timeout(&write_done, DS1WM_TIMEOUT); |
205 | |||
188 | ds1wm_data->write_complete = NULL; | 206 | ds1wm_data->write_complete = NULL; |
207 | if (!timeleft) { | ||
208 | dev_err(&ds1wm_data->pdev->dev, "write failed, timed out\n"); | ||
209 | return -ETIMEDOUT; | ||
210 | } | ||
189 | 211 | ||
190 | return 0; | 212 | return 0; |
191 | } | 213 | } |
192 | 214 | ||
193 | static int ds1wm_read(struct ds1wm_data *ds1wm_data, unsigned char write_data) | 215 | static u8 ds1wm_read(struct ds1wm_data *ds1wm_data, unsigned char write_data) |
194 | { | 216 | { |
217 | unsigned long timeleft; | ||
218 | u8 intEnable = DS1WM_INTEN_ERBF | ds1wm_data->int_en_reg_none; | ||
195 | DECLARE_COMPLETION_ONSTACK(read_done); | 219 | DECLARE_COMPLETION_ONSTACK(read_done); |
220 | |||
221 | ds1wm_read_register(ds1wm_data, DS1WM_DATA); | ||
222 | |||
196 | ds1wm_data->read_complete = &read_done; | 223 | ds1wm_data->read_complete = &read_done; |
224 | ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, intEnable); | ||
197 | 225 | ||
198 | ds1wm_write(ds1wm_data, write_data); | 226 | ds1wm_write_register(ds1wm_data, DS1WM_DATA, write_data); |
199 | wait_for_completion_timeout(&read_done, DS1WM_TIMEOUT); | 227 | timeleft = wait_for_completion_timeout(&read_done, DS1WM_TIMEOUT); |
200 | ds1wm_data->read_complete = NULL; | ||
201 | 228 | ||
229 | ds1wm_data->read_complete = NULL; | ||
230 | if (!timeleft) { | ||
231 | dev_err(&ds1wm_data->pdev->dev, "read failed, timed out\n"); | ||
232 | ds1wm_data->read_error = -ETIMEDOUT; | ||
233 | return 0xFF; | ||
234 | } | ||
235 | ds1wm_data->read_error = 0; | ||
202 | return ds1wm_data->read_byte; | 236 | return ds1wm_data->read_byte; |
203 | } | 237 | } |
204 | 238 | ||
@@ -206,8 +240,8 @@ static int ds1wm_find_divisor(int gclk) | |||
206 | { | 240 | { |
207 | int i; | 241 | int i; |
208 | 242 | ||
209 | for (i = 0; i < ARRAY_SIZE(freq); i++) | 243 | for (i = ARRAY_SIZE(freq)-1; i >= 0; --i) |
210 | if (gclk <= freq[i].freq) | 244 | if (gclk >= freq[i].freq) |
211 | return freq[i].divisor; | 245 | return freq[i].divisor; |
212 | 246 | ||
213 | return 0; | 247 | return 0; |
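Together with the reordered table above, the reversed scan in ds1wm_find_divisor() now returns the divisor for the highest listed frequency that the bus clock actually reaches, instead of the first table frequency at or above it. A small standalone sketch of that selection, using a trimmed copy of the table from this patch:

#include <stdio.h>
#include <stddef.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static struct {
	unsigned long freq;
	unsigned long divisor;
} freq[] = {              /* subset of the table added above */
	{  1000000, 0x80 },
	{  4000000, 0x88 },
	{  8000000, 0x8c },
	{ 12000000, 0x89 },
	{ 16000000, 0x90 },
};

static int ds1wm_find_divisor(int gclk)
{
	int i;

	/* highest table frequency that the clock still reaches */
	for (i = ARRAY_SIZE(freq) - 1; i >= 0; --i)
		if (gclk >= (int)freq[i].freq)
			return freq[i].divisor;

	return 0; /* clock slower than the slowest table entry */
}

int main(void)
{
	printf("%#x\n", ds1wm_find_divisor(14318000)); /* -> 0x89 (12 MHz row) */
	printf("%#x\n", ds1wm_find_divisor(500000));   /* -> 0    (no match)   */
	return 0;
}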
@@ -216,12 +250,14 @@ static int ds1wm_find_divisor(int gclk) | |||
216 | static void ds1wm_up(struct ds1wm_data *ds1wm_data) | 250 | static void ds1wm_up(struct ds1wm_data *ds1wm_data) |
217 | { | 251 | { |
218 | int divisor; | 252 | int divisor; |
219 | struct ds1wm_driver_data *plat = mfd_get_data(ds1wm_data->pdev); | 253 | struct ds1wm_driver_data *plat = ds1wm_data->pdev->dev.platform_data; |
220 | 254 | ||
221 | if (ds1wm_data->cell->enable) | 255 | if (ds1wm_data->cell->enable) |
222 | ds1wm_data->cell->enable(ds1wm_data->pdev); | 256 | ds1wm_data->cell->enable(ds1wm_data->pdev); |
223 | 257 | ||
224 | divisor = ds1wm_find_divisor(plat->clock_rate); | 258 | divisor = ds1wm_find_divisor(plat->clock_rate); |
259 | dev_dbg(&ds1wm_data->pdev->dev, | ||
260 | "found divisor 0x%x for clock %d\n", divisor, plat->clock_rate); | ||
225 | if (divisor == 0) { | 261 | if (divisor == 0) { |
226 | dev_err(&ds1wm_data->pdev->dev, | 262 | dev_err(&ds1wm_data->pdev->dev, |
227 | "no suitable divisor for %dHz clock\n", | 263 | "no suitable divisor for %dHz clock\n", |
@@ -242,7 +278,7 @@ static void ds1wm_down(struct ds1wm_data *ds1wm_data) | |||
242 | 278 | ||
243 | /* Disable interrupts. */ | 279 | /* Disable interrupts. */ |
244 | ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, | 280 | ds1wm_write_register(ds1wm_data, DS1WM_INT_EN, |
245 | ds1wm_data->active_high ? DS1WM_INTEN_IAS : 0); | 281 | ds1wm_data->int_en_reg_none); |
246 | 282 | ||
247 | if (ds1wm_data->cell->disable) | 283 | if (ds1wm_data->cell->disable) |
248 | ds1wm_data->cell->disable(ds1wm_data->pdev); | 284 | ds1wm_data->cell->disable(ds1wm_data->pdev); |
@@ -279,41 +315,121 @@ static void ds1wm_search(void *data, struct w1_master *master_dev, | |||
279 | { | 315 | { |
280 | struct ds1wm_data *ds1wm_data = data; | 316 | struct ds1wm_data *ds1wm_data = data; |
281 | int i; | 317 | int i; |
282 | unsigned long long rom_id; | 318 | int ms_discrep_bit = -1; |
283 | 319 | u64 r = 0; /* holds the progress of the search */ | |
284 | /* XXX We need to iterate for multiple devices per the DS1WM docs. | 320 | u64 r_prime, d; |
285 | * See http://www.maxim-ic.com/appnotes.cfm/appnote_number/120. */ | 321 | unsigned slaves_found = 0; |
286 | if (ds1wm_reset(ds1wm_data)) | 322 | unsigned int pass = 0; |
287 | return; | 323 | |
288 | 324 | dev_dbg(&ds1wm_data->pdev->dev, "search begin\n"); | |
289 | ds1wm_write(ds1wm_data, search_type); | 325 | while (true) { |
290 | ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_SRA); | 326 | ++pass; |
291 | 327 | if (pass > 100) { | |
292 | for (rom_id = 0, i = 0; i < 16; i++) { | 328 | dev_dbg(&ds1wm_data->pdev->dev, |
293 | 329 | "too many attempts (100), search aborted\n"); | |
294 | unsigned char resp, r, d; | 330 | return; |
295 | 331 | } | |
296 | resp = ds1wm_read(ds1wm_data, 0x00); | 332 | |
297 | 333 | if (ds1wm_reset(ds1wm_data)) { | |
298 | r = ((resp & 0x02) >> 1) | | 334 | dev_dbg(&ds1wm_data->pdev->dev, |
299 | ((resp & 0x08) >> 2) | | 335 | "pass: %d reset error (or no slaves)\n", pass); |
300 | ((resp & 0x20) >> 3) | | 336 | break; |
301 | ((resp & 0x80) >> 4); | 337 | } |
302 | 338 | ||
303 | d = ((resp & 0x01) >> 0) | | 339 | dev_dbg(&ds1wm_data->pdev->dev, |
304 | ((resp & 0x04) >> 1) | | 340 | "pass: %d r : %0#18llx writing SEARCH_ROM\n", pass, r); |
305 | ((resp & 0x10) >> 2) | | 341 | ds1wm_write(ds1wm_data, search_type); |
306 | ((resp & 0x40) >> 3); | 342 | dev_dbg(&ds1wm_data->pdev->dev, |
307 | 343 | "pass: %d entering ASM\n", pass); | |
308 | rom_id |= (unsigned long long) r << (i * 4); | 344 | ds1wm_write_register(ds1wm_data, DS1WM_CMD, DS1WM_CMD_SRA); |
309 | 345 | dev_dbg(&ds1wm_data->pdev->dev, | |
310 | } | 346 | "pass: %d begining nibble loop\n", pass); |
311 | dev_dbg(&ds1wm_data->pdev->dev, "found 0x%08llX\n", rom_id); | 347 | |
312 | 348 | r_prime = 0; | |
313 | ds1wm_write_register(ds1wm_data, DS1WM_CMD, ~DS1WM_CMD_SRA); | 349 | d = 0; |
314 | ds1wm_reset(ds1wm_data); | 350 | /* we work one nibble at a time */ |
315 | 351 | /* each nibble is interleaved to form a byte */ | |
316 | slave_found(master_dev, rom_id); | 352 | for (i = 0; i < 16; i++) { |
353 | |||
354 | unsigned char resp, _r, _r_prime, _d; | ||
355 | |||
356 | _r = (r >> (4*i)) & 0xf; | ||
357 | _r = ((_r & 0x1) << 1) | | ||
358 | ((_r & 0x2) << 2) | | ||
359 | ((_r & 0x4) << 3) | | ||
360 | ((_r & 0x8) << 4); | ||
361 | |||
362 | /* writes _r, then reads back: */ | ||
363 | resp = ds1wm_read(ds1wm_data, _r); | ||
364 | |||
365 | if (ds1wm_data->read_error) { | ||
366 | dev_err(&ds1wm_data->pdev->dev, | ||
367 | "pass: %d nibble: %d read error\n", pass, i); | ||
368 | break; | ||
369 | } | ||
370 | |||
371 | _r_prime = ((resp & 0x02) >> 1) | | ||
372 | ((resp & 0x08) >> 2) | | ||
373 | ((resp & 0x20) >> 3) | | ||
374 | ((resp & 0x80) >> 4); | ||
375 | |||
376 | _d = ((resp & 0x01) >> 0) | | ||
377 | ((resp & 0x04) >> 1) | | ||
378 | ((resp & 0x10) >> 2) | | ||
379 | ((resp & 0x40) >> 3); | ||
380 | |||
381 | r_prime |= (unsigned long long) _r_prime << (i * 4); | ||
382 | d |= (unsigned long long) _d << (i * 4); | ||
383 | |||
384 | } | ||
385 | if (ds1wm_data->read_error) { | ||
386 | dev_err(&ds1wm_data->pdev->dev, | ||
387 | "pass: %d read error, retrying\n", pass); | ||
388 | break; | ||
389 | } | ||
390 | dev_dbg(&ds1wm_data->pdev->dev, | ||
391 | "pass: %d r\': %0#18llx d:%0#18llx\n", | ||
392 | pass, r_prime, d); | ||
393 | dev_dbg(&ds1wm_data->pdev->dev, | ||
394 | "pass: %d nibble loop complete, exiting ASM\n", pass); | ||
395 | ds1wm_write_register(ds1wm_data, DS1WM_CMD, ~DS1WM_CMD_SRA); | ||
396 | dev_dbg(&ds1wm_data->pdev->dev, | ||
397 | "pass: %d resetting bus\n", pass); | ||
398 | ds1wm_reset(ds1wm_data); | ||
399 | if ((r_prime & ((u64)1 << 63)) && (d & ((u64)1 << 63))) { | ||
400 | dev_err(&ds1wm_data->pdev->dev, | ||
401 | "pass: %d bus error, retrying\n", pass); | ||
402 | continue; /* start over */ | ||
403 | } | ||
404 | |||
405 | |||
406 | dev_dbg(&ds1wm_data->pdev->dev, | ||
407 | "pass: %d found %0#18llx\n", pass, r_prime); | ||
408 | slave_found(master_dev, r_prime); | ||
409 | ++slaves_found; | ||
410 | dev_dbg(&ds1wm_data->pdev->dev, | ||
411 | "pass: %d complete, preparing next pass\n", pass); | ||
412 | |||
413 | /* any discrepancy for which we already chose the | ||
414 | '1' branch is now irrelevant; we reveal the | ||
415 | next branch with this: */ | ||
416 | d &= ~r; | ||
417 | /* find last bit set, i.e. the most signif. bit set */ | ||
418 | ms_discrep_bit = fls64(d) - 1; | ||
419 | dev_dbg(&ds1wm_data->pdev->dev, | ||
420 | "pass: %d new d:%0#18llx MS discrep bit:%d\n", | ||
421 | pass, d, ms_discrep_bit); | ||
422 | |||
423 | /* prev_ms_discrep_bit = ms_discrep_bit; | ||
424 | prepare for next ROM search: */ | ||
425 | if (ms_discrep_bit == -1) | ||
426 | break; | ||
427 | |||
428 | r = (r & ~(~0ull << (ms_discrep_bit))) | 1 << ms_discrep_bit; | ||
429 | } /* end while true */ | ||
430 | dev_dbg(&ds1wm_data->pdev->dev, | ||
431 | "pass: %d total: %d search done ms d bit pos: %d\n", pass, | ||
432 | slaves_found, ms_discrep_bit); | ||
317 | } | 433 | } |
318 | 434 | ||
319 | /* --------------------------------------------------------------------- */ | 435 | /* --------------------------------------------------------------------- */ |
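Each byte returned in search-accelerator mode interleaves two nibbles: the even bit positions carry the discrepancy flags (d) and the odd positions carry the ROM bits the master actually took (r'). A standalone sketch of the de-interleaving done in the nibble loop above, applied to one sample response byte:

#include <stdio.h>

/* Split one search-accelerator response byte into its discrepancy
 * nibble (_d, from the even bits) and its chosen-ROM nibble (_r_prime,
 * from the odd bits), exactly as the nibble loop above does. */
static void split_sra_byte(unsigned char resp,
			   unsigned char *_r_prime, unsigned char *_d)
{
	*_r_prime = ((resp & 0x02) >> 1) |
		    ((resp & 0x08) >> 2) |
		    ((resp & 0x20) >> 3) |
		    ((resp & 0x80) >> 4);

	*_d = ((resp & 0x01) >> 0) |
	      ((resp & 0x04) >> 1) |
	      ((resp & 0x10) >> 2) |
	      ((resp & 0x40) >> 3);
}

int main(void)
{
	unsigned char r_prime, d;

	/* 0xA1 = 1010 0001: odd bits (r') = 1,1,0,0 -> 0xc,
	 * even bits (d) = 0,0,0,1 -> 0x1 */
	split_sra_byte(0xA1, &r_prime, &d);
	printf("r' nibble: 0x%x  d nibble: 0x%x\n", r_prime, d);
	return 0;
}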
@@ -351,13 +467,21 @@ static int ds1wm_probe(struct platform_device *pdev) | |||
351 | ret = -ENOMEM; | 467 | ret = -ENOMEM; |
352 | goto err0; | 468 | goto err0; |
353 | } | 469 | } |
354 | plat = mfd_get_data(pdev); | ||
355 | 470 | ||
356 | /* calculate bus shift from mem resource */ | 471 | /* calculate bus shift from mem resource */ |
357 | ds1wm_data->bus_shift = resource_size(res) >> 3; | 472 | ds1wm_data->bus_shift = resource_size(res) >> 3; |
358 | 473 | ||
359 | ds1wm_data->pdev = pdev; | 474 | ds1wm_data->pdev = pdev; |
360 | ds1wm_data->cell = mfd_get_cell(pdev); | 475 | ds1wm_data->cell = mfd_get_cell(pdev); |
476 | if (!ds1wm_data->cell) { | ||
477 | ret = -ENODEV; | ||
478 | goto err1; | ||
479 | } | ||
480 | plat = pdev->dev.platform_data; | ||
481 | if (!plat) { | ||
482 | ret = -ENODEV; | ||
483 | goto err1; | ||
484 | } | ||
361 | 485 | ||
362 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | 486 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); |
363 | if (!res) { | 487 | if (!res) { |
@@ -365,15 +489,15 @@ static int ds1wm_probe(struct platform_device *pdev) | |||
365 | goto err1; | 489 | goto err1; |
366 | } | 490 | } |
367 | ds1wm_data->irq = res->start; | 491 | ds1wm_data->irq = res->start; |
368 | ds1wm_data->active_high = plat->active_high; | 492 | ds1wm_data->int_en_reg_none = (plat->active_high ? DS1WM_INTEN_IAS : 0); |
369 | 493 | ||
370 | if (res->flags & IORESOURCE_IRQ_HIGHEDGE) | 494 | if (res->flags & IORESOURCE_IRQ_HIGHEDGE) |
371 | irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); | 495 | irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_RISING); |
372 | if (res->flags & IORESOURCE_IRQ_LOWEDGE) | 496 | if (res->flags & IORESOURCE_IRQ_LOWEDGE) |
373 | irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); | 497 | irq_set_irq_type(ds1wm_data->irq, IRQ_TYPE_EDGE_FALLING); |
374 | 498 | ||
375 | ret = request_irq(ds1wm_data->irq, ds1wm_isr, IRQF_DISABLED, | 499 | ret = request_irq(ds1wm_data->irq, ds1wm_isr, |
376 | "ds1wm", ds1wm_data); | 500 | IRQF_DISABLED | IRQF_SHARED, "ds1wm", ds1wm_data); |
377 | if (ret) | 501 | if (ret) |
378 | goto err1; | 502 | goto err1; |
379 | 503 | ||
@@ -460,5 +584,6 @@ module_exit(ds1wm_exit); | |||
460 | 584 | ||
461 | MODULE_LICENSE("GPL"); | 585 | MODULE_LICENSE("GPL"); |
462 | MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>, " | 586 | MODULE_AUTHOR("Szabolcs Gyurko <szabolcs.gyurko@tlt.hu>, " |
463 | "Matt Reimer <mreimer@vpop.net>"); | 587 | "Matt Reimer <mreimer@vpop.net>," |
588 | "Jean-Francois Dagenais <dagenaisj@sonatest.com>"); | ||
464 | MODULE_DESCRIPTION("DS1WM w1 busmaster driver"); | 589 | MODULE_DESCRIPTION("DS1WM w1 busmaster driver"); |
diff --git a/drivers/w1/slaves/Kconfig b/drivers/w1/slaves/Kconfig index f0c909625bd1..d0cb01b42012 100644 --- a/drivers/w1/slaves/Kconfig +++ b/drivers/w1/slaves/Kconfig | |||
@@ -16,6 +16,13 @@ config W1_SLAVE_SMEM | |||
16 | Say Y here if you want to connect 1-wire | 16 | Say Y here if you want to connect 1-wire |
17 | simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire. | 17 | simple 64bit memory rom(ds2401/ds2411/ds1990*) to your wire. |
18 | 18 | ||
19 | config W1_SLAVE_DS2408 | ||
20 | tristate "8-Channel Addressable Switch (IO Expander) 0x29 family support (DS2408)" | ||
21 | help | ||
22 | Say Y here if you want to use a 1-wire | ||
23 | |||
24 | DS2408 8-Channel Addressable Switch (IO expander) device. | ||
25 | |||
19 | config W1_SLAVE_DS2423 | 26 | config W1_SLAVE_DS2423 |
20 | tristate "Counter 1-wire device (DS2423)" | 27 | tristate "Counter 1-wire device (DS2423)" |
21 | select CRC16 | 28 | select CRC16 |
@@ -61,6 +68,19 @@ config W1_SLAVE_DS2760 | |||
61 | 68 | ||
62 | If you are unsure, say N. | 69 | If you are unsure, say N. |
63 | 70 | ||
71 | config W1_SLAVE_DS2780 | ||
72 | tristate "Dallas 2780 battery monitor chip" | ||
73 | depends on W1 | ||
74 | help | ||
75 | If you enable this you will get support for the DS2780 battery | ||
76 | monitor chip. | ||
77 | |||
78 | The battery monitor chip is used in many batteries/devices | ||
79 | as the part responsible for charging/discharging/monitoring | ||
80 | Li+ batteries. | ||
81 | |||
82 | If you are unsure, say N. | ||
83 | |||
64 | config W1_SLAVE_BQ27000 | 84 | config W1_SLAVE_BQ27000 |
65 | tristate "BQ27000 slave support" | 85 | tristate "BQ27000 slave support" |
66 | depends on W1 | 86 | depends on W1 |
diff --git a/drivers/w1/slaves/Makefile b/drivers/w1/slaves/Makefile index 3c76350a24f7..1f31e9fb0b25 100644 --- a/drivers/w1/slaves/Makefile +++ b/drivers/w1/slaves/Makefile | |||
@@ -4,8 +4,10 @@ | |||
4 | 4 | ||
5 | obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o | 5 | obj-$(CONFIG_W1_SLAVE_THERM) += w1_therm.o |
6 | obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o | 6 | obj-$(CONFIG_W1_SLAVE_SMEM) += w1_smem.o |
7 | obj-$(CONFIG_W1_SLAVE_DS2408) += w1_ds2408.o | ||
7 | obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o | 8 | obj-$(CONFIG_W1_SLAVE_DS2423) += w1_ds2423.o |
8 | obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o | 9 | obj-$(CONFIG_W1_SLAVE_DS2431) += w1_ds2431.o |
9 | obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o | 10 | obj-$(CONFIG_W1_SLAVE_DS2433) += w1_ds2433.o |
10 | obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o | 11 | obj-$(CONFIG_W1_SLAVE_DS2760) += w1_ds2760.o |
12 | obj-$(CONFIG_W1_SLAVE_DS2780) += w1_ds2780.o | ||
11 | obj-$(CONFIG_W1_SLAVE_BQ27000) += w1_bq27000.o | 13 | obj-$(CONFIG_W1_SLAVE_BQ27000) += w1_bq27000.o |
diff --git a/drivers/w1/slaves/w1_ds2408.c b/drivers/w1/slaves/w1_ds2408.c new file mode 100644 index 000000000000..c37781899d90 --- /dev/null +++ b/drivers/w1/slaves/w1_ds2408.c | |||
@@ -0,0 +1,402 @@ | |||
1 | /* | ||
2 | * w1_ds2408.c - w1 family 29 (DS2408) driver | ||
3 | * | ||
4 | * Copyright (c) 2010 Jean-Francois Dagenais <dagenaisj@sonatest.com> | ||
5 | * | ||
6 | * This source code is licensed under the GNU General Public License, | ||
7 | * Version 2. See the file COPYING for more details. | ||
8 | */ | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/module.h> | ||
12 | #include <linux/moduleparam.h> | ||
13 | #include <linux/device.h> | ||
14 | #include <linux/types.h> | ||
15 | #include <linux/delay.h> | ||
16 | #include <linux/slab.h> | ||
17 | |||
18 | #include "../w1.h" | ||
19 | #include "../w1_int.h" | ||
20 | #include "../w1_family.h" | ||
21 | |||
22 | MODULE_LICENSE("GPL"); | ||
23 | MODULE_AUTHOR("Jean-Francois Dagenais <dagenaisj@sonatest.com>"); | ||
24 | MODULE_DESCRIPTION("w1 family 29 driver for DS2408 8 Pin IO"); | ||
25 | |||
26 | |||
27 | #define W1_F29_RETRIES 3 | ||
28 | |||
29 | #define W1_F29_REG_LOGIG_STATE 0x88 /* R */ | ||
30 | #define W1_F29_REG_OUTPUT_LATCH_STATE 0x89 /* R */ | ||
31 | #define W1_F29_REG_ACTIVITY_LATCH_STATE 0x8A /* R */ | ||
32 | #define W1_F29_REG_COND_SEARCH_SELECT_MASK 0x8B /* RW */ | ||
33 | #define W1_F29_REG_COND_SEARCH_POL_SELECT 0x8C /* RW */ | ||
34 | #define W1_F29_REG_CONTROL_AND_STATUS 0x8D /* RW */ | ||
35 | |||
36 | #define W1_F29_FUNC_READ_PIO_REGS 0xF0 | ||
37 | #define W1_F29_FUNC_CHANN_ACCESS_READ 0xF5 | ||
38 | #define W1_F29_FUNC_CHANN_ACCESS_WRITE 0x5A | ||
39 | /* also used to write the control/status reg (0x8D): */ | ||
40 | #define W1_F29_FUNC_WRITE_COND_SEARCH_REG 0xCC | ||
41 | #define W1_F29_FUNC_RESET_ACTIVITY_LATCHES 0xC3 | ||
42 | |||
43 | #define W1_F29_SUCCESS_CONFIRM_BYTE 0xAA | ||
44 | |||
45 | static int _read_reg(struct w1_slave *sl, u8 address, unsigned char* buf) | ||
46 | { | ||
47 | u8 wrbuf[3]; | ||
48 | dev_dbg(&sl->dev, | ||
49 | "Reading with slave: %p, reg addr: %0#4x, buff addr: %p", | ||
50 | sl, (unsigned int)address, buf); | ||
51 | |||
52 | if (!buf) | ||
53 | return -EINVAL; | ||
54 | |||
55 | mutex_lock(&sl->master->mutex); | ||
56 | dev_dbg(&sl->dev, "mutex locked"); | ||
57 | |||
58 | if (w1_reset_select_slave(sl)) { | ||
59 | mutex_unlock(&sl->master->mutex); | ||
60 | return -EIO; | ||
61 | } | ||
62 | |||
63 | wrbuf[0] = W1_F29_FUNC_READ_PIO_REGS; | ||
64 | wrbuf[1] = address; | ||
65 | wrbuf[2] = 0; | ||
66 | w1_write_block(sl->master, wrbuf, 3); | ||
67 | *buf = w1_read_8(sl->master); | ||
68 | |||
69 | mutex_unlock(&sl->master->mutex); | ||
70 | dev_dbg(&sl->dev, "mutex unlocked"); | ||
71 | return 1; | ||
72 | } | ||
73 | |||
74 | static ssize_t w1_f29_read_state( | ||
75 | struct file *filp, struct kobject *kobj, | ||
76 | struct bin_attribute *bin_attr, | ||
77 | char *buf, loff_t off, size_t count) | ||
78 | { | ||
79 | dev_dbg(&kobj_to_w1_slave(kobj)->dev, | ||
80 | "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p", | ||
81 | bin_attr->attr.name, kobj, (unsigned int)off, count, buf); | ||
82 | if (count != 1 || off != 0) | ||
83 | return -EFAULT; | ||
84 | return _read_reg(kobj_to_w1_slave(kobj), W1_F29_REG_LOGIG_STATE, buf); | ||
85 | } | ||
86 | |||
87 | static ssize_t w1_f29_read_output( | ||
88 | struct file *filp, struct kobject *kobj, | ||
89 | struct bin_attribute *bin_attr, | ||
90 | char *buf, loff_t off, size_t count) | ||
91 | { | ||
92 | dev_dbg(&kobj_to_w1_slave(kobj)->dev, | ||
93 | "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p", | ||
94 | bin_attr->attr.name, kobj, (unsigned int)off, count, buf); | ||
95 | if (count != 1 || off != 0) | ||
96 | return -EFAULT; | ||
97 | return _read_reg(kobj_to_w1_slave(kobj), | ||
98 | W1_F29_REG_OUTPUT_LATCH_STATE, buf); | ||
99 | } | ||
100 | |||
101 | static ssize_t w1_f29_read_activity( | ||
102 | struct file *filp, struct kobject *kobj, | ||
103 | struct bin_attribute *bin_attr, | ||
104 | char *buf, loff_t off, size_t count) | ||
105 | { | ||
106 | dev_dbg(&kobj_to_w1_slave(kobj)->dev, | ||
107 | "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p", | ||
108 | bin_attr->attr.name, kobj, (unsigned int)off, count, buf); | ||
109 | if (count != 1 || off != 0) | ||
110 | return -EFAULT; | ||
111 | return _read_reg(kobj_to_w1_slave(kobj), | ||
112 | W1_F29_REG_ACTIVITY_LATCH_STATE, buf); | ||
113 | } | ||
114 | |||
115 | static ssize_t w1_f29_read_cond_search_mask( | ||
116 | struct file *filp, struct kobject *kobj, | ||
117 | struct bin_attribute *bin_attr, | ||
118 | char *buf, loff_t off, size_t count) | ||
119 | { | ||
120 | dev_dbg(&kobj_to_w1_slave(kobj)->dev, | ||
121 | "Reading %s kobj: %p, off: %0#10x, count: %zu, buff addr: %p", | ||
122 | bin_attr->attr.name, kobj, (unsigned int)off, count, buf); | ||
123 | if (count != 1 || off != 0) | ||
124 | return -EFAULT; | ||
125 | return _read_reg(kobj_to_w1_slave(kobj), | ||
126 | W1_F29_REG_COND_SEARCH_SELECT_MASK, buf); | ||
127 | } | ||
128 | |||
129 | static ssize_t w1_f29_read_cond_search_polarity( | ||
130 | struct file *filp, struct kobject *kobj, | ||
131 | struct bin_attribute *bin_attr, | ||
132 | char *buf, loff_t off, size_t count) | ||
133 | { | ||
134 | if (count != 1 || off != 0) | ||
135 | return -EFAULT; | ||
136 | return _read_reg(kobj_to_w1_slave(kobj), | ||
137 | W1_F29_REG_COND_SEARCH_POL_SELECT, buf); | ||
138 | } | ||
139 | |||
140 | static ssize_t w1_f29_read_status_control( | ||
141 | struct file *filp, struct kobject *kobj, | ||
142 | struct bin_attribute *bin_attr, | ||
143 | char *buf, loff_t off, size_t count) | ||
144 | { | ||
145 | if (count != 1 || off != 0) | ||
146 | return -EFAULT; | ||
147 | return _read_reg(kobj_to_w1_slave(kobj), | ||
148 | W1_F29_REG_CONTROL_AND_STATUS, buf); | ||
149 | } | ||
150 | |||
151 | |||
152 | |||
153 | |||
154 | static ssize_t w1_f29_write_output( | ||
155 | struct file *filp, struct kobject *kobj, | ||
156 | struct bin_attribute *bin_attr, | ||
157 | char *buf, loff_t off, size_t count) | ||
158 | { | ||
159 | struct w1_slave *sl = kobj_to_w1_slave(kobj); | ||
160 | u8 w1_buf[3]; | ||
161 | u8 readBack; | ||
162 | unsigned int retries = W1_F29_RETRIES; | ||
163 | |||
164 | if (count != 1 || off != 0) | ||
165 | return -EFAULT; | ||
166 | |||
167 | dev_dbg(&sl->dev, "locking mutex for write_output"); | ||
168 | mutex_lock(&sl->master->mutex); | ||
169 | dev_dbg(&sl->dev, "mutex locked"); | ||
170 | |||
171 | if (w1_reset_select_slave(sl)) | ||
172 | goto error; | ||
173 | |||
174 | while (retries--) { | ||
175 | w1_buf[0] = W1_F29_FUNC_CHANN_ACCESS_WRITE; | ||
176 | w1_buf[1] = *buf; | ||
177 | w1_buf[2] = ~(*buf); | ||
178 | w1_write_block(sl->master, w1_buf, 3); | ||
179 | |||
180 | readBack = w1_read_8(sl->master); | ||
181 | /* here the master could read another byte which | ||
182 | would be the PIO reg (the actual pin logic state) | ||
183 | since in this driver we don't know which pins are | ||
184 | ins and outs, there's no value in reading the state and | ||
185 | comparing it with (*buf), so end this command abruptly: */ | ||
186 | if (w1_reset_resume_command(sl->master)) | ||
187 | goto error; | ||
188 | |||
189 | if (readBack != 0xAA) { | ||
190 | /* try again, the slave is ready for a command */ | ||
191 | continue; | ||
192 | } | ||
193 | |||
194 | /* go read back the output latches */ | ||
195 | /* (the direct effect of the write above) */ | ||
196 | w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS; | ||
197 | w1_buf[1] = W1_F29_REG_OUTPUT_LATCH_STATE; | ||
198 | w1_buf[2] = 0; | ||
199 | w1_write_block(sl->master, w1_buf, 3); | ||
200 | /* read the result of the READ_PIO_REGS command */ | ||
201 | if (w1_read_8(sl->master) == *buf) { | ||
202 | /* success! */ | ||
203 | mutex_unlock(&sl->master->mutex); | ||
204 | dev_dbg(&sl->dev, | ||
205 | "mutex unlocked, retries:%d", retries); | ||
206 | return 1; | ||
207 | } | ||
208 | } | ||
209 | error: | ||
210 | mutex_unlock(&sl->master->mutex); | ||
211 | dev_dbg(&sl->dev, "mutex unlocked in error, retries:%d", retries); | ||
212 | |||
213 | return -EIO; | ||
214 | } | ||
215 | |||
216 | |||
217 | /** | ||
218 | * Writing to the activity file resets the activity latches. | ||
219 | */ | ||
220 | static ssize_t w1_f29_write_activity( | ||
221 | struct file *filp, struct kobject *kobj, | ||
222 | struct bin_attribute *bin_attr, | ||
223 | char *buf, loff_t off, size_t count) | ||
224 | { | ||
225 | struct w1_slave *sl = kobj_to_w1_slave(kobj); | ||
226 | unsigned int retries = W1_F29_RETRIES; | ||
227 | |||
228 | if (count != 1 || off != 0) | ||
229 | return -EFAULT; | ||
230 | |||
231 | mutex_lock(&sl->master->mutex); | ||
232 | |||
233 | if (w1_reset_select_slave(sl)) | ||
234 | goto error; | ||
235 | |||
236 | while (retries--) { | ||
237 | w1_write_8(sl->master, W1_F29_FUNC_RESET_ACTIVITY_LATCHES); | ||
238 | if (w1_read_8(sl->master) == W1_F29_SUCCESS_CONFIRM_BYTE) { | ||
239 | mutex_unlock(&sl->master->mutex); | ||
240 | return 1; | ||
241 | } | ||
242 | if (w1_reset_resume_command(sl->master)) | ||
243 | goto error; | ||
244 | } | ||
245 | |||
246 | error: | ||
247 | mutex_unlock(&sl->master->mutex); | ||
248 | return -EIO; | ||
249 | } | ||
250 | |||
251 | static ssize_t w1_f29_write_status_control( | ||
252 | struct file *filp, | ||
253 | struct kobject *kobj, | ||
254 | struct bin_attribute *bin_attr, | ||
255 | char *buf, | ||
256 | loff_t off, | ||
257 | size_t count) | ||
258 | { | ||
259 | struct w1_slave *sl = kobj_to_w1_slave(kobj); | ||
260 | u8 w1_buf[4]; | ||
261 | unsigned int retries = W1_F29_RETRIES; | ||
262 | |||
263 | if (count != 1 || off != 0) | ||
264 | return -EFAULT; | ||
265 | |||
266 | mutex_lock(&sl->master->mutex); | ||
267 | |||
268 | if (w1_reset_select_slave(sl)) | ||
269 | goto error; | ||
270 | |||
271 | while (retries--) { | ||
272 | w1_buf[0] = W1_F29_FUNC_WRITE_COND_SEARCH_REG; | ||
273 | w1_buf[1] = W1_F29_REG_CONTROL_AND_STATUS; | ||
274 | w1_buf[2] = 0; | ||
275 | w1_buf[3] = *buf; | ||
276 | |||
277 | w1_write_block(sl->master, w1_buf, 4); | ||
278 | if (w1_reset_resume_command(sl->master)) | ||
279 | goto error; | ||
280 | |||
281 | w1_buf[0] = W1_F29_FUNC_READ_PIO_REGS; | ||
282 | w1_buf[1] = W1_F29_REG_CONTROL_AND_STATUS; | ||
283 | w1_buf[2] = 0; | ||
284 | |||
285 | w1_write_block(sl->master, w1_buf, 3); | ||
286 | if (w1_read_8(sl->master) == *buf) { | ||
287 | /* success! */ | ||
288 | mutex_unlock(&sl->master->mutex); | ||
289 | return 1; | ||
290 | } | ||
291 | } | ||
292 | error: | ||
293 | mutex_unlock(&sl->master->mutex); | ||
294 | |||
295 | return -EIO; | ||
296 | } | ||
297 | |||
298 | |||
299 | |||
300 | #define NB_SYSFS_BIN_FILES 6 | ||
301 | static struct bin_attribute w1_f29_sysfs_bin_files[NB_SYSFS_BIN_FILES] = { | ||
302 | { | ||
303 | .attr = { | ||
304 | .name = "state", | ||
305 | .mode = S_IRUGO, | ||
306 | }, | ||
307 | .size = 1, | ||
308 | .read = w1_f29_read_state, | ||
309 | }, | ||
310 | { | ||
311 | .attr = { | ||
312 | .name = "output", | ||
313 | .mode = S_IRUGO | S_IWUSR | S_IWGRP, | ||
314 | }, | ||
315 | .size = 1, | ||
316 | .read = w1_f29_read_output, | ||
317 | .write = w1_f29_write_output, | ||
318 | }, | ||
319 | { | ||
320 | .attr = { | ||
321 | .name = "activity", | ||
322 | .mode = S_IRUGO, | ||
323 | }, | ||
324 | .size = 1, | ||
325 | .read = w1_f29_read_activity, | ||
326 | .write = w1_f29_write_activity, | ||
327 | }, | ||
328 | { | ||
329 | .attr = { | ||
330 | .name = "cond_search_mask", | ||
331 | .mode = S_IRUGO, | ||
332 | }, | ||
333 | .size = 1, | ||
334 | .read = w1_f29_read_cond_search_mask, | ||
335 | .write = 0, | ||
336 | }, | ||
337 | { | ||
338 | .attr = { | ||
339 | .name = "cond_search_polarity", | ||
340 | .mode = S_IRUGO, | ||
341 | }, | ||
342 | .size = 1, | ||
343 | .read = w1_f29_read_cond_search_polarity, | ||
344 | .write = 0, | ||
345 | }, | ||
346 | { | ||
347 | .attr = { | ||
348 | .name = "status_control", | ||
349 | .mode = S_IRUGO | S_IWUSR | S_IWGRP, | ||
350 | }, | ||
351 | .size = 1, | ||
352 | .read = w1_f29_read_status_control, | ||
353 | .write = w1_f29_write_status_control, | ||
354 | } | ||
355 | }; | ||
356 | |||
357 | static int w1_f29_add_slave(struct w1_slave *sl) | ||
358 | { | ||
359 | int err = 0; | ||
360 | int i; | ||
361 | |||
362 | for (i = 0; i < NB_SYSFS_BIN_FILES && !err; ++i) | ||
363 | err = sysfs_create_bin_file( | ||
364 | &sl->dev.kobj, | ||
365 | &(w1_f29_sysfs_bin_files[i])); | ||
366 | if (err) | ||
367 | while (--i >= 0) | ||
368 | sysfs_remove_bin_file(&sl->dev.kobj, | ||
369 | &(w1_f29_sysfs_bin_files[i])); | ||
370 | return err; | ||
371 | } | ||
372 | |||
373 | static void w1_f29_remove_slave(struct w1_slave *sl) | ||
374 | { | ||
375 | int i; | ||
376 | for (i = NB_SYSFS_BIN_FILES - 1; i >= 0; --i) | ||
377 | sysfs_remove_bin_file(&sl->dev.kobj, | ||
378 | &(w1_f29_sysfs_bin_files[i])); | ||
379 | } | ||
380 | |||
381 | static struct w1_family_ops w1_f29_fops = { | ||
382 | .add_slave = w1_f29_add_slave, | ||
383 | .remove_slave = w1_f29_remove_slave, | ||
384 | }; | ||
385 | |||
386 | static struct w1_family w1_family_29 = { | ||
387 | .fid = W1_FAMILY_DS2408, | ||
388 | .fops = &w1_f29_fops, | ||
389 | }; | ||
390 | |||
391 | static int __init w1_f29_init(void) | ||
392 | { | ||
393 | return w1_register_family(&w1_family_29); | ||
394 | } | ||
395 | |||
396 | static void __exit w1_f29_exit(void) | ||
397 | { | ||
398 | w1_unregister_family(&w1_family_29); | ||
399 | } | ||
400 | |||
401 | module_init(w1_f29_init); | ||
402 | module_exit(w1_f29_exit); | ||
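Each of the six sysfs files registered above exchanges exactly one raw byte at offset 0. A hedged userspace sketch of toggling the output latches and reading back the pin state; the device path follows the usual /sys/bus/w1/devices/<family>-<id> layout and the ROM id is a placeholder, so both are assumptions to adjust for a real device:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

/* assumed sysfs path; "29-0000001bc3a7" is a placeholder ROM id */
#define DEV "/sys/bus/w1/devices/29-0000001bc3a7"

int main(void)
{
	unsigned char state, out = 0xF0;	/* example: pull P3..P0 low, release P7..P4 */
	int fd;

	fd = open(DEV "/output", O_WRONLY);
	if (fd < 0)
		return 1;
	if (write(fd, &out, 1) != 1)		/* each file is a single raw byte */
		perror("write output");
	close(fd);

	fd = open(DEV "/state", O_RDONLY);
	if (fd < 0)
		return 1;
	if (read(fd, &state, 1) == 1)
		printf("PIO logic state: 0x%02x\n", state);
	close(fd);
	return 0;
}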
diff --git a/drivers/w1/slaves/w1_ds2780.c b/drivers/w1/slaves/w1_ds2780.c new file mode 100644 index 000000000000..274c8f38303f --- /dev/null +++ b/drivers/w1/slaves/w1_ds2780.c | |||
@@ -0,0 +1,217 @@ | |||
1 | /* | ||
2 | * 1-Wire implementation for the ds2780 chip | ||
3 | * | ||
4 | * Copyright (C) 2010 Indesign, LLC | ||
5 | * | ||
6 | * Author: Clifton Barnes <cabarnes@indesign-llc.com> | ||
7 | * | ||
8 | * Based on w1-ds2760 driver | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/module.h> | ||
18 | #include <linux/device.h> | ||
19 | #include <linux/types.h> | ||
20 | #include <linux/platform_device.h> | ||
21 | #include <linux/mutex.h> | ||
22 | #include <linux/idr.h> | ||
23 | |||
24 | #include "../w1.h" | ||
25 | #include "../w1_int.h" | ||
26 | #include "../w1_family.h" | ||
27 | #include "w1_ds2780.h" | ||
28 | |||
29 | int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count, | ||
30 | int io) | ||
31 | { | ||
32 | struct w1_slave *sl = container_of(dev, struct w1_slave, dev); | ||
33 | |||
34 | if (!dev) | ||
35 | return -ENODEV; | ||
36 | |||
37 | mutex_lock(&sl->master->mutex); | ||
38 | |||
39 | if (addr > DS2780_DATA_SIZE || addr < 0) { | ||
40 | count = 0; | ||
41 | goto out; | ||
42 | } | ||
43 | count = min_t(int, count, DS2780_DATA_SIZE - addr); | ||
44 | |||
45 | if (w1_reset_select_slave(sl) == 0) { | ||
46 | if (io) { | ||
47 | w1_write_8(sl->master, W1_DS2780_WRITE_DATA); | ||
48 | w1_write_8(sl->master, addr); | ||
49 | w1_write_block(sl->master, buf, count); | ||
50 | /* XXX w1_write_block returns void, not n_written */ | ||
51 | } else { | ||
52 | w1_write_8(sl->master, W1_DS2780_READ_DATA); | ||
53 | w1_write_8(sl->master, addr); | ||
54 | count = w1_read_block(sl->master, buf, count); | ||
55 | } | ||
56 | } | ||
57 | |||
58 | out: | ||
59 | mutex_unlock(&sl->master->mutex); | ||
60 | |||
61 | return count; | ||
62 | } | ||
63 | EXPORT_SYMBOL(w1_ds2780_io); | ||
64 | |||
65 | int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd) | ||
66 | { | ||
67 | struct w1_slave *sl = container_of(dev, struct w1_slave, dev); | ||
68 | |||
69 | if (!dev) | ||
70 | return -EINVAL; | ||
71 | |||
72 | mutex_lock(&sl->master->mutex); | ||
73 | |||
74 | if (w1_reset_select_slave(sl) == 0) { | ||
75 | w1_write_8(sl->master, cmd); | ||
76 | w1_write_8(sl->master, addr); | ||
77 | } | ||
78 | |||
79 | mutex_unlock(&sl->master->mutex); | ||
80 | return 0; | ||
81 | } | ||
82 | EXPORT_SYMBOL(w1_ds2780_eeprom_cmd); | ||
83 | |||
84 | static ssize_t w1_ds2780_read_bin(struct file *filp, | ||
85 | struct kobject *kobj, | ||
86 | struct bin_attribute *bin_attr, | ||
87 | char *buf, loff_t off, size_t count) | ||
88 | { | ||
89 | struct device *dev = container_of(kobj, struct device, kobj); | ||
90 | return w1_ds2780_io(dev, buf, off, count, 0); | ||
91 | } | ||
92 | |||
93 | static struct bin_attribute w1_ds2780_bin_attr = { | ||
94 | .attr = { | ||
95 | .name = "w1_slave", | ||
96 | .mode = S_IRUGO, | ||
97 | }, | ||
98 | .size = DS2780_DATA_SIZE, | ||
99 | .read = w1_ds2780_read_bin, | ||
100 | }; | ||
101 | |||
102 | static DEFINE_IDR(bat_idr); | ||
103 | static DEFINE_MUTEX(bat_idr_lock); | ||
104 | |||
105 | static int new_bat_id(void) | ||
106 | { | ||
107 | int ret; | ||
108 | |||
109 | while (1) { | ||
110 | int id; | ||
111 | |||
112 | ret = idr_pre_get(&bat_idr, GFP_KERNEL); | ||
113 | if (ret == 0) | ||
114 | return -ENOMEM; | ||
115 | |||
116 | mutex_lock(&bat_idr_lock); | ||
117 | ret = idr_get_new(&bat_idr, NULL, &id); | ||
118 | mutex_unlock(&bat_idr_lock); | ||
119 | |||
120 | if (ret == 0) { | ||
121 | ret = id & MAX_ID_MASK; | ||
122 | break; | ||
123 | } else if (ret == -EAGAIN) { | ||
124 | continue; | ||
125 | } else { | ||
126 | break; | ||
127 | } | ||
128 | } | ||
129 | |||
130 | return ret; | ||
131 | } | ||
132 | |||
133 | static void release_bat_id(int id) | ||
134 | { | ||
135 | mutex_lock(&bat_idr_lock); | ||
136 | idr_remove(&bat_idr, id); | ||
137 | mutex_unlock(&bat_idr_lock); | ||
138 | } | ||
139 | |||
140 | static int w1_ds2780_add_slave(struct w1_slave *sl) | ||
141 | { | ||
142 | int ret; | ||
143 | int id; | ||
144 | struct platform_device *pdev; | ||
145 | |||
146 | id = new_bat_id(); | ||
147 | if (id < 0) { | ||
148 | ret = id; | ||
149 | goto noid; | ||
150 | } | ||
151 | |||
152 | pdev = platform_device_alloc("ds2780-battery", id); | ||
153 | if (!pdev) { | ||
154 | ret = -ENOMEM; | ||
155 | goto pdev_alloc_failed; | ||
156 | } | ||
157 | pdev->dev.parent = &sl->dev; | ||
158 | |||
159 | ret = platform_device_add(pdev); | ||
160 | if (ret) | ||
161 | goto pdev_add_failed; | ||
162 | |||
163 | ret = sysfs_create_bin_file(&sl->dev.kobj, &w1_ds2780_bin_attr); | ||
164 | if (ret) | ||
165 | goto bin_attr_failed; | ||
166 | |||
167 | dev_set_drvdata(&sl->dev, pdev); | ||
168 | |||
169 | return 0; | ||
170 | |||
171 | bin_attr_failed: | ||
172 | pdev_add_failed: | ||
173 | platform_device_unregister(pdev); | ||
174 | pdev_alloc_failed: | ||
175 | release_bat_id(id); | ||
176 | noid: | ||
177 | return ret; | ||
178 | } | ||
179 | |||
180 | static void w1_ds2780_remove_slave(struct w1_slave *sl) | ||
181 | { | ||
182 | struct platform_device *pdev = dev_get_drvdata(&sl->dev); | ||
183 | int id = pdev->id; | ||
184 | |||
185 | platform_device_unregister(pdev); | ||
186 | release_bat_id(id); | ||
187 | sysfs_remove_bin_file(&sl->dev.kobj, &w1_ds2780_bin_attr); | ||
188 | } | ||
189 | |||
190 | static struct w1_family_ops w1_ds2780_fops = { | ||
191 | .add_slave = w1_ds2780_add_slave, | ||
192 | .remove_slave = w1_ds2780_remove_slave, | ||
193 | }; | ||
194 | |||
195 | static struct w1_family w1_ds2780_family = { | ||
196 | .fid = W1_FAMILY_DS2780, | ||
197 | .fops = &w1_ds2780_fops, | ||
198 | }; | ||
199 | |||
200 | static int __init w1_ds2780_init(void) | ||
201 | { | ||
202 | idr_init(&bat_idr); | ||
203 | return w1_register_family(&w1_ds2780_family); | ||
204 | } | ||
205 | |||
206 | static void __exit w1_ds2780_exit(void) | ||
207 | { | ||
208 | w1_unregister_family(&w1_ds2780_family); | ||
209 | idr_destroy(&bat_idr); | ||
210 | } | ||
211 | |||
212 | module_init(w1_ds2780_init); | ||
213 | module_exit(w1_ds2780_exit); | ||
214 | |||
215 | MODULE_LICENSE("GPL"); | ||
216 | MODULE_AUTHOR("Clifton Barnes <cabarnes@indesign-llc.com>"); | ||
217 | MODULE_DESCRIPTION("1-wire Driver for Maxim/Dallas DS2780 Stand-Alone Fuel Gauge IC"); | ||
diff --git a/drivers/w1/slaves/w1_ds2780.h b/drivers/w1/slaves/w1_ds2780.h new file mode 100644 index 000000000000..a1fba79eb1b5 --- /dev/null +++ b/drivers/w1/slaves/w1_ds2780.h | |||
@@ -0,0 +1,129 @@ | |||
1 | /* | ||
2 | * 1-Wire implementation for the ds2780 chip | ||
3 | * | ||
4 | * Copyright (C) 2010 Indesign, LLC | ||
5 | * | ||
6 | * Author: Clifton Barnes <cabarnes@indesign-llc.com> | ||
7 | * | ||
8 | * Based on w1-ds2760 driver | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License version 2 as | ||
12 | * published by the Free Software Foundation. | ||
13 | * | ||
14 | */ | ||
15 | |||
16 | #ifndef _W1_DS2780_H | ||
17 | #define _W1_DS2780_H | ||
18 | |||
19 | /* Function commands */ | ||
20 | #define W1_DS2780_READ_DATA 0x69 | ||
21 | #define W1_DS2780_WRITE_DATA 0x6C | ||
22 | #define W1_DS2780_COPY_DATA 0x48 | ||
23 | #define W1_DS2780_RECALL_DATA 0xB8 | ||
24 | #define W1_DS2780_LOCK 0x6A | ||
25 | |||
26 | /* Register map */ | ||
27 | /* Register 0x00 Reserved */ | ||
28 | #define DS2780_STATUS_REG 0x01 | ||
29 | #define DS2780_RAAC_MSB_REG 0x02 | ||
30 | #define DS2780_RAAC_LSB_REG 0x03 | ||
31 | #define DS2780_RSAC_MSB_REG 0x04 | ||
32 | #define DS2780_RSAC_LSB_REG 0x05 | ||
33 | #define DS2780_RARC_REG 0x06 | ||
34 | #define DS2780_RSRC_REG 0x07 | ||
35 | #define DS2780_IAVG_MSB_REG 0x08 | ||
36 | #define DS2780_IAVG_LSB_REG 0x09 | ||
37 | #define DS2780_TEMP_MSB_REG 0x0A | ||
38 | #define DS2780_TEMP_LSB_REG 0x0B | ||
39 | #define DS2780_VOLT_MSB_REG 0x0C | ||
40 | #define DS2780_VOLT_LSB_REG 0x0D | ||
41 | #define DS2780_CURRENT_MSB_REG 0x0E | ||
42 | #define DS2780_CURRENT_LSB_REG 0x0F | ||
43 | #define DS2780_ACR_MSB_REG 0x10 | ||
44 | #define DS2780_ACR_LSB_REG 0x11 | ||
45 | #define DS2780_ACRL_MSB_REG 0x12 | ||
46 | #define DS2780_ACRL_LSB_REG 0x13 | ||
47 | #define DS2780_AS_REG 0x14 | ||
48 | #define DS2780_SFR_REG 0x15 | ||
49 | #define DS2780_FULL_MSB_REG 0x16 | ||
50 | #define DS2780_FULL_LSB_REG 0x17 | ||
51 | #define DS2780_AE_MSB_REG 0x18 | ||
52 | #define DS2780_AE_LSB_REG 0x19 | ||
53 | #define DS2780_SE_MSB_REG 0x1A | ||
54 | #define DS2780_SE_LSB_REG 0x1B | ||
55 | /* Register 0x1C - 0x1E Reserved */ | ||
56 | #define DS2780_EEPROM_REG 0x1F | ||
57 | #define DS2780_EEPROM_BLOCK0_START 0x20 | ||
58 | /* Register 0x20 - 0x2F User EEPROM */ | ||
59 | #define DS2780_EEPROM_BLOCK0_END 0x2F | ||
60 | /* Register 0x30 - 0x5F Reserved */ | ||
61 | #define DS2780_EEPROM_BLOCK1_START 0x60 | ||
62 | #define DS2780_CONTROL_REG 0x60 | ||
63 | #define DS2780_AB_REG 0x61 | ||
64 | #define DS2780_AC_MSB_REG 0x62 | ||
65 | #define DS2780_AC_LSB_REG 0x63 | ||
66 | #define DS2780_VCHG_REG 0x64 | ||
67 | #define DS2780_IMIN_REG 0x65 | ||
68 | #define DS2780_VAE_REG 0x66 | ||
69 | #define DS2780_IAE_REG 0x67 | ||
70 | #define DS2780_AE_40_REG 0x68 | ||
71 | #define DS2780_RSNSP_REG 0x69 | ||
72 | #define DS2780_FULL_40_MSB_REG 0x6A | ||
73 | #define DS2780_FULL_40_LSB_REG 0x6B | ||
74 | #define DS2780_FULL_3040_SLOPE_REG 0x6C | ||
75 | #define DS2780_FULL_2030_SLOPE_REG 0x6D | ||
76 | #define DS2780_FULL_1020_SLOPE_REG 0x6E | ||
77 | #define DS2780_FULL_0010_SLOPE_REG 0x6F | ||
78 | #define DS2780_AE_3040_SLOPE_REG 0x70 | ||
79 | #define DS2780_AE_2030_SLOPE_REG 0x71 | ||
80 | #define DS2780_AE_1020_SLOPE_REG 0x72 | ||
81 | #define DS2780_AE_0010_SLOPE_REG 0x73 | ||
82 | #define DS2780_SE_3040_SLOPE_REG 0x74 | ||
83 | #define DS2780_SE_2030_SLOPE_REG 0x75 | ||
84 | #define DS2780_SE_1020_SLOPE_REG 0x76 | ||
85 | #define DS2780_SE_0010_SLOPE_REG 0x77 | ||
86 | #define DS2780_RSGAIN_MSB_REG 0x78 | ||
87 | #define DS2780_RSGAIN_LSB_REG 0x79 | ||
88 | #define DS2780_RSTC_REG 0x7A | ||
89 | #define DS2780_FRSGAIN_MSB_REG 0x7B | ||
90 | #define DS2780_FRSGAIN_LSB_REG 0x7C | ||
91 | #define DS2780_EEPROM_BLOCK1_END 0x7C | ||
92 | /* Register 0x7D - 0xFF Reserved */ | ||
93 | |||
94 | /* Number of valid register addresses */ | ||
95 | #define DS2780_DATA_SIZE 0x80 | ||
96 | |||
97 | /* Status register bits */ | ||
98 | #define DS2780_STATUS_REG_CHGTF (1 << 7) | ||
99 | #define DS2780_STATUS_REG_AEF (1 << 6) | ||
100 | #define DS2780_STATUS_REG_SEF (1 << 5) | ||
101 | #define DS2780_STATUS_REG_LEARNF (1 << 4) | ||
102 | /* Bit 3 Reserved */ | ||
103 | #define DS2780_STATUS_REG_UVF (1 << 2) | ||
104 | #define DS2780_STATUS_REG_PORF (1 << 1) | ||
105 | /* Bit 0 Reserved */ | ||
106 | |||
107 | /* Control register bits */ | ||
108 | /* Bit 7 Reserved */ | ||
109 | #define DS2780_CONTROL_REG_UVEN (1 << 6) | ||
110 | #define DS2780_CONTROL_REG_PMOD (1 << 5) | ||
111 | #define DS2780_CONTROL_REG_RNAOP (1 << 4) | ||
112 | /* Bit 0 - 3 Reserved */ | ||
113 | |||
114 | /* Special feature register bits */ | ||
115 | /* Bit 1 - 7 Reserved */ | ||
116 | #define DS2780_SFR_REG_PIOSC (1 << 0) | ||
117 | |||
118 | /* EEPROM register bits */ | ||
119 | #define DS2780_EEPROM_REG_EEC (1 << 7) | ||
120 | #define DS2780_EEPROM_REG_LOCK (1 << 6) | ||
121 | /* Bit 2 - 6 Reserved */ | ||
122 | #define DS2780_EEPROM_REG_BL1 (1 << 1) | ||
123 | #define DS2780_EEPROM_REG_BL0 (1 << 0) | ||
124 | |||
125 | extern int w1_ds2780_io(struct device *dev, char *buf, int addr, size_t count, | ||
126 | int io); | ||
127 | extern int w1_ds2780_eeprom_cmd(struct device *dev, int addr, int cmd); | ||
128 | |||
129 | #endif /* !_W1_DS2780_H */ | ||
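The exported w1_ds2780_io() together with this register map is what a companion battery driver would build on. A hedged kernel-side sketch of reading the two voltage registers in one pass; it assumes the caller holds the w1 slave's struct device (for the ds2780-battery platform child registered above, that is pdev->dev.parent), and the raw-to-microvolt scaling from the datasheet is deliberately left out:

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/device.h>
#include "w1_ds2780.h"

static int ds2780_read_voltage_raw(struct device *w1_dev, int *raw)
{
	char buf[2];
	int ret;

	/* DS2780_VOLT_MSB_REG (0x0C) and DS2780_VOLT_LSB_REG (0x0D) are
	 * adjacent, so one two-byte read covers both */
	ret = w1_ds2780_io(w1_dev, buf, DS2780_VOLT_MSB_REG, 2, 0);
	if (ret != 2)
		return ret < 0 ? ret : -EIO;

	/* raw register contents; scale per the DS2780 datasheet as needed */
	*raw = ((u8)buf[0] << 8) | (u8)buf[1];
	return 0;
}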
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c index b7b5014ff714..10606c822756 100644 --- a/drivers/w1/w1.c +++ b/drivers/w1/w1.c | |||
@@ -827,7 +827,7 @@ void w1_reconnect_slaves(struct w1_family *f, int attach) | |||
827 | mutex_unlock(&w1_mlock); | 827 | mutex_unlock(&w1_mlock); |
828 | } | 828 | } |
829 | 829 | ||
830 | static void w1_slave_found(struct w1_master *dev, u64 rn) | 830 | void w1_slave_found(struct w1_master *dev, u64 rn) |
831 | { | 831 | { |
832 | struct w1_slave *sl; | 832 | struct w1_slave *sl; |
833 | struct w1_reg_num *tmp; | 833 | struct w1_reg_num *tmp; |
@@ -933,14 +933,15 @@ void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb | |||
933 | } | 933 | } |
934 | } | 934 | } |
935 | 935 | ||
936 | void w1_search_process(struct w1_master *dev, u8 search_type) | 936 | void w1_search_process_cb(struct w1_master *dev, u8 search_type, |
937 | w1_slave_found_callback cb) | ||
937 | { | 938 | { |
938 | struct w1_slave *sl, *sln; | 939 | struct w1_slave *sl, *sln; |
939 | 940 | ||
940 | list_for_each_entry(sl, &dev->slist, w1_slave_entry) | 941 | list_for_each_entry(sl, &dev->slist, w1_slave_entry) |
941 | clear_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags); | 942 | clear_bit(W1_SLAVE_ACTIVE, (long *)&sl->flags); |
942 | 943 | ||
943 | w1_search_devices(dev, search_type, w1_slave_found); | 944 | w1_search_devices(dev, search_type, cb); |
944 | 945 | ||
945 | list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { | 946 | list_for_each_entry_safe(sl, sln, &dev->slist, w1_slave_entry) { |
946 | if (!test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags) && !--sl->ttl) | 947 | if (!test_bit(W1_SLAVE_ACTIVE, (unsigned long *)&sl->flags) && !--sl->ttl) |
@@ -953,6 +954,11 @@ void w1_search_process(struct w1_master *dev, u8 search_type) | |||
953 | dev->search_count--; | 954 | dev->search_count--; |
954 | } | 955 | } |
955 | 956 | ||
957 | static void w1_search_process(struct w1_master *dev, u8 search_type) | ||
958 | { | ||
959 | w1_search_process_cb(dev, search_type, w1_slave_found); | ||
960 | } | ||
961 | |||
956 | int w1_process(void *data) | 962 | int w1_process(void *data) |
957 | { | 963 | { |
958 | struct w1_master *dev = (struct w1_master *) data; | 964 | struct w1_master *dev = (struct w1_master *) data; |
diff --git a/drivers/w1/w1.h b/drivers/w1/w1.h index d8a9709f3449..1ce23fc6186c 100644 --- a/drivers/w1/w1.h +++ b/drivers/w1/w1.h | |||
@@ -55,6 +55,7 @@ struct w1_reg_num | |||
55 | #define W1_READ_ROM 0x33 | 55 | #define W1_READ_ROM 0x33 |
56 | #define W1_READ_PSUPPLY 0xB4 | 56 | #define W1_READ_PSUPPLY 0xB4 |
57 | #define W1_MATCH_ROM 0x55 | 57 | #define W1_MATCH_ROM 0x55 |
58 | #define W1_RESUME_CMD 0xA5 | ||
58 | 59 | ||
59 | #define W1_SLAVE_ACTIVE 0 | 60 | #define W1_SLAVE_ACTIVE 0 |
60 | 61 | ||
@@ -193,7 +194,9 @@ void w1_destroy_master_attributes(struct w1_master *master); | |||
193 | void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); | 194 | void w1_search(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); |
194 | void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); | 195 | void w1_search_devices(struct w1_master *dev, u8 search_type, w1_slave_found_callback cb); |
195 | struct w1_slave *w1_search_slave(struct w1_reg_num *id); | 196 | struct w1_slave *w1_search_slave(struct w1_reg_num *id); |
196 | void w1_search_process(struct w1_master *dev, u8 search_type); | 197 | void w1_slave_found(struct w1_master *dev, u64 rn); |
198 | void w1_search_process_cb(struct w1_master *dev, u8 search_type, | ||
199 | w1_slave_found_callback cb); | ||
197 | struct w1_master *w1_search_master_id(u32 id); | 200 | struct w1_master *w1_search_master_id(u32 id); |
198 | 201 | ||
199 | /* Disconnect and reconnect devices in the given family. Used for finding | 202 | /* Disconnect and reconnect devices in the given family. Used for finding |
@@ -213,6 +216,7 @@ void w1_write_block(struct w1_master *, const u8 *, int); | |||
213 | void w1_touch_block(struct w1_master *, u8 *, int); | 216 | void w1_touch_block(struct w1_master *, u8 *, int); |
214 | u8 w1_read_block(struct w1_master *, u8 *, int); | 217 | u8 w1_read_block(struct w1_master *, u8 *, int); |
215 | int w1_reset_select_slave(struct w1_slave *sl); | 218 | int w1_reset_select_slave(struct w1_slave *sl); |
219 | int w1_reset_resume_command(struct w1_master *); | ||
216 | void w1_next_pullup(struct w1_master *, int); | 220 | void w1_next_pullup(struct w1_master *, int); |
217 | 221 | ||
218 | static inline struct w1_slave* dev_to_w1_slave(struct device *dev) | 222 | static inline struct w1_slave* dev_to_w1_slave(struct device *dev) |
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h index f3b636d7cafe..97479ae70b9c 100644 --- a/drivers/w1/w1_family.h +++ b/drivers/w1/w1_family.h | |||
@@ -34,8 +34,10 @@ | |||
34 | #define W1_THERM_DS1822 0x22 | 34 | #define W1_THERM_DS1822 0x22 |
35 | #define W1_EEPROM_DS2433 0x23 | 35 | #define W1_EEPROM_DS2433 0x23 |
36 | #define W1_THERM_DS18B20 0x28 | 36 | #define W1_THERM_DS18B20 0x28 |
37 | #define W1_FAMILY_DS2408 0x29 | ||
37 | #define W1_EEPROM_DS2431 0x2D | 38 | #define W1_EEPROM_DS2431 0x2D |
38 | #define W1_FAMILY_DS2760 0x30 | 39 | #define W1_FAMILY_DS2760 0x30 |
40 | #define W1_FAMILY_DS2780 0x32 | ||
39 | 41 | ||
40 | #define MAXNAMELEN 32 | 42 | #define MAXNAMELEN 32 |
41 | 43 | ||
diff --git a/drivers/w1/w1_io.c b/drivers/w1/w1_io.c index 3ebe9726a9e5..8e8b64cfafb6 100644 --- a/drivers/w1/w1_io.c +++ b/drivers/w1/w1_io.c | |||
@@ -390,6 +390,32 @@ int w1_reset_select_slave(struct w1_slave *sl) | |||
390 | EXPORT_SYMBOL_GPL(w1_reset_select_slave); | 390 | EXPORT_SYMBOL_GPL(w1_reset_select_slave); |
391 | 391 | ||
392 | /** | 392 | /** |
393 | * When the workflow with a slave amongst many requires several | ||
394 | * successive commands with a reset between each, this function is similar | ||
395 | * to doing a reset then a match ROM for the last matched ROM. The | ||
396 | * advantage being that the matched ROM step is skipped in favor of the | ||
397 | * resume command. The slave must support the command of course. | ||
398 | * | ||
399 | * If the bus has only one slave, traditionnaly the match ROM is skipped | ||
400 | * and a "SKIP ROM" is done for efficiency. On multi-slave busses, this | ||
401 | * doesn't work of course, but the resume command is the next best thing. | ||
402 | * | ||
403 | * The w1 master lock must be held. | ||
404 | * | ||
405 | * @param dev the master device | ||
406 | */ | ||
407 | int w1_reset_resume_command(struct w1_master *dev) | ||
408 | { | ||
409 | if (w1_reset_bus(dev)) | ||
410 | return -1; | ||
411 | |||
412 | /* This will make only the last matched slave perform a skip ROM. */ | ||
413 | w1_write_8(dev, W1_RESUME_CMD); | ||
414 | return 0; | ||
415 | } | ||
416 | EXPORT_SYMBOL_GPL(w1_reset_resume_command); | ||
417 | |||
418 | /** | ||
393 | * Put out a strong pull-up of the specified duration after the next write | 419 | * Put out a strong pull-up of the specified duration after the next write |
394 | * operation. Not all hardware supports strong pullups. Hardware that | 420 | * operation. Not all hardware supports strong pullups. Hardware that |
395 | * doesn't support strong pullups will sleep for the given time after the | 421 | * doesn't support strong pullups will sleep for the given time after the |
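For context on the helper added above, here is a minimal sketch (not part of the patch) of how a slave driver might use w1_reset_resume_command() between two successive transactions with the same slave. The command bytes CMD_CONVERT and CMD_READ_SCRATCH are hypothetical device opcodes, and the caller is assumed to already hold the master mutex, as the comment requires.

/* Illustrative sketch only: two back-to-back transactions with one slave.
 * The master mutex is assumed to be held by the caller; CMD_CONVERT and
 * CMD_READ_SCRATCH stand in for real device command bytes. */
static int example_two_commands(struct w1_slave *sl)
{
	struct w1_master *dev = sl->master;
	int err;

	/* first transaction: full reset + 64-bit match ROM */
	err = w1_reset_select_slave(sl);
	if (err)
		return err;
	w1_write_8(dev, CMD_CONVERT);

	/* second transaction: reset + resume, no ROM match needed */
	err = w1_reset_resume_command(dev);
	if (err)
		return err;
	w1_write_8(dev, CMD_READ_SCRATCH);
	return 0;
}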
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c index 7e667bc77ef2..55aabd927c60 100644 --- a/drivers/w1/w1_netlink.c +++ b/drivers/w1/w1_netlink.c | |||
@@ -55,6 +55,9 @@ static void w1_send_slave(struct w1_master *dev, u64 rn) | |||
55 | struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1); | 55 | struct w1_netlink_cmd *cmd = (struct w1_netlink_cmd *)(hdr + 1); |
56 | int avail; | 56 | int avail; |
57 | 57 | ||
58 | /* update kernel slave list */ | ||
59 | w1_slave_found(dev, rn); | ||
60 | |||
58 | avail = dev->priv_size - cmd->len; | 61 | avail = dev->priv_size - cmd->len; |
59 | 62 | ||
60 | if (avail > 8) { | 63 | if (avail > 8) { |
@@ -85,7 +88,7 @@ static int w1_process_search_command(struct w1_master *dev, struct cn_msg *msg, | |||
85 | dev->priv = msg; | 88 | dev->priv = msg; |
86 | dev->priv_size = avail; | 89 | dev->priv_size = avail; |
87 | 90 | ||
88 | w1_search_devices(dev, search_type, w1_send_slave); | 91 | w1_search_process_cb(dev, search_type, w1_send_slave); |
89 | 92 | ||
90 | msg->ack = 0; | 93 | msg->ack = 0; |
91 | cn_netlink_send(msg, 0, GFP_KERNEL); | 94 | cn_netlink_send(msg, 0, GFP_KERNEL); |
diff --git a/drivers/watchdog/rdc321x_wdt.c b/drivers/watchdog/rdc321x_wdt.c index d8e725082fdc..428f8a1583e8 100644 --- a/drivers/watchdog/rdc321x_wdt.c +++ b/drivers/watchdog/rdc321x_wdt.c | |||
@@ -37,7 +37,6 @@ | |||
37 | #include <linux/io.h> | 37 | #include <linux/io.h> |
38 | #include <linux/uaccess.h> | 38 | #include <linux/uaccess.h> |
39 | #include <linux/mfd/rdc321x.h> | 39 | #include <linux/mfd/rdc321x.h> |
40 | #include <linux/mfd/core.h> | ||
41 | 40 | ||
42 | #define RDC_WDT_MASK 0x80000000 /* Mask */ | 41 | #define RDC_WDT_MASK 0x80000000 /* Mask */ |
43 | #define RDC_WDT_EN 0x00800000 /* Enable bit */ | 42 | #define RDC_WDT_EN 0x00800000 /* Enable bit */ |
@@ -232,7 +231,7 @@ static int __devinit rdc321x_wdt_probe(struct platform_device *pdev) | |||
232 | struct resource *r; | 231 | struct resource *r; |
233 | struct rdc321x_wdt_pdata *pdata; | 232 | struct rdc321x_wdt_pdata *pdata; |
234 | 233 | ||
235 | pdata = mfd_get_data(pdev); | 234 | pdata = pdev->dev.platform_data; |
236 | if (!pdata) { | 235 | if (!pdata) { |
237 | dev_err(&pdev->dev, "no platform data supplied\n"); | 236 | dev_err(&pdev->dev, "no platform data supplied\n"); |
238 | return -ENODEV; | 237 | return -ENODEV; |
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile index 4781f806701d..bbc18258ecc5 100644 --- a/drivers/xen/Makefile +++ b/drivers/xen/Makefile | |||
@@ -1,5 +1,6 @@ | |||
1 | obj-y += grant-table.o features.o events.o manage.o balloon.o | 1 | obj-y += grant-table.o features.o events.o manage.o balloon.o |
2 | obj-y += xenbus/ | 2 | obj-y += xenbus/ |
3 | obj-y += tmem.o | ||
3 | 4 | ||
4 | nostackp := $(call cc-option, -fno-stack-protector) | 5 | nostackp := $(call cc-option, -fno-stack-protector) |
5 | CFLAGS_features.o := $(nostackp) | 6 | CFLAGS_features.o := $(nostackp) |
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c new file mode 100644 index 000000000000..816a44959ef0 --- /dev/null +++ b/drivers/xen/tmem.c | |||
@@ -0,0 +1,264 @@ | |||
1 | /* | ||
2 | * Xen implementation for transcendent memory (tmem) | ||
3 | * | ||
4 | * Copyright (C) 2009-2010 Oracle Corp. All rights reserved. | ||
5 | * Author: Dan Magenheimer | ||
6 | */ | ||
7 | |||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/pagemap.h> | ||
12 | #include <linux/cleancache.h> | ||
13 | |||
14 | #include <xen/xen.h> | ||
15 | #include <xen/interface/xen.h> | ||
16 | #include <asm/xen/hypercall.h> | ||
17 | #include <asm/xen/page.h> | ||
18 | #include <asm/xen/hypervisor.h> | ||
19 | |||
20 | #define TMEM_CONTROL 0 | ||
21 | #define TMEM_NEW_POOL 1 | ||
22 | #define TMEM_DESTROY_POOL 2 | ||
23 | #define TMEM_NEW_PAGE 3 | ||
24 | #define TMEM_PUT_PAGE 4 | ||
25 | #define TMEM_GET_PAGE 5 | ||
26 | #define TMEM_FLUSH_PAGE 6 | ||
27 | #define TMEM_FLUSH_OBJECT 7 | ||
28 | #define TMEM_READ 8 | ||
29 | #define TMEM_WRITE 9 | ||
30 | #define TMEM_XCHG 10 | ||
31 | |||
32 | /* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */ | ||
33 | #define TMEM_POOL_PERSIST 1 | ||
34 | #define TMEM_POOL_SHARED 2 | ||
35 | #define TMEM_POOL_PAGESIZE_SHIFT 4 | ||
36 | #define TMEM_VERSION_SHIFT 24 | ||
37 | |||
38 | |||
39 | struct tmem_pool_uuid { | ||
40 | u64 uuid_lo; | ||
41 | u64 uuid_hi; | ||
42 | }; | ||
43 | |||
44 | struct tmem_oid { | ||
45 | u64 oid[3]; | ||
46 | }; | ||
47 | |||
48 | #define TMEM_POOL_PRIVATE_UUID { 0, 0 } | ||
49 | |||
50 | /* flags for tmem_ops.new_pool */ | ||
51 | #define TMEM_POOL_PERSIST 1 | ||
52 | #define TMEM_POOL_SHARED 2 | ||
53 | |||
54 | /* xen tmem foundation ops/hypercalls */ | ||
55 | |||
56 | static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid, | ||
57 | u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len) | ||
58 | { | ||
59 | struct tmem_op op; | ||
60 | int rc = 0; | ||
61 | |||
62 | op.cmd = tmem_cmd; | ||
63 | op.pool_id = tmem_pool; | ||
64 | op.u.gen.oid[0] = oid.oid[0]; | ||
65 | op.u.gen.oid[1] = oid.oid[1]; | ||
66 | op.u.gen.oid[2] = oid.oid[2]; | ||
67 | op.u.gen.index = index; | ||
68 | op.u.gen.tmem_offset = tmem_offset; | ||
69 | op.u.gen.pfn_offset = pfn_offset; | ||
70 | op.u.gen.len = len; | ||
71 | set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn); | ||
72 | rc = HYPERVISOR_tmem_op(&op); | ||
73 | return rc; | ||
74 | } | ||
75 | |||
76 | static int xen_tmem_new_pool(struct tmem_pool_uuid uuid, | ||
77 | u32 flags, unsigned long pagesize) | ||
78 | { | ||
79 | struct tmem_op op; | ||
80 | int rc = 0, pageshift; | ||
81 | |||
82 | for (pageshift = 0; pagesize != 1; pageshift++) | ||
83 | pagesize >>= 1; | ||
84 | flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT; | ||
85 | flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT; | ||
86 | op.cmd = TMEM_NEW_POOL; | ||
87 | op.u.new.uuid[0] = uuid.uuid_lo; | ||
88 | op.u.new.uuid[1] = uuid.uuid_hi; | ||
89 | op.u.new.flags = flags; | ||
90 | rc = HYPERVISOR_tmem_op(&op); | ||
91 | return rc; | ||
92 | } | ||
93 | |||
94 | /* xen generic tmem ops */ | ||
95 | |||
96 | static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid, | ||
97 | u32 index, unsigned long pfn) | ||
98 | { | ||
99 | unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn; | ||
100 | |||
101 | return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index, | ||
102 | gmfn, 0, 0, 0); | ||
103 | } | ||
104 | |||
105 | static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid, | ||
106 | u32 index, unsigned long pfn) | ||
107 | { | ||
108 | unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn; | ||
109 | |||
110 | return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index, | ||
111 | gmfn, 0, 0, 0); | ||
112 | } | ||
113 | |||
114 | static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index) | ||
115 | { | ||
116 | return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index, | ||
117 | 0, 0, 0, 0); | ||
118 | } | ||
119 | |||
120 | static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid) | ||
121 | { | ||
122 | return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0); | ||
123 | } | ||
124 | |||
125 | static int xen_tmem_destroy_pool(u32 pool_id) | ||
126 | { | ||
127 | struct tmem_oid oid = { { 0 } }; | ||
128 | |||
129 | return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0); | ||
130 | } | ||
131 | |||
132 | int tmem_enabled; | ||
133 | |||
134 | static int __init enable_tmem(char *s) | ||
135 | { | ||
136 | tmem_enabled = 1; | ||
137 | return 1; | ||
138 | } | ||
139 | |||
140 | __setup("tmem", enable_tmem); | ||
141 | |||
142 | /* cleancache ops */ | ||
143 | |||
144 | static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key, | ||
145 | pgoff_t index, struct page *page) | ||
146 | { | ||
147 | u32 ind = (u32) index; | ||
148 | struct tmem_oid oid = *(struct tmem_oid *)&key; | ||
149 | unsigned long pfn = page_to_pfn(page); | ||
150 | |||
151 | if (pool < 0) | ||
152 | return; | ||
153 | if (ind != index) | ||
154 | return; | ||
155 | mb(); /* ensure page is quiescent; tmem may address it with an alias */ | ||
156 | (void)xen_tmem_put_page((u32)pool, oid, ind, pfn); | ||
157 | } | ||
158 | |||
159 | static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key, | ||
160 | pgoff_t index, struct page *page) | ||
161 | { | ||
162 | u32 ind = (u32) index; | ||
163 | struct tmem_oid oid = *(struct tmem_oid *)&key; | ||
164 | unsigned long pfn = page_to_pfn(page); | ||
165 | int ret; | ||
166 | |||
167 | /* translate return values to linux semantics */ | ||
168 | if (pool < 0) | ||
169 | return -1; | ||
170 | if (ind != index) | ||
171 | return -1; | ||
172 | ret = xen_tmem_get_page((u32)pool, oid, ind, pfn); | ||
173 | if (ret == 1) | ||
174 | return 0; | ||
175 | else | ||
176 | return -1; | ||
177 | } | ||
178 | |||
179 | static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key, | ||
180 | pgoff_t index) | ||
181 | { | ||
182 | u32 ind = (u32) index; | ||
183 | struct tmem_oid oid = *(struct tmem_oid *)&key; | ||
184 | |||
185 | if (pool < 0) | ||
186 | return; | ||
187 | if (ind != index) | ||
188 | return; | ||
189 | (void)xen_tmem_flush_page((u32)pool, oid, ind); | ||
190 | } | ||
191 | |||
192 | static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key) | ||
193 | { | ||
194 | struct tmem_oid oid = *(struct tmem_oid *)&key; | ||
195 | |||
196 | if (pool < 0) | ||
197 | return; | ||
198 | (void)xen_tmem_flush_object((u32)pool, oid); | ||
199 | } | ||
200 | |||
201 | static void tmem_cleancache_flush_fs(int pool) | ||
202 | { | ||
203 | if (pool < 0) | ||
204 | return; | ||
205 | (void)xen_tmem_destroy_pool((u32)pool); | ||
206 | } | ||
207 | |||
208 | static int tmem_cleancache_init_fs(size_t pagesize) | ||
209 | { | ||
210 | struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID; | ||
211 | |||
212 | return xen_tmem_new_pool(uuid_private, 0, pagesize); | ||
213 | } | ||
214 | |||
215 | static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize) | ||
216 | { | ||
217 | struct tmem_pool_uuid shared_uuid; | ||
218 | |||
219 | shared_uuid.uuid_lo = *(u64 *)uuid; | ||
220 | shared_uuid.uuid_hi = *(u64 *)(&uuid[8]); | ||
221 | return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize); | ||
222 | } | ||
223 | |||
224 | static int use_cleancache = 1; | ||
225 | |||
226 | static int __init no_cleancache(char *s) | ||
227 | { | ||
228 | use_cleancache = 0; | ||
229 | return 1; | ||
230 | } | ||
231 | |||
232 | __setup("nocleancache", no_cleancache); | ||
233 | |||
234 | static struct cleancache_ops tmem_cleancache_ops = { | ||
235 | .put_page = tmem_cleancache_put_page, | ||
236 | .get_page = tmem_cleancache_get_page, | ||
237 | .flush_page = tmem_cleancache_flush_page, | ||
238 | .flush_inode = tmem_cleancache_flush_inode, | ||
239 | .flush_fs = tmem_cleancache_flush_fs, | ||
240 | .init_shared_fs = tmem_cleancache_init_shared_fs, | ||
241 | .init_fs = tmem_cleancache_init_fs | ||
242 | }; | ||
243 | |||
244 | static int __init xen_tmem_init(void) | ||
245 | { | ||
246 | struct cleancache_ops old_ops; | ||
247 | |||
248 | if (!xen_domain()) | ||
249 | return 0; | ||
250 | #ifdef CONFIG_CLEANCACHE | ||
251 | BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid)); | ||
252 | if (tmem_enabled && use_cleancache) { | ||
253 | char *s = ""; | ||
254 | old_ops = cleancache_register_ops(&tmem_cleancache_ops); | ||
255 | if (old_ops.init_fs != NULL) | ||
256 | s = " (WARNING: cleancache_ops overridden)"; | ||
257 | printk(KERN_INFO "cleancache enabled, RAM provided by " | ||
258 | "Xen Transcendent Memory%s\n", s); | ||
259 | } | ||
260 | #endif | ||
261 | return 0; | ||
262 | } | ||
263 | |||
264 | module_init(xen_tmem_init) | ||
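As a stand-alone illustration of the flag packing done in xen_tmem_new_pool() above (a sketch, assuming TMEM_SPEC_VERSION is 1 as defined in the Xen interface headers rather than in this file): for a 4 KiB page size the loop yields pageshift = 12, so the page-size field encodes to 0 and only the spec version occupies the upper bits.

/* Sketch: reproduce the TMEM_NEW_POOL flag encoding for a 4 KiB pool.
 * TMEM_SPEC_VERSION == 1 is an assumption taken from the Xen headers. */
#include <stdio.h>

#define TMEM_POOL_PAGESIZE_SHIFT 4
#define TMEM_VERSION_SHIFT       24
#define TMEM_SPEC_VERSION        1

int main(void)
{
	unsigned long pagesize = 4096, flags = 0;
	int pageshift;

	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;                                  /* pageshift == 12 */

	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;  /* adds 0 */
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;        /* 0x01000000 */

	printf("flags = 0x%08lx\n", flags);                      /* 0x01000000 */
	return 0;
}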
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c index 7f6c67703195..8d7f3e69ae29 100644 --- a/fs/9p/vfs_inode.c +++ b/fs/9p/vfs_inode.c | |||
@@ -814,6 +814,7 @@ int v9fs_vfs_unlink(struct inode *i, struct dentry *d) | |||
814 | 814 | ||
815 | int v9fs_vfs_rmdir(struct inode *i, struct dentry *d) | 815 | int v9fs_vfs_rmdir(struct inode *i, struct dentry *d) |
816 | { | 816 | { |
817 | dentry_unhash(d); | ||
817 | return v9fs_remove(i, d, 1); | 818 | return v9fs_remove(i, d, 1); |
818 | } | 819 | } |
819 | 820 | ||
@@ -839,6 +840,9 @@ v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
839 | struct p9_fid *newdirfid; | 840 | struct p9_fid *newdirfid; |
840 | struct p9_wstat wstat; | 841 | struct p9_wstat wstat; |
841 | 842 | ||
843 | if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) | ||
844 | dentry_unhash(new_dentry); | ||
845 | |||
842 | P9_DPRINTK(P9_DEBUG_VFS, "\n"); | 846 | P9_DPRINTK(P9_DEBUG_VFS, "\n"); |
843 | retval = 0; | 847 | retval = 0; |
844 | old_inode = old_dentry->d_inode; | 848 | old_inode = old_dentry->d_inode; |
diff --git a/fs/Kconfig b/fs/Kconfig index f6edba2e069f..19891aab9c6e 100644 --- a/fs/Kconfig +++ b/fs/Kconfig | |||
@@ -47,7 +47,7 @@ config FS_POSIX_ACL | |||
47 | def_bool n | 47 | def_bool n |
48 | 48 | ||
49 | config EXPORTFS | 49 | config EXPORTFS |
50 | bool | 50 | tristate |
51 | 51 | ||
52 | config FILE_LOCKING | 52 | config FILE_LOCKING |
53 | bool "Enable POSIX file locking API" if EXPERT | 53 | bool "Enable POSIX file locking API" if EXPERT |
diff --git a/fs/affs/namei.c b/fs/affs/namei.c index e3e9efc1fdd8..03330e2e390c 100644 --- a/fs/affs/namei.c +++ b/fs/affs/namei.c | |||
@@ -320,6 +320,8 @@ affs_rmdir(struct inode *dir, struct dentry *dentry) | |||
320 | dentry->d_inode->i_ino, | 320 | dentry->d_inode->i_ino, |
321 | (int)dentry->d_name.len, dentry->d_name.name); | 321 | (int)dentry->d_name.len, dentry->d_name.name); |
322 | 322 | ||
323 | dentry_unhash(dentry); | ||
324 | |||
323 | return affs_remove_header(dentry); | 325 | return affs_remove_header(dentry); |
324 | } | 326 | } |
325 | 327 | ||
@@ -417,6 +419,9 @@ affs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
417 | struct buffer_head *bh = NULL; | 419 | struct buffer_head *bh = NULL; |
418 | int retval; | 420 | int retval; |
419 | 421 | ||
422 | if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) | ||
423 | dentry_unhash(new_dentry); | ||
424 | |||
420 | pr_debug("AFFS: rename(old=%u,\"%*s\" to new=%u,\"%*s\")\n", | 425 | pr_debug("AFFS: rename(old=%u,\"%*s\" to new=%u,\"%*s\")\n", |
421 | (u32)old_dir->i_ino, (int)old_dentry->d_name.len, old_dentry->d_name.name, | 426 | (u32)old_dir->i_ino, (int)old_dentry->d_name.len, old_dentry->d_name.name, |
422 | (u32)new_dir->i_ino, (int)new_dentry->d_name.len, new_dentry->d_name.name); | 427 | (u32)new_dir->i_ino, (int)new_dentry->d_name.len, new_dentry->d_name.name); |
diff --git a/fs/afs/dir.c b/fs/afs/dir.c index 20c106f24927..2c4e05160042 100644 --- a/fs/afs/dir.c +++ b/fs/afs/dir.c | |||
@@ -845,6 +845,8 @@ static int afs_rmdir(struct inode *dir, struct dentry *dentry) | |||
845 | _enter("{%x:%u},{%s}", | 845 | _enter("{%x:%u},{%s}", |
846 | dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name); | 846 | dvnode->fid.vid, dvnode->fid.vnode, dentry->d_name.name); |
847 | 847 | ||
848 | dentry_unhash(dentry); | ||
849 | |||
848 | ret = -ENAMETOOLONG; | 850 | ret = -ENAMETOOLONG; |
849 | if (dentry->d_name.len >= AFSNAMEMAX) | 851 | if (dentry->d_name.len >= AFSNAMEMAX) |
850 | goto error; | 852 | goto error; |
@@ -1146,6 +1148,9 @@ static int afs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1146 | struct key *key; | 1148 | struct key *key; |
1147 | int ret; | 1149 | int ret; |
1148 | 1150 | ||
1151 | if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) | ||
1152 | dentry_unhash(new_dentry); | ||
1153 | |||
1149 | vnode = AFS_FS_I(old_dentry->d_inode); | 1154 | vnode = AFS_FS_I(old_dentry->d_inode); |
1150 | orig_dvnode = AFS_FS_I(old_dir); | 1155 | orig_dvnode = AFS_FS_I(old_dir); |
1151 | new_dvnode = AFS_FS_I(new_dir); | 1156 | new_dvnode = AFS_FS_I(new_dir); |
diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index f55ae23b137e..87d95a8cddbc 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c | |||
@@ -583,6 +583,8 @@ static int autofs4_dir_unlink(struct inode *dir, struct dentry *dentry) | |||
583 | if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN)) | 583 | if (!autofs4_oz_mode(sbi) && !capable(CAP_SYS_ADMIN)) |
584 | return -EACCES; | 584 | return -EACCES; |
585 | 585 | ||
586 | dentry_unhash(dentry); | ||
587 | |||
586 | if (atomic_dec_and_test(&ino->count)) { | 588 | if (atomic_dec_and_test(&ino->count)) { |
587 | p_ino = autofs4_dentry_ino(dentry->d_parent); | 589 | p_ino = autofs4_dentry_ino(dentry->d_parent); |
588 | if (p_ino && dentry->d_parent != dentry) | 590 | if (p_ino && dentry->d_parent != dentry) |
diff --git a/fs/bfs/dir.c b/fs/bfs/dir.c index b14cebfd9047..c7d1d06b0483 100644 --- a/fs/bfs/dir.c +++ b/fs/bfs/dir.c | |||
@@ -224,6 +224,9 @@ static int bfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
224 | struct bfs_sb_info *info; | 224 | struct bfs_sb_info *info; |
225 | int error = -ENOENT; | 225 | int error = -ENOENT; |
226 | 226 | ||
227 | if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) | ||
228 | dentry_unhash(new_dentry); | ||
229 | |||
227 | old_bh = new_bh = NULL; | 230 | old_bh = new_bh = NULL; |
228 | old_inode = old_dentry->d_inode; | 231 | old_inode = old_dentry->d_inode; |
229 | if (S_ISDIR(old_inode->i_mode)) | 232 | if (S_ISDIR(old_inode->i_mode)) |
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile index 31610ea73aec..9b72dcf1cd25 100644 --- a/fs/btrfs/Makefile +++ b/fs/btrfs/Makefile | |||
@@ -7,4 +7,4 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \ | |||
7 | extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ | 7 | extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \ |
8 | extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ | 8 | extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \ |
9 | export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \ | 9 | export.o tree-log.o acl.o free-space-cache.o zlib.o lzo.o \ |
10 | compression.o delayed-ref.o relocation.o | 10 | compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o |
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c index 44ea5b92e1ba..f66fc9959733 100644 --- a/fs/btrfs/acl.c +++ b/fs/btrfs/acl.c | |||
@@ -288,7 +288,7 @@ int btrfs_acl_chmod(struct inode *inode) | |||
288 | return 0; | 288 | return 0; |
289 | 289 | ||
290 | acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS); | 290 | acl = btrfs_get_acl(inode, ACL_TYPE_ACCESS); |
291 | if (IS_ERR(acl) || !acl) | 291 | if (IS_ERR_OR_NULL(acl)) |
292 | return PTR_ERR(acl); | 292 | return PTR_ERR(acl); |
293 | 293 | ||
294 | clone = posix_acl_clone(acl, GFP_KERNEL); | 294 | clone = posix_acl_clone(acl, GFP_KERNEL); |
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h index 57c3bb2884ce..93b1aa932014 100644 --- a/fs/btrfs/btrfs_inode.h +++ b/fs/btrfs/btrfs_inode.h | |||
@@ -22,6 +22,7 @@ | |||
22 | #include "extent_map.h" | 22 | #include "extent_map.h" |
23 | #include "extent_io.h" | 23 | #include "extent_io.h" |
24 | #include "ordered-data.h" | 24 | #include "ordered-data.h" |
25 | #include "delayed-inode.h" | ||
25 | 26 | ||
26 | /* in memory btrfs inode */ | 27 | /* in memory btrfs inode */ |
27 | struct btrfs_inode { | 28 | struct btrfs_inode { |
@@ -152,20 +153,34 @@ struct btrfs_inode { | |||
152 | unsigned ordered_data_close:1; | 153 | unsigned ordered_data_close:1; |
153 | unsigned orphan_meta_reserved:1; | 154 | unsigned orphan_meta_reserved:1; |
154 | unsigned dummy_inode:1; | 155 | unsigned dummy_inode:1; |
156 | unsigned in_defrag:1; | ||
155 | 157 | ||
156 | /* | 158 | /* |
157 | * always compress this one file | 159 | * always compress this one file |
158 | */ | 160 | */ |
159 | unsigned force_compress:4; | 161 | unsigned force_compress:4; |
160 | 162 | ||
163 | struct btrfs_delayed_node *delayed_node; | ||
164 | |||
161 | struct inode vfs_inode; | 165 | struct inode vfs_inode; |
162 | }; | 166 | }; |
163 | 167 | ||
168 | extern unsigned char btrfs_filetype_table[]; | ||
169 | |||
164 | static inline struct btrfs_inode *BTRFS_I(struct inode *inode) | 170 | static inline struct btrfs_inode *BTRFS_I(struct inode *inode) |
165 | { | 171 | { |
166 | return container_of(inode, struct btrfs_inode, vfs_inode); | 172 | return container_of(inode, struct btrfs_inode, vfs_inode); |
167 | } | 173 | } |
168 | 174 | ||
175 | static inline u64 btrfs_ino(struct inode *inode) | ||
176 | { | ||
177 | u64 ino = BTRFS_I(inode)->location.objectid; | ||
178 | |||
179 | if (ino <= BTRFS_FIRST_FREE_OBJECTID) | ||
180 | ino = inode->i_ino; | ||
181 | return ino; | ||
182 | } | ||
183 | |||
169 | static inline void btrfs_i_size_write(struct inode *inode, u64 size) | 184 | static inline void btrfs_i_size_write(struct inode *inode, u64 size) |
170 | { | 185 | { |
171 | i_size_write(inode, size); | 186 | i_size_write(inode, size); |
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c index 41d1d7c70e29..bfe42b03eaf9 100644 --- a/fs/btrfs/compression.c +++ b/fs/btrfs/compression.c | |||
@@ -125,9 +125,10 @@ static int check_compressed_csum(struct inode *inode, | |||
125 | kunmap_atomic(kaddr, KM_USER0); | 125 | kunmap_atomic(kaddr, KM_USER0); |
126 | 126 | ||
127 | if (csum != *cb_sum) { | 127 | if (csum != *cb_sum) { |
128 | printk(KERN_INFO "btrfs csum failed ino %lu " | 128 | printk(KERN_INFO "btrfs csum failed ino %llu " |
129 | "extent %llu csum %u " | 129 | "extent %llu csum %u " |
130 | "wanted %u mirror %d\n", inode->i_ino, | 130 | "wanted %u mirror %d\n", |
131 | (unsigned long long)btrfs_ino(inode), | ||
131 | (unsigned long long)disk_start, | 132 | (unsigned long long)disk_start, |
132 | csum, *cb_sum, cb->mirror_num); | 133 | csum, *cb_sum, cb->mirror_num); |
133 | ret = -EIO; | 134 | ret = -EIO; |
@@ -332,7 +333,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
332 | struct compressed_bio *cb; | 333 | struct compressed_bio *cb; |
333 | unsigned long bytes_left; | 334 | unsigned long bytes_left; |
334 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | 335 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; |
335 | int page_index = 0; | 336 | int pg_index = 0; |
336 | struct page *page; | 337 | struct page *page; |
337 | u64 first_byte = disk_start; | 338 | u64 first_byte = disk_start; |
338 | struct block_device *bdev; | 339 | struct block_device *bdev; |
@@ -366,8 +367,8 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start, | |||
366 | 367 | ||
367 | /* create and submit bios for the compressed pages */ | 368 | /* create and submit bios for the compressed pages */ |
368 | bytes_left = compressed_len; | 369 | bytes_left = compressed_len; |
369 | for (page_index = 0; page_index < cb->nr_pages; page_index++) { | 370 | for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { |
370 | page = compressed_pages[page_index]; | 371 | page = compressed_pages[pg_index]; |
371 | page->mapping = inode->i_mapping; | 372 | page->mapping = inode->i_mapping; |
372 | if (bio->bi_size) | 373 | if (bio->bi_size) |
373 | ret = io_tree->ops->merge_bio_hook(page, 0, | 374 | ret = io_tree->ops->merge_bio_hook(page, 0, |
@@ -432,7 +433,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, | |||
432 | struct compressed_bio *cb) | 433 | struct compressed_bio *cb) |
433 | { | 434 | { |
434 | unsigned long end_index; | 435 | unsigned long end_index; |
435 | unsigned long page_index; | 436 | unsigned long pg_index; |
436 | u64 last_offset; | 437 | u64 last_offset; |
437 | u64 isize = i_size_read(inode); | 438 | u64 isize = i_size_read(inode); |
438 | int ret; | 439 | int ret; |
@@ -456,13 +457,13 @@ static noinline int add_ra_bio_pages(struct inode *inode, | |||
456 | end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; | 457 | end_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT; |
457 | 458 | ||
458 | while (last_offset < compressed_end) { | 459 | while (last_offset < compressed_end) { |
459 | page_index = last_offset >> PAGE_CACHE_SHIFT; | 460 | pg_index = last_offset >> PAGE_CACHE_SHIFT; |
460 | 461 | ||
461 | if (page_index > end_index) | 462 | if (pg_index > end_index) |
462 | break; | 463 | break; |
463 | 464 | ||
464 | rcu_read_lock(); | 465 | rcu_read_lock(); |
465 | page = radix_tree_lookup(&mapping->page_tree, page_index); | 466 | page = radix_tree_lookup(&mapping->page_tree, pg_index); |
466 | rcu_read_unlock(); | 467 | rcu_read_unlock(); |
467 | if (page) { | 468 | if (page) { |
468 | misses++; | 469 | misses++; |
@@ -476,7 +477,7 @@ static noinline int add_ra_bio_pages(struct inode *inode, | |||
476 | if (!page) | 477 | if (!page) |
477 | break; | 478 | break; |
478 | 479 | ||
479 | if (add_to_page_cache_lru(page, mapping, page_index, | 480 | if (add_to_page_cache_lru(page, mapping, pg_index, |
480 | GFP_NOFS)) { | 481 | GFP_NOFS)) { |
481 | page_cache_release(page); | 482 | page_cache_release(page); |
482 | goto next; | 483 | goto next; |
@@ -560,7 +561,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
560 | unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; | 561 | unsigned long uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE; |
561 | unsigned long compressed_len; | 562 | unsigned long compressed_len; |
562 | unsigned long nr_pages; | 563 | unsigned long nr_pages; |
563 | unsigned long page_index; | 564 | unsigned long pg_index; |
564 | struct page *page; | 565 | struct page *page; |
565 | struct block_device *bdev; | 566 | struct block_device *bdev; |
566 | struct bio *comp_bio; | 567 | struct bio *comp_bio; |
@@ -613,10 +614,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
613 | 614 | ||
614 | bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; | 615 | bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; |
615 | 616 | ||
616 | for (page_index = 0; page_index < nr_pages; page_index++) { | 617 | for (pg_index = 0; pg_index < nr_pages; pg_index++) { |
617 | cb->compressed_pages[page_index] = alloc_page(GFP_NOFS | | 618 | cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS | |
618 | __GFP_HIGHMEM); | 619 | __GFP_HIGHMEM); |
619 | if (!cb->compressed_pages[page_index]) | 620 | if (!cb->compressed_pages[pg_index]) |
620 | goto fail2; | 621 | goto fail2; |
621 | } | 622 | } |
622 | cb->nr_pages = nr_pages; | 623 | cb->nr_pages = nr_pages; |
@@ -634,8 +635,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
634 | comp_bio->bi_end_io = end_compressed_bio_read; | 635 | comp_bio->bi_end_io = end_compressed_bio_read; |
635 | atomic_inc(&cb->pending_bios); | 636 | atomic_inc(&cb->pending_bios); |
636 | 637 | ||
637 | for (page_index = 0; page_index < nr_pages; page_index++) { | 638 | for (pg_index = 0; pg_index < nr_pages; pg_index++) { |
638 | page = cb->compressed_pages[page_index]; | 639 | page = cb->compressed_pages[pg_index]; |
639 | page->mapping = inode->i_mapping; | 640 | page->mapping = inode->i_mapping; |
640 | page->index = em_start >> PAGE_CACHE_SHIFT; | 641 | page->index = em_start >> PAGE_CACHE_SHIFT; |
641 | 642 | ||
@@ -702,8 +703,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, | |||
702 | return 0; | 703 | return 0; |
703 | 704 | ||
704 | fail2: | 705 | fail2: |
705 | for (page_index = 0; page_index < nr_pages; page_index++) | 706 | for (pg_index = 0; pg_index < nr_pages; pg_index++) |
706 | free_page((unsigned long)cb->compressed_pages[page_index]); | 707 | free_page((unsigned long)cb->compressed_pages[pg_index]); |
707 | 708 | ||
708 | kfree(cb->compressed_pages); | 709 | kfree(cb->compressed_pages); |
709 | fail1: | 710 | fail1: |
@@ -945,7 +946,7 @@ void btrfs_exit_compress(void) | |||
945 | int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, | 946 | int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, |
946 | unsigned long total_out, u64 disk_start, | 947 | unsigned long total_out, u64 disk_start, |
947 | struct bio_vec *bvec, int vcnt, | 948 | struct bio_vec *bvec, int vcnt, |
948 | unsigned long *page_index, | 949 | unsigned long *pg_index, |
949 | unsigned long *pg_offset) | 950 | unsigned long *pg_offset) |
950 | { | 951 | { |
951 | unsigned long buf_offset; | 952 | unsigned long buf_offset; |
@@ -954,7 +955,7 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, | |||
954 | unsigned long working_bytes = total_out - buf_start; | 955 | unsigned long working_bytes = total_out - buf_start; |
955 | unsigned long bytes; | 956 | unsigned long bytes; |
956 | char *kaddr; | 957 | char *kaddr; |
957 | struct page *page_out = bvec[*page_index].bv_page; | 958 | struct page *page_out = bvec[*pg_index].bv_page; |
958 | 959 | ||
959 | /* | 960 | /* |
960 | * start byte is the first byte of the page we're currently | 961 | * start byte is the first byte of the page we're currently |
@@ -995,11 +996,11 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, | |||
995 | 996 | ||
996 | /* check if we need to pick another page */ | 997 | /* check if we need to pick another page */ |
997 | if (*pg_offset == PAGE_CACHE_SIZE) { | 998 | if (*pg_offset == PAGE_CACHE_SIZE) { |
998 | (*page_index)++; | 999 | (*pg_index)++; |
999 | if (*page_index >= vcnt) | 1000 | if (*pg_index >= vcnt) |
1000 | return 0; | 1001 | return 0; |
1001 | 1002 | ||
1002 | page_out = bvec[*page_index].bv_page; | 1003 | page_out = bvec[*pg_index].bv_page; |
1003 | *pg_offset = 0; | 1004 | *pg_offset = 0; |
1004 | start_byte = page_offset(page_out) - disk_start; | 1005 | start_byte = page_offset(page_out) - disk_start; |
1005 | 1006 | ||
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h index 51000174b9d7..a12059f4f0fd 100644 --- a/fs/btrfs/compression.h +++ b/fs/btrfs/compression.h | |||
@@ -37,7 +37,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, | |||
37 | int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, | 37 | int btrfs_decompress_buf2page(char *buf, unsigned long buf_start, |
38 | unsigned long total_out, u64 disk_start, | 38 | unsigned long total_out, u64 disk_start, |
39 | struct bio_vec *bvec, int vcnt, | 39 | struct bio_vec *bvec, int vcnt, |
40 | unsigned long *page_index, | 40 | unsigned long *pg_index, |
41 | unsigned long *pg_offset); | 41 | unsigned long *pg_offset); |
42 | 42 | ||
43 | int btrfs_submit_compressed_write(struct inode *inode, u64 start, | 43 | int btrfs_submit_compressed_write(struct inode *inode, u64 start, |
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c index 84d7ca1fe0ba..b0e18d986e0a 100644 --- a/fs/btrfs/ctree.c +++ b/fs/btrfs/ctree.c | |||
@@ -38,11 +38,6 @@ static int balance_node_right(struct btrfs_trans_handle *trans, | |||
38 | struct extent_buffer *src_buf); | 38 | struct extent_buffer *src_buf); |
39 | static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, | 39 | static int del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, |
40 | struct btrfs_path *path, int level, int slot); | 40 | struct btrfs_path *path, int level, int slot); |
41 | static int setup_items_for_insert(struct btrfs_trans_handle *trans, | ||
42 | struct btrfs_root *root, struct btrfs_path *path, | ||
43 | struct btrfs_key *cpu_key, u32 *data_size, | ||
44 | u32 total_data, u32 total_size, int nr); | ||
45 | |||
46 | 41 | ||
47 | struct btrfs_path *btrfs_alloc_path(void) | 42 | struct btrfs_path *btrfs_alloc_path(void) |
48 | { | 43 | { |
@@ -107,7 +102,7 @@ void btrfs_free_path(struct btrfs_path *p) | |||
107 | { | 102 | { |
108 | if (!p) | 103 | if (!p) |
109 | return; | 104 | return; |
110 | btrfs_release_path(NULL, p); | 105 | btrfs_release_path(p); |
111 | kmem_cache_free(btrfs_path_cachep, p); | 106 | kmem_cache_free(btrfs_path_cachep, p); |
112 | } | 107 | } |
113 | 108 | ||
@@ -117,7 +112,7 @@ void btrfs_free_path(struct btrfs_path *p) | |||
117 | * | 112 | * |
118 | * It is safe to call this on paths that no locks or extent buffers held. | 113 | * It is safe to call this on paths that no locks or extent buffers held. |
119 | */ | 114 | */ |
120 | noinline void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p) | 115 | noinline void btrfs_release_path(struct btrfs_path *p) |
121 | { | 116 | { |
122 | int i; | 117 | int i; |
123 | 118 | ||
@@ -1328,7 +1323,7 @@ static noinline int reada_for_balance(struct btrfs_root *root, | |||
1328 | ret = -EAGAIN; | 1323 | ret = -EAGAIN; |
1329 | 1324 | ||
1330 | /* release the whole path */ | 1325 | /* release the whole path */ |
1331 | btrfs_release_path(root, path); | 1326 | btrfs_release_path(path); |
1332 | 1327 | ||
1333 | /* read the blocks */ | 1328 | /* read the blocks */ |
1334 | if (block1) | 1329 | if (block1) |
@@ -1475,7 +1470,7 @@ read_block_for_search(struct btrfs_trans_handle *trans, | |||
1475 | return 0; | 1470 | return 0; |
1476 | } | 1471 | } |
1477 | free_extent_buffer(tmp); | 1472 | free_extent_buffer(tmp); |
1478 | btrfs_release_path(NULL, p); | 1473 | btrfs_release_path(p); |
1479 | return -EIO; | 1474 | return -EIO; |
1480 | } | 1475 | } |
1481 | } | 1476 | } |
@@ -1494,7 +1489,7 @@ read_block_for_search(struct btrfs_trans_handle *trans, | |||
1494 | if (p->reada) | 1489 | if (p->reada) |
1495 | reada_for_search(root, p, level, slot, key->objectid); | 1490 | reada_for_search(root, p, level, slot, key->objectid); |
1496 | 1491 | ||
1497 | btrfs_release_path(NULL, p); | 1492 | btrfs_release_path(p); |
1498 | 1493 | ||
1499 | ret = -EAGAIN; | 1494 | ret = -EAGAIN; |
1500 | tmp = read_tree_block(root, blocknr, blocksize, 0); | 1495 | tmp = read_tree_block(root, blocknr, blocksize, 0); |
@@ -1563,7 +1558,7 @@ setup_nodes_for_search(struct btrfs_trans_handle *trans, | |||
1563 | } | 1558 | } |
1564 | b = p->nodes[level]; | 1559 | b = p->nodes[level]; |
1565 | if (!b) { | 1560 | if (!b) { |
1566 | btrfs_release_path(NULL, p); | 1561 | btrfs_release_path(p); |
1567 | goto again; | 1562 | goto again; |
1568 | } | 1563 | } |
1569 | BUG_ON(btrfs_header_nritems(b) == 1); | 1564 | BUG_ON(btrfs_header_nritems(b) == 1); |
@@ -1753,7 +1748,7 @@ done: | |||
1753 | if (!p->leave_spinning) | 1748 | if (!p->leave_spinning) |
1754 | btrfs_set_path_blocking(p); | 1749 | btrfs_set_path_blocking(p); |
1755 | if (ret < 0) | 1750 | if (ret < 0) |
1756 | btrfs_release_path(root, p); | 1751 | btrfs_release_path(p); |
1757 | return ret; | 1752 | return ret; |
1758 | } | 1753 | } |
1759 | 1754 | ||
@@ -3026,7 +3021,7 @@ static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans, | |||
3026 | struct btrfs_file_extent_item); | 3021 | struct btrfs_file_extent_item); |
3027 | extent_len = btrfs_file_extent_num_bytes(leaf, fi); | 3022 | extent_len = btrfs_file_extent_num_bytes(leaf, fi); |
3028 | } | 3023 | } |
3029 | btrfs_release_path(root, path); | 3024 | btrfs_release_path(path); |
3030 | 3025 | ||
3031 | path->keep_locks = 1; | 3026 | path->keep_locks = 1; |
3032 | path->search_for_split = 1; | 3027 | path->search_for_split = 1; |
@@ -3216,7 +3211,6 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans, | |||
3216 | struct btrfs_path *path, | 3211 | struct btrfs_path *path, |
3217 | u32 new_size, int from_end) | 3212 | u32 new_size, int from_end) |
3218 | { | 3213 | { |
3219 | int ret = 0; | ||
3220 | int slot; | 3214 | int slot; |
3221 | struct extent_buffer *leaf; | 3215 | struct extent_buffer *leaf; |
3222 | struct btrfs_item *item; | 3216 | struct btrfs_item *item; |
@@ -3314,12 +3308,11 @@ int btrfs_truncate_item(struct btrfs_trans_handle *trans, | |||
3314 | btrfs_set_item_size(leaf, item, new_size); | 3308 | btrfs_set_item_size(leaf, item, new_size); |
3315 | btrfs_mark_buffer_dirty(leaf); | 3309 | btrfs_mark_buffer_dirty(leaf); |
3316 | 3310 | ||
3317 | ret = 0; | ||
3318 | if (btrfs_leaf_free_space(root, leaf) < 0) { | 3311 | if (btrfs_leaf_free_space(root, leaf) < 0) { |
3319 | btrfs_print_leaf(root, leaf); | 3312 | btrfs_print_leaf(root, leaf); |
3320 | BUG(); | 3313 | BUG(); |
3321 | } | 3314 | } |
3322 | return ret; | 3315 | return 0; |
3323 | } | 3316 | } |
3324 | 3317 | ||
3325 | /* | 3318 | /* |
@@ -3329,7 +3322,6 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans, | |||
3329 | struct btrfs_root *root, struct btrfs_path *path, | 3322 | struct btrfs_root *root, struct btrfs_path *path, |
3330 | u32 data_size) | 3323 | u32 data_size) |
3331 | { | 3324 | { |
3332 | int ret = 0; | ||
3333 | int slot; | 3325 | int slot; |
3334 | struct extent_buffer *leaf; | 3326 | struct extent_buffer *leaf; |
3335 | struct btrfs_item *item; | 3327 | struct btrfs_item *item; |
@@ -3394,12 +3386,11 @@ int btrfs_extend_item(struct btrfs_trans_handle *trans, | |||
3394 | btrfs_set_item_size(leaf, item, old_size + data_size); | 3386 | btrfs_set_item_size(leaf, item, old_size + data_size); |
3395 | btrfs_mark_buffer_dirty(leaf); | 3387 | btrfs_mark_buffer_dirty(leaf); |
3396 | 3388 | ||
3397 | ret = 0; | ||
3398 | if (btrfs_leaf_free_space(root, leaf) < 0) { | 3389 | if (btrfs_leaf_free_space(root, leaf) < 0) { |
3399 | btrfs_print_leaf(root, leaf); | 3390 | btrfs_print_leaf(root, leaf); |
3400 | BUG(); | 3391 | BUG(); |
3401 | } | 3392 | } |
3402 | return ret; | 3393 | return 0; |
3403 | } | 3394 | } |
3404 | 3395 | ||
3405 | /* | 3396 | /* |
@@ -3559,11 +3550,10 @@ out: | |||
3559 | * to save stack depth by doing the bulk of the work in a function | 3550 | * to save stack depth by doing the bulk of the work in a function |
3560 | * that doesn't call btrfs_search_slot | 3551 | * that doesn't call btrfs_search_slot |
3561 | */ | 3552 | */ |
3562 | static noinline_for_stack int | 3553 | int setup_items_for_insert(struct btrfs_trans_handle *trans, |
3563 | setup_items_for_insert(struct btrfs_trans_handle *trans, | 3554 | struct btrfs_root *root, struct btrfs_path *path, |
3564 | struct btrfs_root *root, struct btrfs_path *path, | 3555 | struct btrfs_key *cpu_key, u32 *data_size, |
3565 | struct btrfs_key *cpu_key, u32 *data_size, | 3556 | u32 total_data, u32 total_size, int nr) |
3566 | u32 total_data, u32 total_size, int nr) | ||
3567 | { | 3557 | { |
3568 | struct btrfs_item *item; | 3558 | struct btrfs_item *item; |
3569 | int i; | 3559 | int i; |
@@ -3647,7 +3637,6 @@ setup_items_for_insert(struct btrfs_trans_handle *trans, | |||
3647 | 3637 | ||
3648 | ret = 0; | 3638 | ret = 0; |
3649 | if (slot == 0) { | 3639 | if (slot == 0) { |
3650 | struct btrfs_disk_key disk_key; | ||
3651 | btrfs_cpu_key_to_disk(&disk_key, cpu_key); | 3640 | btrfs_cpu_key_to_disk(&disk_key, cpu_key); |
3652 | ret = fixup_low_keys(trans, root, path, &disk_key, 1); | 3641 | ret = fixup_low_keys(trans, root, path, &disk_key, 1); |
3653 | } | 3642 | } |
@@ -3949,7 +3938,7 @@ int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path) | |||
3949 | else | 3938 | else |
3950 | return 1; | 3939 | return 1; |
3951 | 3940 | ||
3952 | btrfs_release_path(root, path); | 3941 | btrfs_release_path(path); |
3953 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 3942 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
3954 | if (ret < 0) | 3943 | if (ret < 0) |
3955 | return ret; | 3944 | return ret; |
@@ -4073,7 +4062,7 @@ find_next_key: | |||
4073 | sret = btrfs_find_next_key(root, path, min_key, level, | 4062 | sret = btrfs_find_next_key(root, path, min_key, level, |
4074 | cache_only, min_trans); | 4063 | cache_only, min_trans); |
4075 | if (sret == 0) { | 4064 | if (sret == 0) { |
4076 | btrfs_release_path(root, path); | 4065 | btrfs_release_path(path); |
4077 | goto again; | 4066 | goto again; |
4078 | } else { | 4067 | } else { |
4079 | goto out; | 4068 | goto out; |
@@ -4152,7 +4141,7 @@ next: | |||
4152 | btrfs_node_key_to_cpu(c, &cur_key, slot); | 4141 | btrfs_node_key_to_cpu(c, &cur_key, slot); |
4153 | 4142 | ||
4154 | orig_lowest = path->lowest_level; | 4143 | orig_lowest = path->lowest_level; |
4155 | btrfs_release_path(root, path); | 4144 | btrfs_release_path(path); |
4156 | path->lowest_level = level; | 4145 | path->lowest_level = level; |
4157 | ret = btrfs_search_slot(NULL, root, &cur_key, path, | 4146 | ret = btrfs_search_slot(NULL, root, &cur_key, path, |
4158 | 0, 0); | 4147 | 0, 0); |
@@ -4229,7 +4218,7 @@ int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) | |||
4229 | again: | 4218 | again: |
4230 | level = 1; | 4219 | level = 1; |
4231 | next = NULL; | 4220 | next = NULL; |
4232 | btrfs_release_path(root, path); | 4221 | btrfs_release_path(path); |
4233 | 4222 | ||
4234 | path->keep_locks = 1; | 4223 | path->keep_locks = 1; |
4235 | 4224 | ||
@@ -4285,7 +4274,7 @@ again: | |||
4285 | goto again; | 4274 | goto again; |
4286 | 4275 | ||
4287 | if (ret < 0) { | 4276 | if (ret < 0) { |
4288 | btrfs_release_path(root, path); | 4277 | btrfs_release_path(path); |
4289 | goto done; | 4278 | goto done; |
4290 | } | 4279 | } |
4291 | 4280 | ||
@@ -4324,7 +4313,7 @@ again: | |||
4324 | goto again; | 4313 | goto again; |
4325 | 4314 | ||
4326 | if (ret < 0) { | 4315 | if (ret < 0) { |
4327 | btrfs_release_path(root, path); | 4316 | btrfs_release_path(path); |
4328 | goto done; | 4317 | goto done; |
4329 | } | 4318 | } |
4330 | 4319 | ||
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index 8f4b81de3ae2..332323e19dd1 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h | |||
@@ -23,6 +23,7 @@ | |||
23 | #include <linux/mm.h> | 23 | #include <linux/mm.h> |
24 | #include <linux/highmem.h> | 24 | #include <linux/highmem.h> |
25 | #include <linux/fs.h> | 25 | #include <linux/fs.h> |
26 | #include <linux/rwsem.h> | ||
26 | #include <linux/completion.h> | 27 | #include <linux/completion.h> |
27 | #include <linux/backing-dev.h> | 28 | #include <linux/backing-dev.h> |
28 | #include <linux/wait.h> | 29 | #include <linux/wait.h> |
@@ -33,6 +34,7 @@ | |||
33 | #include "extent_io.h" | 34 | #include "extent_io.h" |
34 | #include "extent_map.h" | 35 | #include "extent_map.h" |
35 | #include "async-thread.h" | 36 | #include "async-thread.h" |
37 | #include "ioctl.h" | ||
36 | 38 | ||
37 | struct btrfs_trans_handle; | 39 | struct btrfs_trans_handle; |
38 | struct btrfs_transaction; | 40 | struct btrfs_transaction; |
@@ -105,6 +107,12 @@ struct btrfs_ordered_sum; | |||
105 | /* For storing free space cache */ | 107 | /* For storing free space cache */ |
106 | #define BTRFS_FREE_SPACE_OBJECTID -11ULL | 108 | #define BTRFS_FREE_SPACE_OBJECTID -11ULL |
107 | 109 | ||
110 | /* | ||
111 | * The inode number assigned to the special inode for storing | ||
112 | * free ino cache | ||
113 | */ | ||
114 | #define BTRFS_FREE_INO_OBJECTID -12ULL | ||
115 | |||
108 | /* dummy objectid represents multiple objectids */ | 116 | /* dummy objectid represents multiple objectids */ |
109 | #define BTRFS_MULTIPLE_OBJECTIDS -255ULL | 117 | #define BTRFS_MULTIPLE_OBJECTIDS -255ULL |
110 | 118 | ||
@@ -187,7 +195,6 @@ struct btrfs_mapping_tree { | |||
187 | struct extent_map_tree map_tree; | 195 | struct extent_map_tree map_tree; |
188 | }; | 196 | }; |
189 | 197 | ||
190 | #define BTRFS_UUID_SIZE 16 | ||
191 | struct btrfs_dev_item { | 198 | struct btrfs_dev_item { |
192 | /* the internal btrfs device id */ | 199 | /* the internal btrfs device id */ |
193 | __le64 devid; | 200 | __le64 devid; |
@@ -294,7 +301,6 @@ static inline unsigned long btrfs_chunk_item_size(int num_stripes) | |||
294 | sizeof(struct btrfs_stripe) * (num_stripes - 1); | 301 | sizeof(struct btrfs_stripe) * (num_stripes - 1); |
295 | } | 302 | } |
296 | 303 | ||
297 | #define BTRFS_FSID_SIZE 16 | ||
298 | #define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) | 304 | #define BTRFS_HEADER_FLAG_WRITTEN (1ULL << 0) |
299 | #define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) | 305 | #define BTRFS_HEADER_FLAG_RELOC (1ULL << 1) |
300 | 306 | ||
@@ -510,6 +516,12 @@ struct btrfs_extent_item_v0 { | |||
510 | /* use full backrefs for extent pointers in the block */ | 516 | /* use full backrefs for extent pointers in the block */ |
511 | #define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8) | 517 | #define BTRFS_BLOCK_FLAG_FULL_BACKREF (1ULL << 8) |
512 | 518 | ||
519 | /* | ||
520 | * this flag is only used internally by scrub and may be changed at any time | ||
521 | * it is only declared here to avoid collisions | ||
522 | */ | ||
523 | #define BTRFS_EXTENT_FLAG_SUPER (1ULL << 48) | ||
524 | |||
513 | struct btrfs_tree_block_info { | 525 | struct btrfs_tree_block_info { |
514 | struct btrfs_disk_key key; | 526 | struct btrfs_disk_key key; |
515 | u8 level; | 527 | u8 level; |
@@ -740,12 +752,12 @@ struct btrfs_space_info { | |||
740 | */ | 752 | */ |
741 | unsigned long reservation_progress; | 753 | unsigned long reservation_progress; |
742 | 754 | ||
743 | int full:1; /* indicates that we cannot allocate any more | 755 | unsigned int full:1; /* indicates that we cannot allocate any more |
744 | chunks for this space */ | 756 | chunks for this space */ |
745 | int chunk_alloc:1; /* set if we are allocating a chunk */ | 757 | unsigned int chunk_alloc:1; /* set if we are allocating a chunk */ |
746 | 758 | ||
747 | int force_alloc; /* set if we need to force a chunk alloc for | 759 | unsigned int force_alloc; /* set if we need to force a chunk |
748 | this space */ | 760 | alloc for this space */ |
749 | 761 | ||
750 | struct list_head list; | 762 | struct list_head list; |
751 | 763 | ||
@@ -830,9 +842,6 @@ struct btrfs_block_group_cache { | |||
830 | u64 bytes_super; | 842 | u64 bytes_super; |
831 | u64 flags; | 843 | u64 flags; |
832 | u64 sectorsize; | 844 | u64 sectorsize; |
833 | int extents_thresh; | ||
834 | int free_extents; | ||
835 | int total_bitmaps; | ||
836 | unsigned int ro:1; | 845 | unsigned int ro:1; |
837 | unsigned int dirty:1; | 846 | unsigned int dirty:1; |
838 | unsigned int iref:1; | 847 | unsigned int iref:1; |
@@ -847,9 +856,7 @@ struct btrfs_block_group_cache { | |||
847 | struct btrfs_space_info *space_info; | 856 | struct btrfs_space_info *space_info; |
848 | 857 | ||
849 | /* free space cache stuff */ | 858 | /* free space cache stuff */ |
850 | spinlock_t tree_lock; | 859 | struct btrfs_free_space_ctl *free_space_ctl; |
851 | struct rb_root free_space_offset; | ||
852 | u64 free_space; | ||
853 | 860 | ||
854 | /* block group cache stuff */ | 861 | /* block group cache stuff */ |
855 | struct rb_node cache_node; | 862 | struct rb_node cache_node; |
@@ -869,6 +876,7 @@ struct btrfs_block_group_cache { | |||
869 | struct reloc_control; | 876 | struct reloc_control; |
870 | struct btrfs_device; | 877 | struct btrfs_device; |
871 | struct btrfs_fs_devices; | 878 | struct btrfs_fs_devices; |
879 | struct btrfs_delayed_root; | ||
872 | struct btrfs_fs_info { | 880 | struct btrfs_fs_info { |
873 | u8 fsid[BTRFS_FSID_SIZE]; | 881 | u8 fsid[BTRFS_FSID_SIZE]; |
874 | u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; | 882 | u8 chunk_tree_uuid[BTRFS_UUID_SIZE]; |
@@ -895,7 +903,10 @@ struct btrfs_fs_info { | |||
895 | /* logical->physical extent mapping */ | 903 | /* logical->physical extent mapping */ |
896 | struct btrfs_mapping_tree mapping_tree; | 904 | struct btrfs_mapping_tree mapping_tree; |
897 | 905 | ||
898 | /* block reservation for extent, checksum and root tree */ | 906 | /* |
907 | * block reservation for extent, checksum, root tree and | ||
908 | * delayed dir index item | ||
909 | */ | ||
899 | struct btrfs_block_rsv global_block_rsv; | 910 | struct btrfs_block_rsv global_block_rsv; |
900 | /* block reservation for delay allocation */ | 911 | /* block reservation for delay allocation */ |
901 | struct btrfs_block_rsv delalloc_block_rsv; | 912 | struct btrfs_block_rsv delalloc_block_rsv; |
@@ -1022,6 +1033,7 @@ struct btrfs_fs_info { | |||
1022 | * for the sys_munmap function call path | 1033 | * for the sys_munmap function call path |
1023 | */ | 1034 | */ |
1024 | struct btrfs_workers fixup_workers; | 1035 | struct btrfs_workers fixup_workers; |
1036 | struct btrfs_workers delayed_workers; | ||
1025 | struct task_struct *transaction_kthread; | 1037 | struct task_struct *transaction_kthread; |
1026 | struct task_struct *cleaner_kthread; | 1038 | struct task_struct *cleaner_kthread; |
1027 | int thread_pool_size; | 1039 | int thread_pool_size; |
@@ -1062,6 +1074,11 @@ struct btrfs_fs_info { | |||
1062 | /* all metadata allocations go through this cluster */ | 1074 | /* all metadata allocations go through this cluster */ |
1063 | struct btrfs_free_cluster meta_alloc_cluster; | 1075 | struct btrfs_free_cluster meta_alloc_cluster; |
1064 | 1076 | ||
1077 | /* auto defrag inodes go here */ | ||
1078 | spinlock_t defrag_inodes_lock; | ||
1079 | struct rb_root defrag_inodes; | ||
1080 | atomic_t defrag_running; | ||
1081 | |||
1065 | spinlock_t ref_cache_lock; | 1082 | spinlock_t ref_cache_lock; |
1066 | u64 total_ref_cache_size; | 1083 | u64 total_ref_cache_size; |
1067 | 1084 | ||
@@ -1077,8 +1094,21 @@ struct btrfs_fs_info { | |||
1077 | 1094 | ||
1078 | void *bdev_holder; | 1095 | void *bdev_holder; |
1079 | 1096 | ||
1097 | /* private scrub information */ | ||
1098 | struct mutex scrub_lock; | ||
1099 | atomic_t scrubs_running; | ||
1100 | atomic_t scrub_pause_req; | ||
1101 | atomic_t scrubs_paused; | ||
1102 | atomic_t scrub_cancel_req; | ||
1103 | wait_queue_head_t scrub_pause_wait; | ||
1104 | struct rw_semaphore scrub_super_lock; | ||
1105 | int scrub_workers_refcnt; | ||
1106 | struct btrfs_workers scrub_workers; | ||
1107 | |||
1080 | /* filesystem state */ | 1108 | /* filesystem state */ |
1081 | u64 fs_state; | 1109 | u64 fs_state; |
1110 | |||
1111 | struct btrfs_delayed_root *delayed_root; | ||
1082 | }; | 1112 | }; |
1083 | 1113 | ||
1084 | /* | 1114 | /* |
@@ -1088,9 +1118,6 @@ struct btrfs_fs_info { | |||
1088 | struct btrfs_root { | 1118 | struct btrfs_root { |
1089 | struct extent_buffer *node; | 1119 | struct extent_buffer *node; |
1090 | 1120 | ||
1091 | /* the node lock is held while changing the node pointer */ | ||
1092 | spinlock_t node_lock; | ||
1093 | |||
1094 | struct extent_buffer *commit_root; | 1121 | struct extent_buffer *commit_root; |
1095 | struct btrfs_root *log_root; | 1122 | struct btrfs_root *log_root; |
1096 | struct btrfs_root *reloc_root; | 1123 | struct btrfs_root *reloc_root; |
@@ -1107,6 +1134,16 @@ struct btrfs_root { | |||
1107 | spinlock_t accounting_lock; | 1134 | spinlock_t accounting_lock; |
1108 | struct btrfs_block_rsv *block_rsv; | 1135 | struct btrfs_block_rsv *block_rsv; |
1109 | 1136 | ||
1137 | /* free ino cache stuff */ | ||
1138 | struct mutex fs_commit_mutex; | ||
1139 | struct btrfs_free_space_ctl *free_ino_ctl; | ||
1140 | enum btrfs_caching_type cached; | ||
1141 | spinlock_t cache_lock; | ||
1142 | wait_queue_head_t cache_wait; | ||
1143 | struct btrfs_free_space_ctl *free_ino_pinned; | ||
1144 | u64 cache_progress; | ||
1145 | struct inode *cache_inode; | ||
1146 | |||
1110 | struct mutex log_mutex; | 1147 | struct mutex log_mutex; |
1111 | wait_queue_head_t log_writer_wait; | 1148 | wait_queue_head_t log_writer_wait; |
1112 | wait_queue_head_t log_commit_wait[2]; | 1149 | wait_queue_head_t log_commit_wait[2]; |
@@ -1162,12 +1199,49 @@ struct btrfs_root { | |||
1162 | struct rb_root inode_tree; | 1199 | struct rb_root inode_tree; |
1163 | 1200 | ||
1164 | /* | 1201 | /* |
1202 | * radix tree that keeps track of delayed nodes of every inode, | ||
1203 | * protected by inode_lock | ||
1204 | */ | ||
1205 | struct radix_tree_root delayed_nodes_tree; | ||
1206 | /* | ||
1165 | * right now this just gets used so that a root has its own devid | 1207 | * right now this just gets used so that a root has its own devid |
1166 | * for stat. It may be used for more later | 1208 | * for stat. It may be used for more later |
1167 | */ | 1209 | */ |
1168 | struct super_block anon_super; | 1210 | struct super_block anon_super; |
1169 | }; | 1211 | }; |
1170 | 1212 | ||
1213 | struct btrfs_ioctl_defrag_range_args { | ||
1214 | /* start of the defrag operation */ | ||
1215 | __u64 start; | ||
1216 | |||
1217 | /* number of bytes to defrag, use (u64)-1 to say all */ | ||
1218 | __u64 len; | ||
1219 | |||
1220 | /* | ||
1221 | * flags for the operation, which can include turning | ||
1222 | * on compression for this one defrag | ||
1223 | */ | ||
1224 | __u64 flags; | ||
1225 | |||
1226 | /* | ||
1227 | * any extent bigger than this will be considered | ||
1228 | * already defragged. Use 0 to take the kernel default | ||
1229 | * Use 1 to say every single extent must be rewritten | ||
1230 | */ | ||
1231 | __u32 extent_thresh; | ||
1232 | |||
1233 | /* | ||
1234 | * which compression method to use if turning on compression | ||
1235 | * for this defrag operation. If unspecified, zlib will | ||
1236 | * be used | ||
1237 | */ | ||
1238 | __u32 compress_type; | ||
1239 | |||
1240 | /* spare for later */ | ||
1241 | __u32 unused[4]; | ||
1242 | }; | ||
1243 | |||
1244 | |||
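A hedged userspace sketch of how the argument structure above is typically filled in for a whole-file defragment with recompression; BTRFS_IOC_DEFRAG_RANGE, BTRFS_DEFRAG_RANGE_COMPRESS, BTRFS_COMPRESS_ZLIB and the <btrfs/ioctl.h> header come from the btrfs user-space headers, not from this hunk, so treat them as assumptions here.

/* Sketch only: defragment (and zlib-recompress) an entire file.
 * The ioctl number and the flag/compression constants are assumed to be
 * provided by the btrfs user-space headers; they are not defined here. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <btrfs/ioctl.h>   /* assumed header exposing the defrag ioctl */

static int defrag_whole_file(const char *path)
{
	struct btrfs_ioctl_defrag_range_args args = {
		.start         = 0,
		.len           = (__u64)-1,        /* whole file */
		.flags         = BTRFS_DEFRAG_RANGE_COMPRESS,
		.extent_thresh = 0,                /* kernel default */
		.compress_type = BTRFS_COMPRESS_ZLIB,
	};
	int fd = open(path, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &args) < 0)
		perror("BTRFS_IOC_DEFRAG_RANGE");
	close(fd);
	return 0;
}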
1171 | /* | 1245 | /* |
1172 | * inode items have the data typically returned from stat and store other | 1246 | * inode items have the data typically returned from stat and store other |
1173 | * info about object characteristics. There is one for every file and dir in | 1247 | * info about object characteristics. There is one for every file and dir in |
@@ -1265,6 +1339,7 @@ struct btrfs_root { | |||
1265 | #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) | 1339 | #define BTRFS_MOUNT_CLEAR_CACHE (1 << 13) |
1266 | #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) | 1340 | #define BTRFS_MOUNT_USER_SUBVOL_RM_ALLOWED (1 << 14) |
1267 | #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) | 1341 | #define BTRFS_MOUNT_ENOSPC_DEBUG (1 << 15) |
1342 | #define BTRFS_MOUNT_AUTO_DEFRAG (1 << 16) | ||
1268 | 1343 | ||
1269 | #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) | 1344 | #define btrfs_clear_opt(o, opt) ((o) &= ~BTRFS_MOUNT_##opt) |
1270 | #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) | 1345 | #define btrfs_set_opt(o, opt) ((o) |= BTRFS_MOUNT_##opt) |
@@ -1440,26 +1515,12 @@ static inline u64 btrfs_stripe_offset_nr(struct extent_buffer *eb, | |||
1440 | return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); | 1515 | return btrfs_stripe_offset(eb, btrfs_stripe_nr(c, nr)); |
1441 | } | 1516 | } |
1442 | 1517 | ||
1443 | static inline void btrfs_set_stripe_offset_nr(struct extent_buffer *eb, | ||
1444 | struct btrfs_chunk *c, int nr, | ||
1445 | u64 val) | ||
1446 | { | ||
1447 | btrfs_set_stripe_offset(eb, btrfs_stripe_nr(c, nr), val); | ||
1448 | } | ||
1449 | |||
1450 | static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, | 1518 | static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb, |
1451 | struct btrfs_chunk *c, int nr) | 1519 | struct btrfs_chunk *c, int nr) |
1452 | { | 1520 | { |
1453 | return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); | 1521 | return btrfs_stripe_devid(eb, btrfs_stripe_nr(c, nr)); |
1454 | } | 1522 | } |
1455 | 1523 | ||
1456 | static inline void btrfs_set_stripe_devid_nr(struct extent_buffer *eb, | ||
1457 | struct btrfs_chunk *c, int nr, | ||
1458 | u64 val) | ||
1459 | { | ||
1460 | btrfs_set_stripe_devid(eb, btrfs_stripe_nr(c, nr), val); | ||
1461 | } | ||
1462 | |||
1463 | /* struct btrfs_block_group_item */ | 1524 | /* struct btrfs_block_group_item */ |
1464 | BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, | 1525 | BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item, |
1465 | used, 64); | 1526 | used, 64); |
@@ -1517,14 +1578,6 @@ btrfs_inode_ctime(struct btrfs_inode_item *inode_item) | |||
1517 | return (struct btrfs_timespec *)ptr; | 1578 | return (struct btrfs_timespec *)ptr; |
1518 | } | 1579 | } |
1519 | 1580 | ||
1520 | static inline struct btrfs_timespec * | ||
1521 | btrfs_inode_otime(struct btrfs_inode_item *inode_item) | ||
1522 | { | ||
1523 | unsigned long ptr = (unsigned long)inode_item; | ||
1524 | ptr += offsetof(struct btrfs_inode_item, otime); | ||
1525 | return (struct btrfs_timespec *)ptr; | ||
1526 | } | ||
1527 | |||
1528 | BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); | 1581 | BTRFS_SETGET_FUNCS(timespec_sec, struct btrfs_timespec, sec, 64); |
1529 | BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); | 1582 | BTRFS_SETGET_FUNCS(timespec_nsec, struct btrfs_timespec, nsec, 32); |
1530 | 1583 | ||
@@ -1875,33 +1928,6 @@ static inline u8 *btrfs_header_chunk_tree_uuid(struct extent_buffer *eb) | |||
1875 | return (u8 *)ptr; | 1928 | return (u8 *)ptr; |
1876 | } | 1929 | } |
1877 | 1930 | ||
1878 | static inline u8 *btrfs_super_fsid(struct extent_buffer *eb) | ||
1879 | { | ||
1880 | unsigned long ptr = offsetof(struct btrfs_super_block, fsid); | ||
1881 | return (u8 *)ptr; | ||
1882 | } | ||
1883 | |||
1884 | static inline u8 *btrfs_header_csum(struct extent_buffer *eb) | ||
1885 | { | ||
1886 | unsigned long ptr = offsetof(struct btrfs_header, csum); | ||
1887 | return (u8 *)ptr; | ||
1888 | } | ||
1889 | |||
1890 | static inline struct btrfs_node *btrfs_buffer_node(struct extent_buffer *eb) | ||
1891 | { | ||
1892 | return NULL; | ||
1893 | } | ||
1894 | |||
1895 | static inline struct btrfs_leaf *btrfs_buffer_leaf(struct extent_buffer *eb) | ||
1896 | { | ||
1897 | return NULL; | ||
1898 | } | ||
1899 | |||
1900 | static inline struct btrfs_header *btrfs_buffer_header(struct extent_buffer *eb) | ||
1901 | { | ||
1902 | return NULL; | ||
1903 | } | ||
1904 | |||
1905 | static inline int btrfs_is_leaf(struct extent_buffer *eb) | 1931 | static inline int btrfs_is_leaf(struct extent_buffer *eb) |
1906 | { | 1932 | { |
1907 | return btrfs_header_level(eb) == 0; | 1933 | return btrfs_header_level(eb) == 0; |
@@ -2055,22 +2081,6 @@ static inline struct btrfs_root *btrfs_sb(struct super_block *sb) | |||
2055 | return sb->s_fs_info; | 2081 | return sb->s_fs_info; |
2056 | } | 2082 | } |
2057 | 2083 | ||
2058 | static inline int btrfs_set_root_name(struct btrfs_root *root, | ||
2059 | const char *name, int len) | ||
2060 | { | ||
2061 | /* if we already have a name just free it */ | ||
2062 | kfree(root->name); | ||
2063 | |||
2064 | root->name = kmalloc(len+1, GFP_KERNEL); | ||
2065 | if (!root->name) | ||
2066 | return -ENOMEM; | ||
2067 | |||
2068 | memcpy(root->name, name, len); | ||
2069 | root->name[len] = '\0'; | ||
2070 | |||
2071 | return 0; | ||
2072 | } | ||
2073 | |||
2074 | static inline u32 btrfs_level_size(struct btrfs_root *root, int level) | 2084 | static inline u32 btrfs_level_size(struct btrfs_root *root, int level) |
2075 | { | 2085 | { |
2076 | if (level == 0) | 2086 | if (level == 0) |
@@ -2099,6 +2109,13 @@ static inline bool btrfs_mixed_space_info(struct btrfs_space_info *space_info) | |||
2099 | } | 2109 | } |
2100 | 2110 | ||
2101 | /* extent-tree.c */ | 2111 | /* extent-tree.c */ |
2112 | static inline u64 btrfs_calc_trans_metadata_size(struct btrfs_root *root, | ||
2113 | int num_items) | ||
2114 | { | ||
2115 | return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * | ||
2116 | 3 * num_items; | ||
2117 | } | ||
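For a feel of the numbers behind the new helper: assuming the usual 4KiB leafsize and nodesize of this era and BTRFS_MAX_LEVEL of 8, one item reserves (4096 + 7 * 4096) * 3 = 98304 bytes, i.e. 96KiB of metadata space. The per-level term covers having to COW one block at every level of a worst-case path; the factor of 3 is fixed headroom on top of that.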
2118 | |||
2102 | void btrfs_put_block_group(struct btrfs_block_group_cache *cache); | 2119 | void btrfs_put_block_group(struct btrfs_block_group_cache *cache); |
2103 | int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, | 2120 | int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans, |
2104 | struct btrfs_root *root, unsigned long count); | 2121 | struct btrfs_root *root, unsigned long count); |
@@ -2108,12 +2125,9 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, | |||
2108 | u64 num_bytes, u64 *refs, u64 *flags); | 2125 | u64 num_bytes, u64 *refs, u64 *flags); |
2109 | int btrfs_pin_extent(struct btrfs_root *root, | 2126 | int btrfs_pin_extent(struct btrfs_root *root, |
2110 | u64 bytenr, u64 num, int reserved); | 2127 | u64 bytenr, u64 num, int reserved); |
2111 | int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans, | ||
2112 | struct btrfs_root *root, struct extent_buffer *leaf); | ||
2113 | int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, | 2128 | int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans, |
2114 | struct btrfs_root *root, | 2129 | struct btrfs_root *root, |
2115 | u64 objectid, u64 offset, u64 bytenr); | 2130 | u64 objectid, u64 offset, u64 bytenr); |
2116 | int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy); | ||
2117 | struct btrfs_block_group_cache *btrfs_lookup_block_group( | 2131 | struct btrfs_block_group_cache *btrfs_lookup_block_group( |
2118 | struct btrfs_fs_info *info, | 2132 | struct btrfs_fs_info *info, |
2119 | u64 bytenr); | 2133 | u64 bytenr); |
@@ -2290,10 +2304,12 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans, | |||
2290 | struct btrfs_root *root, struct extent_buffer *parent, | 2304 | struct btrfs_root *root, struct extent_buffer *parent, |
2291 | int start_slot, int cache_only, u64 *last_ret, | 2305 | int start_slot, int cache_only, u64 *last_ret, |
2292 | struct btrfs_key *progress); | 2306 | struct btrfs_key *progress); |
2293 | void btrfs_release_path(struct btrfs_root *root, struct btrfs_path *p); | 2307 | void btrfs_release_path(struct btrfs_path *p); |
2294 | struct btrfs_path *btrfs_alloc_path(void); | 2308 | struct btrfs_path *btrfs_alloc_path(void); |
2295 | void btrfs_free_path(struct btrfs_path *p); | 2309 | void btrfs_free_path(struct btrfs_path *p); |
2296 | void btrfs_set_path_blocking(struct btrfs_path *p); | 2310 | void btrfs_set_path_blocking(struct btrfs_path *p); |
2311 | void btrfs_clear_path_blocking(struct btrfs_path *p, | ||
2312 | struct extent_buffer *held); | ||
2297 | void btrfs_unlock_up_safe(struct btrfs_path *p, int level); | 2313 | void btrfs_unlock_up_safe(struct btrfs_path *p, int level); |
2298 | 2314 | ||
2299 | int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, | 2315 | int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root, |
@@ -2305,13 +2321,12 @@ static inline int btrfs_del_item(struct btrfs_trans_handle *trans, | |||
2305 | return btrfs_del_items(trans, root, path, path->slots[0], 1); | 2321 | return btrfs_del_items(trans, root, path, path->slots[0], 1); |
2306 | } | 2322 | } |
2307 | 2323 | ||
2324 | int setup_items_for_insert(struct btrfs_trans_handle *trans, | ||
2325 | struct btrfs_root *root, struct btrfs_path *path, | ||
2326 | struct btrfs_key *cpu_key, u32 *data_size, | ||
2327 | u32 total_data, u32 total_size, int nr); | ||
2308 | int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root | 2328 | int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root |
2309 | *root, struct btrfs_key *key, void *data, u32 data_size); | 2329 | *root, struct btrfs_key *key, void *data, u32 data_size); |
2310 | int btrfs_insert_some_items(struct btrfs_trans_handle *trans, | ||
2311 | struct btrfs_root *root, | ||
2312 | struct btrfs_path *path, | ||
2313 | struct btrfs_key *cpu_key, u32 *data_size, | ||
2314 | int nr); | ||
2315 | int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, | 2330 | int btrfs_insert_empty_items(struct btrfs_trans_handle *trans, |
2316 | struct btrfs_root *root, | 2331 | struct btrfs_root *root, |
2317 | struct btrfs_path *path, | 2332 | struct btrfs_path *path, |
@@ -2357,8 +2372,6 @@ int btrfs_update_root(struct btrfs_trans_handle *trans, struct btrfs_root | |||
2357 | *item); | 2372 | *item); |
2358 | int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct | 2373 | int btrfs_find_last_root(struct btrfs_root *root, u64 objectid, struct |
2359 | btrfs_root_item *item, struct btrfs_key *key); | 2374 | btrfs_root_item *item, struct btrfs_key *key); |
2360 | int btrfs_search_root(struct btrfs_root *root, u64 search_start, | ||
2361 | u64 *found_objectid); | ||
2362 | int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); | 2375 | int btrfs_find_dead_roots(struct btrfs_root *root, u64 objectid); |
2363 | int btrfs_find_orphan_roots(struct btrfs_root *tree_root); | 2376 | int btrfs_find_orphan_roots(struct btrfs_root *tree_root); |
2364 | int btrfs_set_root_node(struct btrfs_root_item *item, | 2377 | int btrfs_set_root_node(struct btrfs_root_item *item, |
@@ -2368,7 +2381,7 @@ void btrfs_check_and_init_root_item(struct btrfs_root_item *item); | |||
2368 | /* dir-item.c */ | 2381 | /* dir-item.c */ |
2369 | int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, | 2382 | int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, |
2370 | struct btrfs_root *root, const char *name, | 2383 | struct btrfs_root *root, const char *name, |
2371 | int name_len, u64 dir, | 2384 | int name_len, struct inode *dir, |
2372 | struct btrfs_key *location, u8 type, u64 index); | 2385 | struct btrfs_key *location, u8 type, u64 index); |
2373 | struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, | 2386 | struct btrfs_dir_item *btrfs_lookup_dir_item(struct btrfs_trans_handle *trans, |
2374 | struct btrfs_root *root, | 2387 | struct btrfs_root *root, |
@@ -2413,12 +2426,6 @@ int btrfs_del_orphan_item(struct btrfs_trans_handle *trans, | |||
2413 | struct btrfs_root *root, u64 offset); | 2426 | struct btrfs_root *root, u64 offset); |
2414 | int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); | 2427 | int btrfs_find_orphan_item(struct btrfs_root *root, u64 offset); |
2415 | 2428 | ||
2416 | /* inode-map.c */ | ||
2417 | int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, | ||
2418 | struct btrfs_root *fs_root, | ||
2419 | u64 dirid, u64 *objectid); | ||
2420 | int btrfs_find_highest_inode(struct btrfs_root *fs_root, u64 *objectid); | ||
2421 | |||
2422 | /* inode-item.c */ | 2429 | /* inode-item.c */ |
2423 | int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, | 2430 | int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, |
2424 | struct btrfs_root *root, | 2431 | struct btrfs_root *root, |
@@ -2463,8 +2470,6 @@ int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, | |||
2463 | struct btrfs_ordered_sum *sums); | 2470 | struct btrfs_ordered_sum *sums); |
2464 | int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, | 2471 | int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode, |
2465 | struct bio *bio, u64 file_start, int contig); | 2472 | struct bio *bio, u64 file_start, int contig); |
2466 | int btrfs_csum_file_bytes(struct btrfs_root *root, struct inode *inode, | ||
2467 | u64 start, unsigned long len); | ||
2468 | struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, | 2473 | struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, |
2469 | struct btrfs_root *root, | 2474 | struct btrfs_root *root, |
2470 | struct btrfs_path *path, | 2475 | struct btrfs_path *path, |
@@ -2472,8 +2477,8 @@ struct btrfs_csum_item *btrfs_lookup_csum(struct btrfs_trans_handle *trans, | |||
2472 | int btrfs_csum_truncate(struct btrfs_trans_handle *trans, | 2477 | int btrfs_csum_truncate(struct btrfs_trans_handle *trans, |
2473 | struct btrfs_root *root, struct btrfs_path *path, | 2478 | struct btrfs_root *root, struct btrfs_path *path, |
2474 | u64 isize); | 2479 | u64 isize); |
2475 | int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, | 2480 | int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, |
2476 | u64 end, struct list_head *list); | 2481 | struct list_head *list, int search_commit); |
2477 | /* inode.c */ | 2482 | /* inode.c */ |
2478 | 2483 | ||
2479 | /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */ | 2484 | /* RHEL and EL kernels have a patch that renames PG_checked to FsMisc */ |
@@ -2502,8 +2507,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, | |||
2502 | u32 min_type); | 2507 | u32 min_type); |
2503 | 2508 | ||
2504 | int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); | 2509 | int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput); |
2505 | int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput, | ||
2506 | int sync); | ||
2507 | int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, | 2510 | int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end, |
2508 | struct extent_state **cached_state); | 2511 | struct extent_state **cached_state); |
2509 | int btrfs_writepages(struct address_space *mapping, | 2512 | int btrfs_writepages(struct address_space *mapping, |
@@ -2520,7 +2523,6 @@ unsigned long btrfs_force_ra(struct address_space *mapping, | |||
2520 | int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 2523 | int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); |
2521 | int btrfs_readpage(struct file *file, struct page *page); | 2524 | int btrfs_readpage(struct file *file, struct page *page); |
2522 | void btrfs_evict_inode(struct inode *inode); | 2525 | void btrfs_evict_inode(struct inode *inode); |
2523 | void btrfs_put_inode(struct inode *inode); | ||
2524 | int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); | 2526 | int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); |
2525 | void btrfs_dirty_inode(struct inode *inode); | 2527 | void btrfs_dirty_inode(struct inode *inode); |
2526 | struct inode *btrfs_alloc_inode(struct super_block *sb); | 2528 | struct inode *btrfs_alloc_inode(struct super_block *sb); |
@@ -2531,10 +2533,8 @@ void btrfs_destroy_cachep(void); | |||
2531 | long btrfs_ioctl_trans_end(struct file *file); | 2533 | long btrfs_ioctl_trans_end(struct file *file); |
2532 | struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, | 2534 | struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location, |
2533 | struct btrfs_root *root, int *was_new); | 2535 | struct btrfs_root *root, int *was_new); |
2534 | int btrfs_commit_write(struct file *file, struct page *page, | ||
2535 | unsigned from, unsigned to); | ||
2536 | struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, | 2536 | struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, |
2537 | size_t page_offset, u64 start, u64 end, | 2537 | size_t pg_offset, u64 start, u64 end, |
2538 | int create); | 2538 | int create); |
2539 | int btrfs_update_inode(struct btrfs_trans_handle *trans, | 2539 | int btrfs_update_inode(struct btrfs_trans_handle *trans, |
2540 | struct btrfs_root *root, | 2540 | struct btrfs_root *root, |
@@ -2566,12 +2566,16 @@ extern const struct dentry_operations btrfs_dentry_operations; | |||
2566 | long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); | 2566 | long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg); |
2567 | void btrfs_update_iflags(struct inode *inode); | 2567 | void btrfs_update_iflags(struct inode *inode); |
2568 | void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); | 2568 | void btrfs_inherit_iflags(struct inode *inode, struct inode *dir); |
2569 | 2569 | int btrfs_defrag_file(struct inode *inode, struct file *file, | |
2570 | struct btrfs_ioctl_defrag_range_args *range, | ||
2571 | u64 newer_than, unsigned long max_pages); | ||
2570 | /* file.c */ | 2572 | /* file.c */ |
2573 | int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, | ||
2574 | struct inode *inode); | ||
2575 | int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info); | ||
2571 | int btrfs_sync_file(struct file *file, int datasync); | 2576 | int btrfs_sync_file(struct file *file, int datasync); |
2572 | int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | 2577 | int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, |
2573 | int skip_pinned); | 2578 | int skip_pinned); |
2574 | int btrfs_check_file(struct btrfs_root *root, struct inode *inode); | ||
2575 | extern const struct file_operations btrfs_file_operations; | 2579 | extern const struct file_operations btrfs_file_operations; |
2576 | int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, | 2580 | int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, |
2577 | u64 start, u64 end, u64 *hint_byte, int drop_cache); | 2581 | u64 start, u64 end, u64 *hint_byte, int drop_cache); |
@@ -2591,10 +2595,6 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, | |||
2591 | /* sysfs.c */ | 2595 | /* sysfs.c */ |
2592 | int btrfs_init_sysfs(void); | 2596 | int btrfs_init_sysfs(void); |
2593 | void btrfs_exit_sysfs(void); | 2597 | void btrfs_exit_sysfs(void); |
2594 | int btrfs_sysfs_add_super(struct btrfs_fs_info *fs); | ||
2595 | int btrfs_sysfs_add_root(struct btrfs_root *root); | ||
2596 | void btrfs_sysfs_del_root(struct btrfs_root *root); | ||
2597 | void btrfs_sysfs_del_super(struct btrfs_fs_info *root); | ||
2598 | 2598 | ||
2599 | /* xattr.c */ | 2599 | /* xattr.c */ |
2600 | ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); | 2600 | ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size); |
@@ -2637,4 +2637,18 @@ void btrfs_reloc_pre_snapshot(struct btrfs_trans_handle *trans, | |||
2637 | u64 *bytes_to_reserve); | 2637 | u64 *bytes_to_reserve); |
2638 | void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, | 2638 | void btrfs_reloc_post_snapshot(struct btrfs_trans_handle *trans, |
2639 | struct btrfs_pending_snapshot *pending); | 2639 | struct btrfs_pending_snapshot *pending); |
2640 | |||
2641 | /* scrub.c */ | ||
2642 | int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, | ||
2643 | struct btrfs_scrub_progress *progress, int readonly); | ||
2644 | int btrfs_scrub_pause(struct btrfs_root *root); | ||
2645 | int btrfs_scrub_pause_super(struct btrfs_root *root); | ||
2646 | int btrfs_scrub_continue(struct btrfs_root *root); | ||
2647 | int btrfs_scrub_continue_super(struct btrfs_root *root); | ||
2648 | int btrfs_scrub_cancel(struct btrfs_root *root); | ||
2649 | int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev); | ||
2650 | int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid); | ||
2651 | int btrfs_scrub_progress(struct btrfs_root *root, u64 devid, | ||
2652 | struct btrfs_scrub_progress *progress); | ||
2653 | |||
2640 | #endif | 2654 | #endif |
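The scrub prototypes added just before the #endif are the whole kernel-facing surface of the new scrubber; a hedged sketch of how a caller (in practice the btrfs ioctl code, which is outside this hunk) might drive a single-device scrub follows.

    /* sketch: read-only scrub of one device across its whole byte range */
    static int example_scrub_one_device(struct btrfs_root *root, u64 devid)
    {
            struct btrfs_scrub_progress progress = {0};
            int ret;

            ret = btrfs_scrub_dev(root, devid, 0, (u64)-1, &progress, 1);
            if (ret)
                    return ret;

            /* the same counters can be polled from another context while
             * a scrub is still running */
            return btrfs_scrub_progress(root, devid, &progress);
    }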
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c new file mode 100644 index 000000000000..01e29503a54b --- /dev/null +++ b/fs/btrfs/delayed-inode.c | |||
@@ -0,0 +1,1695 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Fujitsu. All rights reserved. | ||
3 | * Written by Miao Xie <miaox@cn.fujitsu.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public | ||
7 | * License v2 as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
12 | * General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public | ||
15 | * License along with this program; if not, write to the | ||
16 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
17 | * Boston, MA 021110-1307, USA. | ||
18 | */ | ||
19 | |||
20 | #include <linux/slab.h> | ||
21 | #include "delayed-inode.h" | ||
22 | #include "disk-io.h" | ||
23 | #include "transaction.h" | ||
24 | |||
25 | #define BTRFS_DELAYED_WRITEBACK 400 | ||
26 | #define BTRFS_DELAYED_BACKGROUND 100 | ||
27 | |||
28 | static struct kmem_cache *delayed_node_cache; | ||
29 | |||
30 | int __init btrfs_delayed_inode_init(void) | ||
31 | { | ||
32 | delayed_node_cache = kmem_cache_create("delayed_node", | ||
33 | sizeof(struct btrfs_delayed_node), | ||
34 | 0, | ||
35 | SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, | ||
36 | NULL); | ||
37 | if (!delayed_node_cache) | ||
38 | return -ENOMEM; | ||
39 | return 0; | ||
40 | } | ||
41 | |||
42 | void btrfs_delayed_inode_exit(void) | ||
43 | { | ||
44 | if (delayed_node_cache) | ||
45 | kmem_cache_destroy(delayed_node_cache); | ||
46 | } | ||
47 | |||
48 | static inline void btrfs_init_delayed_node( | ||
49 | struct btrfs_delayed_node *delayed_node, | ||
50 | struct btrfs_root *root, u64 inode_id) | ||
51 | { | ||
52 | delayed_node->root = root; | ||
53 | delayed_node->inode_id = inode_id; | ||
54 | atomic_set(&delayed_node->refs, 0); | ||
55 | delayed_node->count = 0; | ||
56 | delayed_node->in_list = 0; | ||
57 | delayed_node->inode_dirty = 0; | ||
58 | delayed_node->ins_root = RB_ROOT; | ||
59 | delayed_node->del_root = RB_ROOT; | ||
60 | mutex_init(&delayed_node->mutex); | ||
61 | delayed_node->index_cnt = 0; | ||
62 | INIT_LIST_HEAD(&delayed_node->n_list); | ||
63 | INIT_LIST_HEAD(&delayed_node->p_list); | ||
64 | delayed_node->bytes_reserved = 0; | ||
65 | } | ||
66 | |||
67 | static inline int btrfs_is_continuous_delayed_item( | ||
68 | struct btrfs_delayed_item *item1, | ||
69 | struct btrfs_delayed_item *item2) | ||
70 | { | ||
71 | if (item1->key.type == BTRFS_DIR_INDEX_KEY && | ||
72 | item1->key.objectid == item2->key.objectid && | ||
73 | item1->key.type == item2->key.type && | ||
74 | item1->key.offset + 1 == item2->key.offset) | ||
75 | return 1; | ||
76 | return 0; | ||
77 | } | ||
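In other words, two delayed items count as continuous only when both are DIR_INDEX items for the same objectid and the second index offset directly follows the first (offsets 100 and 101, say). Creating several entries in one directory produces exactly such runs, and they are what the batch insert and delete helpers later in this file fold into a single leaf operation.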
78 | |||
79 | static inline struct btrfs_delayed_root *btrfs_get_delayed_root( | ||
80 | struct btrfs_root *root) | ||
81 | { | ||
82 | return root->fs_info->delayed_root; | ||
83 | } | ||
84 | |||
85 | static struct btrfs_delayed_node *btrfs_get_or_create_delayed_node( | ||
86 | struct inode *inode) | ||
87 | { | ||
88 | struct btrfs_delayed_node *node; | ||
89 | struct btrfs_inode *btrfs_inode = BTRFS_I(inode); | ||
90 | struct btrfs_root *root = btrfs_inode->root; | ||
91 | u64 ino = btrfs_ino(inode); | ||
92 | int ret; | ||
93 | |||
94 | again: | ||
95 | node = ACCESS_ONCE(btrfs_inode->delayed_node); | ||
96 | if (node) { | ||
97 | atomic_inc(&node->refs); /* can be accessed */ | ||
98 | return node; | ||
99 | } | ||
100 | |||
101 | spin_lock(&root->inode_lock); | ||
102 | node = radix_tree_lookup(&root->delayed_nodes_tree, ino); | ||
103 | if (node) { | ||
104 | if (btrfs_inode->delayed_node) { | ||
105 | spin_unlock(&root->inode_lock); | ||
106 | goto again; | ||
107 | } | ||
108 | btrfs_inode->delayed_node = node; | ||
109 | atomic_inc(&node->refs); /* can be accessed */ | ||
110 | atomic_inc(&node->refs); /* cached in the inode */ | ||
111 | spin_unlock(&root->inode_lock); | ||
112 | return node; | ||
113 | } | ||
114 | spin_unlock(&root->inode_lock); | ||
115 | |||
116 | node = kmem_cache_alloc(delayed_node_cache, GFP_NOFS); | ||
117 | if (!node) | ||
118 | return ERR_PTR(-ENOMEM); | ||
119 | btrfs_init_delayed_node(node, root, ino); | ||
120 | |||
121 | atomic_inc(&node->refs); /* cached in the btrfs inode */ | ||
122 | atomic_inc(&node->refs); /* can be accessed */ | ||
123 | |||
124 | ret = radix_tree_preload(GFP_NOFS & ~__GFP_HIGHMEM); | ||
125 | if (ret) { | ||
126 | kmem_cache_free(delayed_node_cache, node); | ||
127 | return ERR_PTR(ret); | ||
128 | } | ||
129 | |||
130 | spin_lock(&root->inode_lock); | ||
131 | ret = radix_tree_insert(&root->delayed_nodes_tree, ino, node); | ||
132 | if (ret == -EEXIST) { | ||
133 | kmem_cache_free(delayed_node_cache, node); | ||
134 | spin_unlock(&root->inode_lock); | ||
135 | radix_tree_preload_end(); | ||
136 | goto again; | ||
137 | } | ||
138 | btrfs_inode->delayed_node = node; | ||
139 | spin_unlock(&root->inode_lock); | ||
140 | radix_tree_preload_end(); | ||
141 | |||
142 | return node; | ||
143 | } | ||
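The preload/insert/-EEXIST sequence above is the standard radix tree insertion pattern: radix_tree_preload() charges the per-CPU node pool with GFP_NOFS allocations before inode_lock is taken, the insert itself then cannot fail on memory, and a task that loses the insertion race simply frees its local node and loops back to "again" to pick up the winner's copy.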
144 | |||
145 | /* | ||
146 | * Call it when holding delayed_node->mutex | ||
147 | * | ||
148 | * If mod = 1, add this node into the prepared list. | ||
149 | */ | ||
150 | static void btrfs_queue_delayed_node(struct btrfs_delayed_root *root, | ||
151 | struct btrfs_delayed_node *node, | ||
152 | int mod) | ||
153 | { | ||
154 | spin_lock(&root->lock); | ||
155 | if (node->in_list) { | ||
156 | if (!list_empty(&node->p_list)) | ||
157 | list_move_tail(&node->p_list, &root->prepare_list); | ||
158 | else if (mod) | ||
159 | list_add_tail(&node->p_list, &root->prepare_list); | ||
160 | } else { | ||
161 | list_add_tail(&node->n_list, &root->node_list); | ||
162 | list_add_tail(&node->p_list, &root->prepare_list); | ||
163 | atomic_inc(&node->refs); /* inserted into list */ | ||
164 | root->nodes++; | ||
165 | node->in_list = 1; | ||
166 | } | ||
167 | spin_unlock(&root->lock); | ||
168 | } | ||
169 | |||
170 | /* Call it when holding delayed_node->mutex */ | ||
171 | static void btrfs_dequeue_delayed_node(struct btrfs_delayed_root *root, | ||
172 | struct btrfs_delayed_node *node) | ||
173 | { | ||
174 | spin_lock(&root->lock); | ||
175 | if (node->in_list) { | ||
176 | root->nodes--; | ||
177 | atomic_dec(&node->refs); /* not in the list */ | ||
178 | list_del_init(&node->n_list); | ||
179 | if (!list_empty(&node->p_list)) | ||
180 | list_del_init(&node->p_list); | ||
181 | node->in_list = 0; | ||
182 | } | ||
183 | spin_unlock(&root->lock); | ||
184 | } | ||
185 | |||
186 | struct btrfs_delayed_node *btrfs_first_delayed_node( | ||
187 | struct btrfs_delayed_root *delayed_root) | ||
188 | { | ||
189 | struct list_head *p; | ||
190 | struct btrfs_delayed_node *node = NULL; | ||
191 | |||
192 | spin_lock(&delayed_root->lock); | ||
193 | if (list_empty(&delayed_root->node_list)) | ||
194 | goto out; | ||
195 | |||
196 | p = delayed_root->node_list.next; | ||
197 | node = list_entry(p, struct btrfs_delayed_node, n_list); | ||
198 | atomic_inc(&node->refs); | ||
199 | out: | ||
200 | spin_unlock(&delayed_root->lock); | ||
201 | |||
202 | return node; | ||
203 | } | ||
204 | |||
205 | struct btrfs_delayed_node *btrfs_next_delayed_node( | ||
206 | struct btrfs_delayed_node *node) | ||
207 | { | ||
208 | struct btrfs_delayed_root *delayed_root; | ||
209 | struct list_head *p; | ||
210 | struct btrfs_delayed_node *next = NULL; | ||
211 | |||
212 | delayed_root = node->root->fs_info->delayed_root; | ||
213 | spin_lock(&delayed_root->lock); | ||
214 | if (!node->in_list) { /* not in the list */ | ||
215 | if (list_empty(&delayed_root->node_list)) | ||
216 | goto out; | ||
217 | p = delayed_root->node_list.next; | ||
218 | } else if (list_is_last(&node->n_list, &delayed_root->node_list)) | ||
219 | goto out; | ||
220 | else | ||
221 | p = node->n_list.next; | ||
222 | |||
223 | next = list_entry(p, struct btrfs_delayed_node, n_list); | ||
224 | atomic_inc(&next->refs); | ||
225 | out: | ||
226 | spin_unlock(&delayed_root->lock); | ||
227 | |||
228 | return next; | ||
229 | } | ||
230 | |||
231 | static void __btrfs_release_delayed_node( | ||
232 | struct btrfs_delayed_node *delayed_node, | ||
233 | int mod) | ||
234 | { | ||
235 | struct btrfs_delayed_root *delayed_root; | ||
236 | |||
237 | if (!delayed_node) | ||
238 | return; | ||
239 | |||
240 | delayed_root = delayed_node->root->fs_info->delayed_root; | ||
241 | |||
242 | mutex_lock(&delayed_node->mutex); | ||
243 | if (delayed_node->count) | ||
244 | btrfs_queue_delayed_node(delayed_root, delayed_node, mod); | ||
245 | else | ||
246 | btrfs_dequeue_delayed_node(delayed_root, delayed_node); | ||
247 | mutex_unlock(&delayed_node->mutex); | ||
248 | |||
249 | if (atomic_dec_and_test(&delayed_node->refs)) { | ||
250 | struct btrfs_root *root = delayed_node->root; | ||
251 | spin_lock(&root->inode_lock); | ||
252 | if (atomic_read(&delayed_node->refs) == 0) { | ||
253 | radix_tree_delete(&root->delayed_nodes_tree, | ||
254 | delayed_node->inode_id); | ||
255 | kmem_cache_free(delayed_node_cache, delayed_node); | ||
256 | } | ||
257 | spin_unlock(&root->inode_lock); | ||
258 | } | ||
259 | } | ||
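Note the second look at the reference count under root->inode_lock: between atomic_dec_and_test() seeing the count reach zero and the lock being acquired, btrfs_get_or_create_delayed_node() may have found the node in the radix tree and taken a fresh reference, so the node is only unlinked from the tree and freed if the count is still zero while the lock that guards those lookups is held.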
260 | |||
261 | static inline void btrfs_release_delayed_node(struct btrfs_delayed_node *node) | ||
262 | { | ||
263 | __btrfs_release_delayed_node(node, 0); | ||
264 | } | ||
265 | |||
266 | struct btrfs_delayed_node *btrfs_first_prepared_delayed_node( | ||
267 | struct btrfs_delayed_root *delayed_root) | ||
268 | { | ||
269 | struct list_head *p; | ||
270 | struct btrfs_delayed_node *node = NULL; | ||
271 | |||
272 | spin_lock(&delayed_root->lock); | ||
273 | if (list_empty(&delayed_root->prepare_list)) | ||
274 | goto out; | ||
275 | |||
276 | p = delayed_root->prepare_list.next; | ||
277 | list_del_init(p); | ||
278 | node = list_entry(p, struct btrfs_delayed_node, p_list); | ||
279 | atomic_inc(&node->refs); | ||
280 | out: | ||
281 | spin_unlock(&delayed_root->lock); | ||
282 | |||
283 | return node; | ||
284 | } | ||
285 | |||
286 | static inline void btrfs_release_prepared_delayed_node( | ||
287 | struct btrfs_delayed_node *node) | ||
288 | { | ||
289 | __btrfs_release_delayed_node(node, 1); | ||
290 | } | ||
291 | |||
292 | struct btrfs_delayed_item *btrfs_alloc_delayed_item(u32 data_len) | ||
293 | { | ||
294 | struct btrfs_delayed_item *item; | ||
295 | item = kmalloc(sizeof(*item) + data_len, GFP_NOFS); | ||
296 | if (item) { | ||
297 | item->data_len = data_len; | ||
298 | item->ins_or_del = 0; | ||
299 | item->bytes_reserved = 0; | ||
300 | item->block_rsv = NULL; | ||
301 | item->delayed_node = NULL; | ||
302 | atomic_set(&item->refs, 1); | ||
303 | } | ||
304 | return item; | ||
305 | } | ||
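The delayed item and its payload share one allocation, with the data area immediately following the header. A hedged sketch of how a caller stages a directory index insertion is below; the variable names and the fill helper are made up for illustration, but the key layout matches what the batch insert code later writes into the leaf.

    /* sketch: stage one dir-index entry in memory for a later batched insert */
    struct btrfs_delayed_item *item;

    item = btrfs_alloc_delayed_item(entry_size);         /* header + payload */
    if (!item)
            return -ENOMEM;

    item->key.objectid = dir_ino;                        /* the directory's inode */
    btrfs_set_key_type(&item->key, BTRFS_DIR_INDEX_KEY);
    item->key.offset = index;                            /* next free dir index */
    fill_dir_item(item->data, name, name_len);           /* hypothetical helper */

    /* __btrfs_add_delayed_insertion_item() then links it into ins_root */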
306 | |||
307 | /* | ||
308 | * __btrfs_lookup_delayed_item - look up the delayed item by key | ||
309 | * @delayed_node: pointer to the delayed node | ||
310 | * @key: the key to look up | ||
311 | * @prev: used to store the prev item if the right item isn't found | ||
312 | * @next: used to store the next item if the right item isn't found | ||
313 | * | ||
314 | * Note: if we don't find the right item, we will return the prev item and | ||
315 | * the next item. | ||
316 | */ | ||
317 | static struct btrfs_delayed_item *__btrfs_lookup_delayed_item( | ||
318 | struct rb_root *root, | ||
319 | struct btrfs_key *key, | ||
320 | struct btrfs_delayed_item **prev, | ||
321 | struct btrfs_delayed_item **next) | ||
322 | { | ||
323 | struct rb_node *node, *prev_node = NULL; | ||
324 | struct btrfs_delayed_item *delayed_item = NULL; | ||
325 | int ret = 0; | ||
326 | |||
327 | node = root->rb_node; | ||
328 | |||
329 | while (node) { | ||
330 | delayed_item = rb_entry(node, struct btrfs_delayed_item, | ||
331 | rb_node); | ||
332 | prev_node = node; | ||
333 | ret = btrfs_comp_cpu_keys(&delayed_item->key, key); | ||
334 | if (ret < 0) | ||
335 | node = node->rb_right; | ||
336 | else if (ret > 0) | ||
337 | node = node->rb_left; | ||
338 | else | ||
339 | return delayed_item; | ||
340 | } | ||
341 | |||
342 | if (prev) { | ||
343 | if (!prev_node) | ||
344 | *prev = NULL; | ||
345 | else if (ret < 0) | ||
346 | *prev = delayed_item; | ||
347 | else if ((node = rb_prev(prev_node)) != NULL) { | ||
348 | *prev = rb_entry(node, struct btrfs_delayed_item, | ||
349 | rb_node); | ||
350 | } else | ||
351 | *prev = NULL; | ||
352 | } | ||
353 | |||
354 | if (next) { | ||
355 | if (!prev_node) | ||
356 | *next = NULL; | ||
357 | else if (ret > 0) | ||
358 | *next = delayed_item; | ||
359 | else if ((node = rb_next(prev_node)) != NULL) { | ||
360 | *next = rb_entry(node, struct btrfs_delayed_item, | ||
361 | rb_node); | ||
362 | } else | ||
363 | *next = NULL; | ||
364 | } | ||
365 | return NULL; | ||
366 | } | ||
367 | |||
368 | struct btrfs_delayed_item *__btrfs_lookup_delayed_insertion_item( | ||
369 | struct btrfs_delayed_node *delayed_node, | ||
370 | struct btrfs_key *key) | ||
371 | { | ||
372 | struct btrfs_delayed_item *item; | ||
373 | |||
374 | item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key, | ||
375 | NULL, NULL); | ||
376 | return item; | ||
377 | } | ||
378 | |||
379 | struct btrfs_delayed_item *__btrfs_lookup_delayed_deletion_item( | ||
380 | struct btrfs_delayed_node *delayed_node, | ||
381 | struct btrfs_key *key) | ||
382 | { | ||
383 | struct btrfs_delayed_item *item; | ||
384 | |||
385 | item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key, | ||
386 | NULL, NULL); | ||
387 | return item; | ||
388 | } | ||
389 | |||
390 | struct btrfs_delayed_item *__btrfs_search_delayed_insertion_item( | ||
391 | struct btrfs_delayed_node *delayed_node, | ||
392 | struct btrfs_key *key) | ||
393 | { | ||
394 | struct btrfs_delayed_item *item, *next; | ||
395 | |||
396 | item = __btrfs_lookup_delayed_item(&delayed_node->ins_root, key, | ||
397 | NULL, &next); | ||
398 | if (!item) | ||
399 | item = next; | ||
400 | |||
401 | return item; | ||
402 | } | ||
403 | |||
404 | struct btrfs_delayed_item *__btrfs_search_delayed_deletion_item( | ||
405 | struct btrfs_delayed_node *delayed_node, | ||
406 | struct btrfs_key *key) | ||
407 | { | ||
408 | struct btrfs_delayed_item *item, *next; | ||
409 | |||
410 | item = __btrfs_lookup_delayed_item(&delayed_node->del_root, key, | ||
411 | NULL, &next); | ||
412 | if (!item) | ||
413 | item = next; | ||
414 | |||
415 | return item; | ||
416 | } | ||
417 | |||
418 | static int __btrfs_add_delayed_item(struct btrfs_delayed_node *delayed_node, | ||
419 | struct btrfs_delayed_item *ins, | ||
420 | int action) | ||
421 | { | ||
422 | struct rb_node **p, *node; | ||
423 | struct rb_node *parent_node = NULL; | ||
424 | struct rb_root *root; | ||
425 | struct btrfs_delayed_item *item; | ||
426 | int cmp; | ||
427 | |||
428 | if (action == BTRFS_DELAYED_INSERTION_ITEM) | ||
429 | root = &delayed_node->ins_root; | ||
430 | else if (action == BTRFS_DELAYED_DELETION_ITEM) | ||
431 | root = &delayed_node->del_root; | ||
432 | else | ||
433 | BUG(); | ||
434 | p = &root->rb_node; | ||
435 | node = &ins->rb_node; | ||
436 | |||
437 | while (*p) { | ||
438 | parent_node = *p; | ||
439 | item = rb_entry(parent_node, struct btrfs_delayed_item, | ||
440 | rb_node); | ||
441 | |||
442 | cmp = btrfs_comp_cpu_keys(&item->key, &ins->key); | ||
443 | if (cmp < 0) | ||
444 | p = &(*p)->rb_right; | ||
445 | else if (cmp > 0) | ||
446 | p = &(*p)->rb_left; | ||
447 | else | ||
448 | return -EEXIST; | ||
449 | } | ||
450 | |||
451 | rb_link_node(node, parent_node, p); | ||
452 | rb_insert_color(node, root); | ||
453 | ins->delayed_node = delayed_node; | ||
454 | ins->ins_or_del = action; | ||
455 | |||
456 | if (ins->key.type == BTRFS_DIR_INDEX_KEY && | ||
457 | action == BTRFS_DELAYED_INSERTION_ITEM && | ||
458 | ins->key.offset >= delayed_node->index_cnt) | ||
459 | delayed_node->index_cnt = ins->key.offset + 1; | ||
460 | |||
461 | delayed_node->count++; | ||
462 | atomic_inc(&delayed_node->root->fs_info->delayed_root->items); | ||
463 | return 0; | ||
464 | } | ||
465 | |||
466 | static int __btrfs_add_delayed_insertion_item(struct btrfs_delayed_node *node, | ||
467 | struct btrfs_delayed_item *item) | ||
468 | { | ||
469 | return __btrfs_add_delayed_item(node, item, | ||
470 | BTRFS_DELAYED_INSERTION_ITEM); | ||
471 | } | ||
472 | |||
473 | static int __btrfs_add_delayed_deletion_item(struct btrfs_delayed_node *node, | ||
474 | struct btrfs_delayed_item *item) | ||
475 | { | ||
476 | return __btrfs_add_delayed_item(node, item, | ||
477 | BTRFS_DELAYED_DELETION_ITEM); | ||
478 | } | ||
479 | |||
480 | static void __btrfs_remove_delayed_item(struct btrfs_delayed_item *delayed_item) | ||
481 | { | ||
482 | struct rb_root *root; | ||
483 | struct btrfs_delayed_root *delayed_root; | ||
484 | |||
485 | delayed_root = delayed_item->delayed_node->root->fs_info->delayed_root; | ||
486 | |||
487 | BUG_ON(!delayed_root); | ||
488 | BUG_ON(delayed_item->ins_or_del != BTRFS_DELAYED_DELETION_ITEM && | ||
489 | delayed_item->ins_or_del != BTRFS_DELAYED_INSERTION_ITEM); | ||
490 | |||
491 | if (delayed_item->ins_or_del == BTRFS_DELAYED_INSERTION_ITEM) | ||
492 | root = &delayed_item->delayed_node->ins_root; | ||
493 | else | ||
494 | root = &delayed_item->delayed_node->del_root; | ||
495 | |||
496 | rb_erase(&delayed_item->rb_node, root); | ||
497 | delayed_item->delayed_node->count--; | ||
498 | atomic_dec(&delayed_root->items); | ||
499 | if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND && | ||
500 | waitqueue_active(&delayed_root->wait)) | ||
501 | wake_up(&delayed_root->wait); | ||
502 | } | ||
503 | |||
504 | static void btrfs_release_delayed_item(struct btrfs_delayed_item *item) | ||
505 | { | ||
506 | if (item) { | ||
507 | __btrfs_remove_delayed_item(item); | ||
508 | if (atomic_dec_and_test(&item->refs)) | ||
509 | kfree(item); | ||
510 | } | ||
511 | } | ||
512 | |||
513 | struct btrfs_delayed_item *__btrfs_first_delayed_insertion_item( | ||
514 | struct btrfs_delayed_node *delayed_node) | ||
515 | { | ||
516 | struct rb_node *p; | ||
517 | struct btrfs_delayed_item *item = NULL; | ||
518 | |||
519 | p = rb_first(&delayed_node->ins_root); | ||
520 | if (p) | ||
521 | item = rb_entry(p, struct btrfs_delayed_item, rb_node); | ||
522 | |||
523 | return item; | ||
524 | } | ||
525 | |||
526 | struct btrfs_delayed_item *__btrfs_first_delayed_deletion_item( | ||
527 | struct btrfs_delayed_node *delayed_node) | ||
528 | { | ||
529 | struct rb_node *p; | ||
530 | struct btrfs_delayed_item *item = NULL; | ||
531 | |||
532 | p = rb_first(&delayed_node->del_root); | ||
533 | if (p) | ||
534 | item = rb_entry(p, struct btrfs_delayed_item, rb_node); | ||
535 | |||
536 | return item; | ||
537 | } | ||
538 | |||
539 | struct btrfs_delayed_item *__btrfs_next_delayed_item( | ||
540 | struct btrfs_delayed_item *item) | ||
541 | { | ||
542 | struct rb_node *p; | ||
543 | struct btrfs_delayed_item *next = NULL; | ||
544 | |||
545 | p = rb_next(&item->rb_node); | ||
546 | if (p) | ||
547 | next = rb_entry(p, struct btrfs_delayed_item, rb_node); | ||
548 | |||
549 | return next; | ||
550 | } | ||
551 | |||
552 | static inline struct btrfs_delayed_node *btrfs_get_delayed_node( | ||
553 | struct inode *inode) | ||
554 | { | ||
555 | struct btrfs_inode *btrfs_inode = BTRFS_I(inode); | ||
556 | struct btrfs_delayed_node *delayed_node; | ||
557 | |||
558 | delayed_node = btrfs_inode->delayed_node; | ||
559 | if (delayed_node) | ||
560 | atomic_inc(&delayed_node->refs); | ||
561 | |||
562 | return delayed_node; | ||
563 | } | ||
564 | |||
565 | static inline struct btrfs_root *btrfs_get_fs_root(struct btrfs_root *root, | ||
566 | u64 root_id) | ||
567 | { | ||
568 | struct btrfs_key root_key; | ||
569 | |||
570 | if (root->objectid == root_id) | ||
571 | return root; | ||
572 | |||
573 | root_key.objectid = root_id; | ||
574 | root_key.type = BTRFS_ROOT_ITEM_KEY; | ||
575 | root_key.offset = (u64)-1; | ||
576 | return btrfs_read_fs_root_no_name(root->fs_info, &root_key); | ||
577 | } | ||
578 | |||
579 | static int btrfs_delayed_item_reserve_metadata(struct btrfs_trans_handle *trans, | ||
580 | struct btrfs_root *root, | ||
581 | struct btrfs_delayed_item *item) | ||
582 | { | ||
583 | struct btrfs_block_rsv *src_rsv; | ||
584 | struct btrfs_block_rsv *dst_rsv; | ||
585 | u64 num_bytes; | ||
586 | int ret; | ||
587 | |||
588 | if (!trans->bytes_reserved) | ||
589 | return 0; | ||
590 | |||
591 | src_rsv = trans->block_rsv; | ||
592 | dst_rsv = &root->fs_info->global_block_rsv; | ||
593 | |||
594 | num_bytes = btrfs_calc_trans_metadata_size(root, 1); | ||
595 | ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); | ||
596 | if (!ret) { | ||
597 | item->bytes_reserved = num_bytes; | ||
598 | item->block_rsv = dst_rsv; | ||
599 | } | ||
600 | |||
601 | return ret; | ||
602 | } | ||
603 | |||
604 | static void btrfs_delayed_item_release_metadata(struct btrfs_root *root, | ||
605 | struct btrfs_delayed_item *item) | ||
606 | { | ||
607 | if (!item->bytes_reserved) | ||
608 | return; | ||
609 | |||
610 | btrfs_block_rsv_release(root, item->block_rsv, | ||
611 | item->bytes_reserved); | ||
612 | } | ||
613 | |||
614 | static int btrfs_delayed_inode_reserve_metadata( | ||
615 | struct btrfs_trans_handle *trans, | ||
616 | struct btrfs_root *root, | ||
617 | struct btrfs_delayed_node *node) | ||
618 | { | ||
619 | struct btrfs_block_rsv *src_rsv; | ||
620 | struct btrfs_block_rsv *dst_rsv; | ||
621 | u64 num_bytes; | ||
622 | int ret; | ||
623 | |||
624 | if (!trans->bytes_reserved) | ||
625 | return 0; | ||
626 | |||
627 | src_rsv = trans->block_rsv; | ||
628 | dst_rsv = &root->fs_info->global_block_rsv; | ||
629 | |||
630 | num_bytes = btrfs_calc_trans_metadata_size(root, 1); | ||
631 | ret = btrfs_block_rsv_migrate(src_rsv, dst_rsv, num_bytes); | ||
632 | if (!ret) | ||
633 | node->bytes_reserved = num_bytes; | ||
634 | |||
635 | return ret; | ||
636 | } | ||
637 | |||
638 | static void btrfs_delayed_inode_release_metadata(struct btrfs_root *root, | ||
639 | struct btrfs_delayed_node *node) | ||
640 | { | ||
641 | struct btrfs_block_rsv *rsv; | ||
642 | |||
643 | if (!node->bytes_reserved) | ||
644 | return; | ||
645 | |||
646 | rsv = &root->fs_info->global_block_rsv; | ||
647 | btrfs_block_rsv_release(root, rsv, | ||
648 | node->bytes_reserved); | ||
649 | node->bytes_reserved = 0; | ||
650 | } | ||
651 | |||
652 | /* | ||
653 | * This helper will insert some continuous items into the same leaf according | ||
654 | * to the free space of the leaf. | ||
655 | */ | ||
656 | static int btrfs_batch_insert_items(struct btrfs_trans_handle *trans, | ||
657 | struct btrfs_root *root, | ||
658 | struct btrfs_path *path, | ||
659 | struct btrfs_delayed_item *item) | ||
660 | { | ||
661 | struct btrfs_delayed_item *curr, *next; | ||
662 | int free_space; | ||
663 | int total_data_size = 0, total_size = 0; | ||
664 | struct extent_buffer *leaf; | ||
665 | char *data_ptr; | ||
666 | struct btrfs_key *keys; | ||
667 | u32 *data_size; | ||
668 | struct list_head head; | ||
669 | int slot; | ||
670 | int nitems; | ||
671 | int i; | ||
672 | int ret = 0; | ||
673 | |||
674 | BUG_ON(!path->nodes[0]); | ||
675 | |||
676 | leaf = path->nodes[0]; | ||
677 | free_space = btrfs_leaf_free_space(root, leaf); | ||
678 | INIT_LIST_HEAD(&head); | ||
679 | |||
680 | next = item; | ||
681 | nitems = 0; | ||
682 | /* | ||
683 | * count the number of continuous items that we can insert in one batch | ||
684 | */ | ||
685 | while (total_size + next->data_len + sizeof(struct btrfs_item) <= | ||
686 | free_space) { | ||
687 | total_data_size += next->data_len; | ||
688 | total_size += next->data_len + sizeof(struct btrfs_item); | ||
689 | list_add_tail(&next->tree_list, &head); | ||
690 | nitems++; | ||
691 | |||
692 | curr = next; | ||
693 | next = __btrfs_next_delayed_item(curr); | ||
694 | if (!next) | ||
695 | break; | ||
696 | |||
697 | if (!btrfs_is_continuous_delayed_item(curr, next)) | ||
698 | break; | ||
699 | } | ||
700 | |||
701 | if (!nitems) { | ||
702 | ret = 0; | ||
703 | goto out; | ||
704 | } | ||
705 | |||
706 | /* | ||
707 | * we need to allocate some memory space, but it might cause the task | ||
708 | * to sleep, so we set all locked nodes in the path to blocking locks | ||
709 | * first. | ||
710 | */ | ||
711 | btrfs_set_path_blocking(path); | ||
712 | |||
713 | keys = kmalloc(sizeof(struct btrfs_key) * nitems, GFP_NOFS); | ||
714 | if (!keys) { | ||
715 | ret = -ENOMEM; | ||
716 | goto out; | ||
717 | } | ||
718 | |||
719 | data_size = kmalloc(sizeof(u32) * nitems, GFP_NOFS); | ||
720 | if (!data_size) { | ||
721 | ret = -ENOMEM; | ||
722 | goto error; | ||
723 | } | ||
724 | |||
725 | /* get keys of all the delayed items */ | ||
726 | i = 0; | ||
727 | list_for_each_entry(next, &head, tree_list) { | ||
728 | keys[i] = next->key; | ||
729 | data_size[i] = next->data_len; | ||
730 | i++; | ||
731 | } | ||
732 | |||
733 | /* reset all the locked nodes in the path to spinning locks. */ | ||
734 | btrfs_clear_path_blocking(path, NULL); | ||
735 | |||
736 | /* insert the keys of the items */ | ||
737 | ret = setup_items_for_insert(trans, root, path, keys, data_size, | ||
738 | total_data_size, total_size, nitems); | ||
739 | if (ret) | ||
740 | goto error; | ||
741 | |||
742 | /* insert the dir index items */ | ||
743 | slot = path->slots[0]; | ||
744 | list_for_each_entry_safe(curr, next, &head, tree_list) { | ||
745 | data_ptr = btrfs_item_ptr(leaf, slot, char); | ||
746 | write_extent_buffer(leaf, &curr->data, | ||
747 | (unsigned long)data_ptr, | ||
748 | curr->data_len); | ||
749 | slot++; | ||
750 | |||
751 | btrfs_delayed_item_release_metadata(root, curr); | ||
752 | |||
753 | list_del(&curr->tree_list); | ||
754 | btrfs_release_delayed_item(curr); | ||
755 | } | ||
756 | |||
757 | error: | ||
758 | kfree(data_size); | ||
759 | kfree(keys); | ||
760 | out: | ||
761 | return ret; | ||
762 | } | ||
763 | |||
764 | /* | ||
765 | * This helper handles simple insertions that don't need to extend an item | ||
766 | * with new data, such as directory name index and inode item insertions. | ||
767 | */ | ||
768 | static int btrfs_insert_delayed_item(struct btrfs_trans_handle *trans, | ||
769 | struct btrfs_root *root, | ||
770 | struct btrfs_path *path, | ||
771 | struct btrfs_delayed_item *delayed_item) | ||
772 | { | ||
773 | struct extent_buffer *leaf; | ||
774 | struct btrfs_item *item; | ||
775 | char *ptr; | ||
776 | int ret; | ||
777 | |||
778 | ret = btrfs_insert_empty_item(trans, root, path, &delayed_item->key, | ||
779 | delayed_item->data_len); | ||
780 | if (ret < 0 && ret != -EEXIST) | ||
781 | return ret; | ||
782 | |||
783 | leaf = path->nodes[0]; | ||
784 | |||
785 | item = btrfs_item_nr(leaf, path->slots[0]); | ||
786 | ptr = btrfs_item_ptr(leaf, path->slots[0], char); | ||
787 | |||
788 | write_extent_buffer(leaf, delayed_item->data, (unsigned long)ptr, | ||
789 | delayed_item->data_len); | ||
790 | btrfs_mark_buffer_dirty(leaf); | ||
791 | |||
792 | btrfs_delayed_item_release_metadata(root, delayed_item); | ||
793 | return 0; | ||
794 | } | ||
795 | |||
796 | /* | ||
797 | * we insert one item first, and then, if there are continuous items after it, | ||
798 | * we try to insert those items into the same leaf. | ||
799 | */ | ||
800 | static int btrfs_insert_delayed_items(struct btrfs_trans_handle *trans, | ||
801 | struct btrfs_path *path, | ||
802 | struct btrfs_root *root, | ||
803 | struct btrfs_delayed_node *node) | ||
804 | { | ||
805 | struct btrfs_delayed_item *curr, *prev; | ||
806 | int ret = 0; | ||
807 | |||
808 | do_again: | ||
809 | mutex_lock(&node->mutex); | ||
810 | curr = __btrfs_first_delayed_insertion_item(node); | ||
811 | if (!curr) | ||
812 | goto insert_end; | ||
813 | |||
814 | ret = btrfs_insert_delayed_item(trans, root, path, curr); | ||
815 | if (ret < 0) { | ||
816 | btrfs_release_path(path); | ||
817 | goto insert_end; | ||
818 | } | ||
819 | |||
820 | prev = curr; | ||
821 | curr = __btrfs_next_delayed_item(prev); | ||
822 | if (curr && btrfs_is_continuous_delayed_item(prev, curr)) { | ||
823 | /* insert the continuous items into the same leaf */ | ||
824 | path->slots[0]++; | ||
825 | btrfs_batch_insert_items(trans, root, path, curr); | ||
826 | } | ||
827 | btrfs_release_delayed_item(prev); | ||
828 | btrfs_mark_buffer_dirty(path->nodes[0]); | ||
829 | |||
830 | btrfs_release_path(path); | ||
831 | mutex_unlock(&node->mutex); | ||
832 | goto do_again; | ||
833 | |||
834 | insert_end: | ||
835 | mutex_unlock(&node->mutex); | ||
836 | return ret; | ||
837 | } | ||
838 | |||
839 | static int btrfs_batch_delete_items(struct btrfs_trans_handle *trans, | ||
840 | struct btrfs_root *root, | ||
841 | struct btrfs_path *path, | ||
842 | struct btrfs_delayed_item *item) | ||
843 | { | ||
844 | struct btrfs_delayed_item *curr, *next; | ||
845 | struct extent_buffer *leaf; | ||
846 | struct btrfs_key key; | ||
847 | struct list_head head; | ||
848 | int nitems, i, last_item; | ||
849 | int ret = 0; | ||
850 | |||
851 | BUG_ON(!path->nodes[0]); | ||
852 | |||
853 | leaf = path->nodes[0]; | ||
854 | |||
855 | i = path->slots[0]; | ||
856 | last_item = btrfs_header_nritems(leaf) - 1; | ||
857 | if (i > last_item) | ||
858 | return -ENOENT; /* FIXME: Is errno suitable? */ | ||
859 | |||
860 | next = item; | ||
861 | INIT_LIST_HEAD(&head); | ||
862 | btrfs_item_key_to_cpu(leaf, &key, i); | ||
863 | nitems = 0; | ||
864 | /* | ||
865 | * count the number of dir index items that we can delete in one batch | ||
866 | */ | ||
867 | while (btrfs_comp_cpu_keys(&next->key, &key) == 0) { | ||
868 | list_add_tail(&next->tree_list, &head); | ||
869 | nitems++; | ||
870 | |||
871 | curr = next; | ||
872 | next = __btrfs_next_delayed_item(curr); | ||
873 | if (!next) | ||
874 | break; | ||
875 | |||
876 | if (!btrfs_is_continuous_delayed_item(curr, next)) | ||
877 | break; | ||
878 | |||
879 | i++; | ||
880 | if (i > last_item) | ||
881 | break; | ||
882 | btrfs_item_key_to_cpu(leaf, &key, i); | ||
883 | } | ||
884 | |||
885 | if (!nitems) | ||
886 | return 0; | ||
887 | |||
888 | ret = btrfs_del_items(trans, root, path, path->slots[0], nitems); | ||
889 | if (ret) | ||
890 | goto out; | ||
891 | |||
892 | list_for_each_entry_safe(curr, next, &head, tree_list) { | ||
893 | btrfs_delayed_item_release_metadata(root, curr); | ||
894 | list_del(&curr->tree_list); | ||
895 | btrfs_release_delayed_item(curr); | ||
896 | } | ||
897 | |||
898 | out: | ||
899 | return ret; | ||
900 | } | ||
901 | |||
902 | static int btrfs_delete_delayed_items(struct btrfs_trans_handle *trans, | ||
903 | struct btrfs_path *path, | ||
904 | struct btrfs_root *root, | ||
905 | struct btrfs_delayed_node *node) | ||
906 | { | ||
907 | struct btrfs_delayed_item *curr, *prev; | ||
908 | int ret = 0; | ||
909 | |||
910 | do_again: | ||
911 | mutex_lock(&node->mutex); | ||
912 | curr = __btrfs_first_delayed_deletion_item(node); | ||
913 | if (!curr) | ||
914 | goto delete_fail; | ||
915 | |||
916 | ret = btrfs_search_slot(trans, root, &curr->key, path, -1, 1); | ||
917 | if (ret < 0) | ||
918 | goto delete_fail; | ||
919 | else if (ret > 0) { | ||
920 | /* | ||
921 | * we can't find the item that this delayed item points to, so this | ||
922 | * delayed item is invalid; just drop it. | ||
923 | */ | ||
924 | prev = curr; | ||
925 | curr = __btrfs_next_delayed_item(prev); | ||
926 | btrfs_release_delayed_item(prev); | ||
927 | ret = 0; | ||
928 | btrfs_release_path(path); | ||
929 | if (curr) | ||
930 | goto do_again; | ||
931 | else | ||
932 | goto delete_fail; | ||
933 | } | ||
934 | |||
935 | btrfs_batch_delete_items(trans, root, path, curr); | ||
936 | btrfs_release_path(path); | ||
937 | mutex_unlock(&node->mutex); | ||
938 | goto do_again; | ||
939 | |||
940 | delete_fail: | ||
941 | btrfs_release_path(path); | ||
942 | mutex_unlock(&node->mutex); | ||
943 | return ret; | ||
944 | } | ||
945 | |||
946 | static void btrfs_release_delayed_inode(struct btrfs_delayed_node *delayed_node) | ||
947 | { | ||
948 | struct btrfs_delayed_root *delayed_root; | ||
949 | |||
950 | if (delayed_node && delayed_node->inode_dirty) { | ||
951 | BUG_ON(!delayed_node->root); | ||
952 | delayed_node->inode_dirty = 0; | ||
953 | delayed_node->count--; | ||
954 | |||
955 | delayed_root = delayed_node->root->fs_info->delayed_root; | ||
956 | atomic_dec(&delayed_root->items); | ||
957 | if (atomic_read(&delayed_root->items) < | ||
958 | BTRFS_DELAYED_BACKGROUND && | ||
959 | waitqueue_active(&delayed_root->wait)) | ||
960 | wake_up(&delayed_root->wait); | ||
961 | } | ||
962 | } | ||
963 | |||
964 | static int btrfs_update_delayed_inode(struct btrfs_trans_handle *trans, | ||
965 | struct btrfs_root *root, | ||
966 | struct btrfs_path *path, | ||
967 | struct btrfs_delayed_node *node) | ||
968 | { | ||
969 | struct btrfs_key key; | ||
970 | struct btrfs_inode_item *inode_item; | ||
971 | struct extent_buffer *leaf; | ||
972 | int ret; | ||
973 | |||
974 | mutex_lock(&node->mutex); | ||
975 | if (!node->inode_dirty) { | ||
976 | mutex_unlock(&node->mutex); | ||
977 | return 0; | ||
978 | } | ||
979 | |||
980 | key.objectid = node->inode_id; | ||
981 | btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); | ||
982 | key.offset = 0; | ||
983 | ret = btrfs_lookup_inode(trans, root, path, &key, 1); | ||
984 | if (ret > 0) { | ||
985 | btrfs_release_path(path); | ||
986 | mutex_unlock(&node->mutex); | ||
987 | return -ENOENT; | ||
988 | } else if (ret < 0) { | ||
989 | mutex_unlock(&node->mutex); | ||
990 | return ret; | ||
991 | } | ||
992 | |||
993 | btrfs_unlock_up_safe(path, 1); | ||
994 | leaf = path->nodes[0]; | ||
995 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | ||
996 | struct btrfs_inode_item); | ||
997 | write_extent_buffer(leaf, &node->inode_item, (unsigned long)inode_item, | ||
998 | sizeof(struct btrfs_inode_item)); | ||
999 | btrfs_mark_buffer_dirty(leaf); | ||
1000 | btrfs_release_path(path); | ||
1001 | |||
1002 | btrfs_delayed_inode_release_metadata(root, node); | ||
1003 | btrfs_release_delayed_inode(node); | ||
1004 | mutex_unlock(&node->mutex); | ||
1005 | |||
1006 | return 0; | ||
1007 | } | ||
1008 | |||
1009 | /* Called when committing the transaction. */ | ||
1010 | int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, | ||
1011 | struct btrfs_root *root) | ||
1012 | { | ||
1013 | struct btrfs_delayed_root *delayed_root; | ||
1014 | struct btrfs_delayed_node *curr_node, *prev_node; | ||
1015 | struct btrfs_path *path; | ||
1016 | int ret = 0; | ||
1017 | |||
1018 | path = btrfs_alloc_path(); | ||
1019 | if (!path) | ||
1020 | return -ENOMEM; | ||
1021 | path->leave_spinning = 1; | ||
1022 | |||
1023 | delayed_root = btrfs_get_delayed_root(root); | ||
1024 | |||
1025 | curr_node = btrfs_first_delayed_node(delayed_root); | ||
1026 | while (curr_node) { | ||
1027 | root = curr_node->root; | ||
1028 | ret = btrfs_insert_delayed_items(trans, path, root, | ||
1029 | curr_node); | ||
1030 | if (!ret) | ||
1031 | ret = btrfs_delete_delayed_items(trans, path, root, | ||
1032 | curr_node); | ||
1033 | if (!ret) | ||
1034 | ret = btrfs_update_delayed_inode(trans, root, path, | ||
1035 | curr_node); | ||
1036 | if (ret) { | ||
1037 | btrfs_release_delayed_node(curr_node); | ||
1038 | break; | ||
1039 | } | ||
1040 | |||
1041 | prev_node = curr_node; | ||
1042 | curr_node = btrfs_next_delayed_node(curr_node); | ||
1043 | btrfs_release_delayed_node(prev_node); | ||
1044 | } | ||
1045 | |||
1046 | btrfs_free_path(path); | ||
1047 | return ret; | ||
1048 | } | ||
1049 | |||
1050 | static int __btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, | ||
1051 | struct btrfs_delayed_node *node) | ||
1052 | { | ||
1053 | struct btrfs_path *path; | ||
1054 | int ret; | ||
1055 | |||
1056 | path = btrfs_alloc_path(); | ||
1057 | if (!path) | ||
1058 | return -ENOMEM; | ||
1059 | path->leave_spinning = 1; | ||
1060 | |||
1061 | ret = btrfs_insert_delayed_items(trans, path, node->root, node); | ||
1062 | if (!ret) | ||
1063 | ret = btrfs_delete_delayed_items(trans, path, node->root, node); | ||
1064 | if (!ret) | ||
1065 | ret = btrfs_update_delayed_inode(trans, node->root, path, node); | ||
1066 | btrfs_free_path(path); | ||
1067 | |||
1068 | return ret; | ||
1069 | } | ||
1070 | |||
1071 | int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, | ||
1072 | struct inode *inode) | ||
1073 | { | ||
1074 | struct btrfs_delayed_node *delayed_node = btrfs_get_delayed_node(inode); | ||
1075 | int ret; | ||
1076 | |||
1077 | if (!delayed_node) | ||
1078 | return 0; | ||
1079 | |||
1080 | mutex_lock(&delayed_node->mutex); | ||
1081 | if (!delayed_node->count) { | ||
1082 | mutex_unlock(&delayed_node->mutex); | ||
1083 | btrfs_release_delayed_node(delayed_node); | ||
1084 | return 0; | ||
1085 | } | ||
1086 | mutex_unlock(&delayed_node->mutex); | ||
1087 | |||
1088 | ret = __btrfs_commit_inode_delayed_items(trans, delayed_node); | ||
1089 | btrfs_release_delayed_node(delayed_node); | ||
1090 | return ret; | ||
1091 | } | ||
1092 | |||
1093 | void btrfs_remove_delayed_node(struct inode *inode) | ||
1094 | { | ||
1095 | struct btrfs_delayed_node *delayed_node; | ||
1096 | |||
1097 | delayed_node = ACCESS_ONCE(BTRFS_I(inode)->delayed_node); | ||
1098 | if (!delayed_node) | ||
1099 | return; | ||
1100 | |||
1101 | BTRFS_I(inode)->delayed_node = NULL; | ||
1102 | btrfs_release_delayed_node(delayed_node); | ||
1103 | } | ||
1104 | |||
1105 | struct btrfs_async_delayed_node { | ||
1106 | struct btrfs_root *root; | ||
1107 | struct btrfs_delayed_node *delayed_node; | ||
1108 | struct btrfs_work work; | ||
1109 | }; | ||
1110 | |||
1111 | static void btrfs_async_run_delayed_node_done(struct btrfs_work *work) | ||
1112 | { | ||
1113 | struct btrfs_async_delayed_node *async_node; | ||
1114 | struct btrfs_trans_handle *trans; | ||
1115 | struct btrfs_path *path; | ||
1116 | struct btrfs_delayed_node *delayed_node = NULL; | ||
1117 | struct btrfs_root *root; | ||
1118 | unsigned long nr = 0; | ||
1119 | int need_requeue = 0; | ||
1120 | int ret; | ||
1121 | |||
1122 | async_node = container_of(work, struct btrfs_async_delayed_node, work); | ||
1123 | |||
1124 | path = btrfs_alloc_path(); | ||
1125 | if (!path) | ||
1126 | goto out; | ||
1127 | path->leave_spinning = 1; | ||
1128 | |||
1129 | delayed_node = async_node->delayed_node; | ||
1130 | root = delayed_node->root; | ||
1131 | |||
1132 | trans = btrfs_join_transaction(root, 0); | ||
1133 | if (IS_ERR(trans)) | ||
1134 | goto free_path; | ||
1135 | |||
1136 | ret = btrfs_insert_delayed_items(trans, path, root, delayed_node); | ||
1137 | if (!ret) | ||
1138 | ret = btrfs_delete_delayed_items(trans, path, root, | ||
1139 | delayed_node); | ||
1140 | |||
1141 | if (!ret) | ||
1142 | btrfs_update_delayed_inode(trans, root, path, delayed_node); | ||
1143 | |||
1144 | /* | ||
1145 | * Maybe new delayed items have been inserted, so we need to requeue | ||
1146 | * the work. Besides that, we must dequeue the empty delayed nodes | ||
1147 | * to avoid the race between delayed items balance and the worker. | ||
1148 | * The race looks like this: | ||
1149 | * Task1 Worker thread | ||
1150 | * count == 0, needn't requeue | ||
1151 | * also needn't insert the | ||
1152 | * delayed node into prepare | ||
1153 | * list again. | ||
1154 | * add lots of delayed items | ||
1155 | * queue the delayed node | ||
1156 | * already in the list, | ||
1157 | * and not in the prepare | ||
1158 | * list, it means the delayed | ||
1159 | * node is being dealt with | ||
1160 | * by the worker. | ||
1161 | * do delayed items balance | ||
1162 | * the delayed node is being | ||
1163 | * dealt with by the worker | ||
1164 | * now, just wait. | ||
1165 | * the worker goto idle. | ||
1166 | * Task1 will sleep until the transaction is committed. | ||
1167 | */ | ||
1168 | mutex_lock(&delayed_node->mutex); | ||
1169 | if (delayed_node->count) | ||
1170 | need_requeue = 1; | ||
1171 | else | ||
1172 | btrfs_dequeue_delayed_node(root->fs_info->delayed_root, | ||
1173 | delayed_node); | ||
1174 | mutex_unlock(&delayed_node->mutex); | ||
1175 | |||
1176 | nr = trans->blocks_used; | ||
1177 | |||
1178 | btrfs_end_transaction_dmeta(trans, root); | ||
1179 | __btrfs_btree_balance_dirty(root, nr); | ||
1180 | free_path: | ||
1181 | btrfs_free_path(path); | ||
1182 | out: | ||
1183 | if (need_requeue) | ||
1184 | btrfs_requeue_work(&async_node->work); | ||
1185 | else { | ||
1186 | btrfs_release_prepared_delayed_node(delayed_node); | ||
1187 | kfree(async_node); | ||
1188 | } | ||
1189 | } | ||
1190 | |||
1191 | static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root, | ||
1192 | struct btrfs_root *root, int all) | ||
1193 | { | ||
1194 | struct btrfs_async_delayed_node *async_node; | ||
1195 | struct btrfs_delayed_node *curr; | ||
1196 | int count = 0; | ||
1197 | |||
1198 | again: | ||
1199 | curr = btrfs_first_prepared_delayed_node(delayed_root); | ||
1200 | if (!curr) | ||
1201 | return 0; | ||
1202 | |||
1203 | async_node = kmalloc(sizeof(*async_node), GFP_NOFS); | ||
1204 | if (!async_node) { | ||
1205 | btrfs_release_prepared_delayed_node(curr); | ||
1206 | return -ENOMEM; | ||
1207 | } | ||
1208 | |||
1209 | async_node->root = root; | ||
1210 | async_node->delayed_node = curr; | ||
1211 | |||
1212 | async_node->work.func = btrfs_async_run_delayed_node_done; | ||
1213 | async_node->work.flags = 0; | ||
1214 | |||
1215 | btrfs_queue_worker(&root->fs_info->delayed_workers, &async_node->work); | ||
1216 | count++; | ||
1217 | |||
1218 | if (all || count < 4) | ||
1219 | goto again; | ||
1220 | |||
1221 | return 0; | ||
1222 | } | ||
1223 | |||
1224 | void btrfs_balance_delayed_items(struct btrfs_root *root) | ||
1225 | { | ||
1226 | struct btrfs_delayed_root *delayed_root; | ||
1227 | |||
1228 | delayed_root = btrfs_get_delayed_root(root); | ||
1229 | |||
1230 | if (atomic_read(&delayed_root->items) < BTRFS_DELAYED_BACKGROUND) | ||
1231 | return; | ||
1232 | |||
1233 | if (atomic_read(&delayed_root->items) >= BTRFS_DELAYED_WRITEBACK) { | ||
1234 | int ret; | ||
1235 | ret = btrfs_wq_run_delayed_node(delayed_root, root, 1); | ||
1236 | if (ret) | ||
1237 | return; | ||
1238 | |||
1239 | wait_event_interruptible_timeout( | ||
1240 | delayed_root->wait, | ||
1241 | (atomic_read(&delayed_root->items) < | ||
1242 | BTRFS_DELAYED_BACKGROUND), | ||
1243 | HZ); | ||
1244 | return; | ||
1245 | } | ||
1246 | |||
1247 | btrfs_wq_run_delayed_node(delayed_root, root, 0); | ||
1248 | } | ||
1249 | |||
1250 | int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, | ||
1251 | struct btrfs_root *root, const char *name, | ||
1252 | int name_len, struct inode *dir, | ||
1253 | struct btrfs_disk_key *disk_key, u8 type, | ||
1254 | u64 index) | ||
1255 | { | ||
1256 | struct btrfs_delayed_node *delayed_node; | ||
1257 | struct btrfs_delayed_item *delayed_item; | ||
1258 | struct btrfs_dir_item *dir_item; | ||
1259 | int ret; | ||
1260 | |||
1261 | delayed_node = btrfs_get_or_create_delayed_node(dir); | ||
1262 | if (IS_ERR(delayed_node)) | ||
1263 | return PTR_ERR(delayed_node); | ||
1264 | |||
1265 | delayed_item = btrfs_alloc_delayed_item(sizeof(*dir_item) + name_len); | ||
1266 | if (!delayed_item) { | ||
1267 | ret = -ENOMEM; | ||
1268 | goto release_node; | ||
1269 | } | ||
1270 | |||
1271 | ret = btrfs_delayed_item_reserve_metadata(trans, root, delayed_item); | ||
1272 | /* | ||
1273 | * we have reserved enough space when we started the transaction, | ||
1274 | * so a metadata reservation failure here is impossible | ||
1275 | */ | ||
1276 | BUG_ON(ret); | ||
1277 | |||
1278 | delayed_item->key.objectid = btrfs_ino(dir); | ||
1279 | btrfs_set_key_type(&delayed_item->key, BTRFS_DIR_INDEX_KEY); | ||
1280 | delayed_item->key.offset = index; | ||
1281 | |||
1282 | dir_item = (struct btrfs_dir_item *)delayed_item->data; | ||
1283 | dir_item->location = *disk_key; | ||
1284 | dir_item->transid = cpu_to_le64(trans->transid); | ||
1285 | dir_item->data_len = 0; | ||
1286 | dir_item->name_len = cpu_to_le16(name_len); | ||
1287 | dir_item->type = type; | ||
1288 | memcpy((char *)(dir_item + 1), name, name_len); | ||
1289 | |||
1290 | mutex_lock(&delayed_node->mutex); | ||
1291 | ret = __btrfs_add_delayed_insertion_item(delayed_node, delayed_item); | ||
1292 | if (unlikely(ret)) { | ||
1293 | printk(KERN_ERR "error adding delayed dir index item (name: %s) into " | ||
1294 | "the insertion tree of the delayed node " | ||
1295 | "(root id: %llu, inode id: %llu, errno: %d)\n", | ||
1296 | name, | ||
1297 | (unsigned long long)delayed_node->root->objectid, | ||
1298 | (unsigned long long)delayed_node->inode_id, | ||
1299 | ret); | ||
1300 | BUG(); | ||
1301 | } | ||
1302 | mutex_unlock(&delayed_node->mutex); | ||
1303 | |||
1304 | release_node: | ||
1305 | btrfs_release_delayed_node(delayed_node); | ||
1306 | return ret; | ||
1307 | } | ||
1308 | |||
1309 | static int btrfs_delete_delayed_insertion_item(struct btrfs_root *root, | ||
1310 | struct btrfs_delayed_node *node, | ||
1311 | struct btrfs_key *key) | ||
1312 | { | ||
1313 | struct btrfs_delayed_item *item; | ||
1314 | |||
1315 | mutex_lock(&node->mutex); | ||
1316 | item = __btrfs_lookup_delayed_insertion_item(node, key); | ||
1317 | if (!item) { | ||
1318 | mutex_unlock(&node->mutex); | ||
1319 | return 1; | ||
1320 | } | ||
1321 | |||
1322 | btrfs_delayed_item_release_metadata(root, item); | ||
1323 | btrfs_release_delayed_item(item); | ||
1324 | mutex_unlock(&node->mutex); | ||
1325 | return 0; | ||
1326 | } | ||
1327 | |||
1328 | int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, | ||
1329 | struct btrfs_root *root, struct inode *dir, | ||
1330 | u64 index) | ||
1331 | { | ||
1332 | struct btrfs_delayed_node *node; | ||
1333 | struct btrfs_delayed_item *item; | ||
1334 | struct btrfs_key item_key; | ||
1335 | int ret; | ||
1336 | |||
1337 | node = btrfs_get_or_create_delayed_node(dir); | ||
1338 | if (IS_ERR(node)) | ||
1339 | return PTR_ERR(node); | ||
1340 | |||
1341 | item_key.objectid = btrfs_ino(dir); | ||
1342 | btrfs_set_key_type(&item_key, BTRFS_DIR_INDEX_KEY); | ||
1343 | item_key.offset = index; | ||
1344 | |||
1345 | ret = btrfs_delete_delayed_insertion_item(root, node, &item_key); | ||
1346 | if (!ret) | ||
1347 | goto end; | ||
1348 | |||
1349 | item = btrfs_alloc_delayed_item(0); | ||
1350 | if (!item) { | ||
1351 | ret = -ENOMEM; | ||
1352 | goto end; | ||
1353 | } | ||
1354 | |||
1355 | item->key = item_key; | ||
1356 | |||
1357 | ret = btrfs_delayed_item_reserve_metadata(trans, root, item); | ||
1358 | /* | ||
1359 | * we have reserved enough space when we started the transaction, | ||
1360 | * so a metadata reservation failure here is impossible. | ||
1361 | */ | ||
1362 | BUG_ON(ret); | ||
1363 | |||
1364 | mutex_lock(&node->mutex); | ||
1365 | ret = __btrfs_add_delayed_deletion_item(node, item); | ||
1366 | if (unlikely(ret)) { | ||
1367 | printk(KERN_ERR "error adding delayed dir index item (index: %llu) " | ||
1368 | "into the deletion tree of the delayed node " | ||
1369 | "(root id: %llu, inode id: %llu, errno: %d)\n", | ||
1370 | (unsigned long long)index, | ||
1371 | (unsigned long long)node->root->objectid, | ||
1372 | (unsigned long long)node->inode_id, | ||
1373 | ret); | ||
1374 | BUG(); | ||
1375 | } | ||
1376 | mutex_unlock(&node->mutex); | ||
1377 | end: | ||
1378 | btrfs_release_delayed_node(node); | ||
1379 | return ret; | ||
1380 | } | ||
1381 | |||
1382 | int btrfs_inode_delayed_dir_index_count(struct inode *inode) | ||
1383 | { | ||
1384 | struct btrfs_delayed_node *delayed_node = BTRFS_I(inode)->delayed_node; | ||
1385 | int ret = 0; | ||
1386 | |||
1387 | if (!delayed_node) | ||
1388 | return -ENOENT; | ||
1389 | |||
1390 | /* | ||
1391 | * Since we hold the i_mutex of this directory, no new directory index | ||
1392 | * can be added to the delayed node and index_cnt cannot be updated | ||
1393 | * now, so we needn't lock the delayed node. | ||
1394 | */ | ||
1395 | if (!delayed_node->index_cnt) | ||
1396 | return -EINVAL; | ||
1397 | |||
1398 | BTRFS_I(inode)->index_cnt = delayed_node->index_cnt; | ||
1399 | return ret; | ||
1400 | } | ||
1401 | |||
1402 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, | ||
1403 | struct list_head *del_list) | ||
1404 | { | ||
1405 | struct btrfs_delayed_node *delayed_node; | ||
1406 | struct btrfs_delayed_item *item; | ||
1407 | |||
1408 | delayed_node = btrfs_get_delayed_node(inode); | ||
1409 | if (!delayed_node) | ||
1410 | return; | ||
1411 | |||
1412 | mutex_lock(&delayed_node->mutex); | ||
1413 | item = __btrfs_first_delayed_insertion_item(delayed_node); | ||
1414 | while (item) { | ||
1415 | atomic_inc(&item->refs); | ||
1416 | list_add_tail(&item->readdir_list, ins_list); | ||
1417 | item = __btrfs_next_delayed_item(item); | ||
1418 | } | ||
1419 | |||
1420 | item = __btrfs_first_delayed_deletion_item(delayed_node); | ||
1421 | while (item) { | ||
1422 | atomic_inc(&item->refs); | ||
1423 | list_add_tail(&item->readdir_list, del_list); | ||
1424 | item = __btrfs_next_delayed_item(item); | ||
1425 | } | ||
1426 | mutex_unlock(&delayed_node->mutex); | ||
1427 | /* | ||
1428 | * This delayed node is still cached in the btrfs inode, so refs | ||
1429 | * must be > 1 now, and we needn't check whether it is going to be | ||
1430 | * freed or not. | ||
1431 | * | ||
1432 | * Besides that, this function is used for readdir, and no delayed | ||
1433 | * items are inserted or deleted during that period, so we also | ||
1434 | * needn't requeue or dequeue this delayed node. | ||
1435 | */ | ||
1436 | atomic_dec(&delayed_node->refs); | ||
1437 | } | ||
1438 | |||
1439 | void btrfs_put_delayed_items(struct list_head *ins_list, | ||
1440 | struct list_head *del_list) | ||
1441 | { | ||
1442 | struct btrfs_delayed_item *curr, *next; | ||
1443 | |||
1444 | list_for_each_entry_safe(curr, next, ins_list, readdir_list) { | ||
1445 | list_del(&curr->readdir_list); | ||
1446 | if (atomic_dec_and_test(&curr->refs)) | ||
1447 | kfree(curr); | ||
1448 | } | ||
1449 | |||
1450 | list_for_each_entry_safe(curr, next, del_list, readdir_list) { | ||
1451 | list_del(&curr->readdir_list); | ||
1452 | if (atomic_dec_and_test(&curr->refs)) | ||
1453 | kfree(curr); | ||
1454 | } | ||
1455 | } | ||
1456 | |||
1457 | int btrfs_should_delete_dir_index(struct list_head *del_list, | ||
1458 | u64 index) | ||
1459 | { | ||
1460 | struct btrfs_delayed_item *curr, *next; | ||
1461 | int ret; | ||
1462 | |||
1463 | if (list_empty(del_list)) | ||
1464 | return 0; | ||
1465 | |||
1466 | list_for_each_entry_safe(curr, next, del_list, readdir_list) { | ||
1467 | if (curr->key.offset > index) | ||
1468 | break; | ||
1469 | |||
1470 | list_del(&curr->readdir_list); | ||
1471 | ret = (curr->key.offset == index); | ||
1472 | |||
1473 | if (atomic_dec_and_test(&curr->refs)) | ||
1474 | kfree(curr); | ||
1475 | |||
1476 | if (ret) | ||
1477 | return 1; | ||
1478 | else | ||
1479 | continue; | ||
1480 | } | ||
1481 | return 0; | ||
1482 | } | ||
1483 | |||
1484 | /* | ||
1485 | * btrfs_readdir_delayed_dir_index - read dir info stored in the delayed tree | ||
1486 | * | ||
1487 | */ | ||
1488 | int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, | ||
1489 | filldir_t filldir, | ||
1490 | struct list_head *ins_list) | ||
1491 | { | ||
1492 | struct btrfs_dir_item *di; | ||
1493 | struct btrfs_delayed_item *curr, *next; | ||
1494 | struct btrfs_key location; | ||
1495 | char *name; | ||
1496 | int name_len; | ||
1497 | int over = 0; | ||
1498 | unsigned char d_type; | ||
1499 | |||
1500 | if (list_empty(ins_list)) | ||
1501 | return 0; | ||
1502 | |||
1503 | /* | ||
1504 | * Changing the data of a delayed item is impossible, so we | ||
1505 | * needn't lock them. And since we hold the i_mutex of the | ||
1506 | * directory, nobody can delete any directory index now. | ||
1507 | */ | ||
1508 | list_for_each_entry_safe(curr, next, ins_list, readdir_list) { | ||
1509 | list_del(&curr->readdir_list); | ||
1510 | |||
1511 | if (curr->key.offset < filp->f_pos) { | ||
1512 | if (atomic_dec_and_test(&curr->refs)) | ||
1513 | kfree(curr); | ||
1514 | continue; | ||
1515 | } | ||
1516 | |||
1517 | filp->f_pos = curr->key.offset; | ||
1518 | |||
1519 | di = (struct btrfs_dir_item *)curr->data; | ||
1520 | name = (char *)(di + 1); | ||
1521 | name_len = le16_to_cpu(di->name_len); | ||
1522 | |||
1523 | d_type = btrfs_filetype_table[di->type]; | ||
1524 | btrfs_disk_key_to_cpu(&location, &di->location); | ||
1525 | |||
1526 | over = filldir(dirent, name, name_len, curr->key.offset, | ||
1527 | location.objectid, d_type); | ||
1528 | |||
1529 | if (atomic_dec_and_test(&curr->refs)) | ||
1530 | kfree(curr); | ||
1531 | |||
1532 | if (over) | ||
1533 | return 1; | ||
1534 | } | ||
1535 | return 0; | ||
1536 | } | ||
1537 | |||
1538 | BTRFS_SETGET_STACK_FUNCS(stack_inode_generation, struct btrfs_inode_item, | ||
1539 | generation, 64); | ||
1540 | BTRFS_SETGET_STACK_FUNCS(stack_inode_sequence, struct btrfs_inode_item, | ||
1541 | sequence, 64); | ||
1542 | BTRFS_SETGET_STACK_FUNCS(stack_inode_transid, struct btrfs_inode_item, | ||
1543 | transid, 64); | ||
1544 | BTRFS_SETGET_STACK_FUNCS(stack_inode_size, struct btrfs_inode_item, size, 64); | ||
1545 | BTRFS_SETGET_STACK_FUNCS(stack_inode_nbytes, struct btrfs_inode_item, | ||
1546 | nbytes, 64); | ||
1547 | BTRFS_SETGET_STACK_FUNCS(stack_inode_block_group, struct btrfs_inode_item, | ||
1548 | block_group, 64); | ||
1549 | BTRFS_SETGET_STACK_FUNCS(stack_inode_nlink, struct btrfs_inode_item, nlink, 32); | ||
1550 | BTRFS_SETGET_STACK_FUNCS(stack_inode_uid, struct btrfs_inode_item, uid, 32); | ||
1551 | BTRFS_SETGET_STACK_FUNCS(stack_inode_gid, struct btrfs_inode_item, gid, 32); | ||
1552 | BTRFS_SETGET_STACK_FUNCS(stack_inode_mode, struct btrfs_inode_item, mode, 32); | ||
1553 | BTRFS_SETGET_STACK_FUNCS(stack_inode_rdev, struct btrfs_inode_item, rdev, 64); | ||
1554 | BTRFS_SETGET_STACK_FUNCS(stack_inode_flags, struct btrfs_inode_item, flags, 64); | ||
1555 | |||
1556 | BTRFS_SETGET_STACK_FUNCS(stack_timespec_sec, struct btrfs_timespec, sec, 64); | ||
1557 | BTRFS_SETGET_STACK_FUNCS(stack_timespec_nsec, struct btrfs_timespec, nsec, 32); | ||
1558 | |||
1559 | static void fill_stack_inode_item(struct btrfs_trans_handle *trans, | ||
1560 | struct btrfs_inode_item *inode_item, | ||
1561 | struct inode *inode) | ||
1562 | { | ||
1563 | btrfs_set_stack_inode_uid(inode_item, inode->i_uid); | ||
1564 | btrfs_set_stack_inode_gid(inode_item, inode->i_gid); | ||
1565 | btrfs_set_stack_inode_size(inode_item, BTRFS_I(inode)->disk_i_size); | ||
1566 | btrfs_set_stack_inode_mode(inode_item, inode->i_mode); | ||
1567 | btrfs_set_stack_inode_nlink(inode_item, inode->i_nlink); | ||
1568 | btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode)); | ||
1569 | btrfs_set_stack_inode_generation(inode_item, | ||
1570 | BTRFS_I(inode)->generation); | ||
1571 | btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence); | ||
1572 | btrfs_set_stack_inode_transid(inode_item, trans->transid); | ||
1573 | btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev); | ||
1574 | btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); | ||
1575 | btrfs_set_stack_inode_block_group(inode_item, | ||
1576 | BTRFS_I(inode)->block_group); | ||
1577 | |||
1578 | btrfs_set_stack_timespec_sec(btrfs_inode_atime(inode_item), | ||
1579 | inode->i_atime.tv_sec); | ||
1580 | btrfs_set_stack_timespec_nsec(btrfs_inode_atime(inode_item), | ||
1581 | inode->i_atime.tv_nsec); | ||
1582 | |||
1583 | btrfs_set_stack_timespec_sec(btrfs_inode_mtime(inode_item), | ||
1584 | inode->i_mtime.tv_sec); | ||
1585 | btrfs_set_stack_timespec_nsec(btrfs_inode_mtime(inode_item), | ||
1586 | inode->i_mtime.tv_nsec); | ||
1587 | |||
1588 | btrfs_set_stack_timespec_sec(btrfs_inode_ctime(inode_item), | ||
1589 | inode->i_ctime.tv_sec); | ||
1590 | btrfs_set_stack_timespec_nsec(btrfs_inode_ctime(inode_item), | ||
1591 | inode->i_ctime.tv_nsec); | ||
1592 | } | ||
1593 | |||
1594 | int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, | ||
1595 | struct btrfs_root *root, struct inode *inode) | ||
1596 | { | ||
1597 | struct btrfs_delayed_node *delayed_node; | ||
1598 | int ret; | ||
1599 | |||
1600 | delayed_node = btrfs_get_or_create_delayed_node(inode); | ||
1601 | if (IS_ERR(delayed_node)) | ||
1602 | return PTR_ERR(delayed_node); | ||
1603 | |||
1604 | mutex_lock(&delayed_node->mutex); | ||
1605 | if (delayed_node->inode_dirty) { | ||
1606 | fill_stack_inode_item(trans, &delayed_node->inode_item, inode); | ||
1607 | goto release_node; | ||
1608 | } | ||
1609 | |||
1610 | ret = btrfs_delayed_inode_reserve_metadata(trans, root, delayed_node); | ||
1611 | /* | ||
1612 | * we must have reserved enough space when we started the transaction, | ||
1613 | * so a metadata reservation failure here is impossible | ||
1614 | */ | ||
1615 | BUG_ON(ret); | ||
1616 | |||
1617 | fill_stack_inode_item(trans, &delayed_node->inode_item, inode); | ||
1618 | delayed_node->inode_dirty = 1; | ||
1619 | delayed_node->count++; | ||
1620 | atomic_inc(&root->fs_info->delayed_root->items); | ||
1621 | release_node: | ||
1622 | mutex_unlock(&delayed_node->mutex); | ||
1623 | btrfs_release_delayed_node(delayed_node); | ||
1624 | return ret; | ||
1625 | } | ||
1626 | |||
1627 | static void __btrfs_kill_delayed_node(struct btrfs_delayed_node *delayed_node) | ||
1628 | { | ||
1629 | struct btrfs_root *root = delayed_node->root; | ||
1630 | struct btrfs_delayed_item *curr_item, *prev_item; | ||
1631 | |||
1632 | mutex_lock(&delayed_node->mutex); | ||
1633 | curr_item = __btrfs_first_delayed_insertion_item(delayed_node); | ||
1634 | while (curr_item) { | ||
1635 | btrfs_delayed_item_release_metadata(root, curr_item); | ||
1636 | prev_item = curr_item; | ||
1637 | curr_item = __btrfs_next_delayed_item(prev_item); | ||
1638 | btrfs_release_delayed_item(prev_item); | ||
1639 | } | ||
1640 | |||
1641 | curr_item = __btrfs_first_delayed_deletion_item(delayed_node); | ||
1642 | while (curr_item) { | ||
1643 | btrfs_delayed_item_release_metadata(root, curr_item); | ||
1644 | prev_item = curr_item; | ||
1645 | curr_item = __btrfs_next_delayed_item(prev_item); | ||
1646 | btrfs_release_delayed_item(prev_item); | ||
1647 | } | ||
1648 | |||
1649 | if (delayed_node->inode_dirty) { | ||
1650 | btrfs_delayed_inode_release_metadata(root, delayed_node); | ||
1651 | btrfs_release_delayed_inode(delayed_node); | ||
1652 | } | ||
1653 | mutex_unlock(&delayed_node->mutex); | ||
1654 | } | ||
1655 | |||
1656 | void btrfs_kill_delayed_inode_items(struct inode *inode) | ||
1657 | { | ||
1658 | struct btrfs_delayed_node *delayed_node; | ||
1659 | |||
1660 | delayed_node = btrfs_get_delayed_node(inode); | ||
1661 | if (!delayed_node) | ||
1662 | return; | ||
1663 | |||
1664 | __btrfs_kill_delayed_node(delayed_node); | ||
1665 | btrfs_release_delayed_node(delayed_node); | ||
1666 | } | ||
1667 | |||
1668 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root) | ||
1669 | { | ||
1670 | u64 inode_id = 0; | ||
1671 | struct btrfs_delayed_node *delayed_nodes[8]; | ||
1672 | int i, n; | ||
1673 | |||
1674 | while (1) { | ||
1675 | spin_lock(&root->inode_lock); | ||
1676 | n = radix_tree_gang_lookup(&root->delayed_nodes_tree, | ||
1677 | (void **)delayed_nodes, inode_id, | ||
1678 | ARRAY_SIZE(delayed_nodes)); | ||
1679 | if (!n) { | ||
1680 | spin_unlock(&root->inode_lock); | ||
1681 | break; | ||
1682 | } | ||
1683 | |||
1684 | inode_id = delayed_nodes[n - 1]->inode_id + 1; | ||
1685 | |||
1686 | for (i = 0; i < n; i++) | ||
1687 | atomic_inc(&delayed_nodes[i]->refs); | ||
1688 | spin_unlock(&root->inode_lock); | ||
1689 | |||
1690 | for (i = 0; i < n; i++) { | ||
1691 | __btrfs_kill_delayed_node(delayed_nodes[i]); | ||
1692 | btrfs_release_delayed_node(delayed_nodes[i]); | ||
1693 | } | ||
1694 | } | ||
1695 | } | ||
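
The four readdir helpers defined above (btrfs_get_delayed_items, btrfs_put_delayed_items, btrfs_should_delete_dir_index and btrfs_readdir_delayed_dir_index) are meant to bracket the normal on-disk directory walk. A minimal sketch of that calling sequence follows; the wrapper name, the elided on-disk walk and the variable names are illustrative assumptions, not part of this patch (the in-tree caller is the readdir path in fs/btrfs/inode.c):

	/* Sketch only: the on-disk BTRFS_DIR_INDEX_KEY walk is elided. */
	static int readdir_with_delayed_items(struct file *filp, void *dirent,
					      filldir_t filldir, struct inode *dir)
	{
		LIST_HEAD(ins_list);
		LIST_HEAD(del_list);
		int ret;

		/* Pin the delayed insertion/deletion items of this directory. */
		btrfs_get_delayed_items(dir, &ins_list, &del_list);

		/*
		 * While walking the on-disk index items, each found index would
		 * be checked against the pending delayed deletions, e.g.:
		 *
		 *	if (btrfs_should_delete_dir_index(&del_list, index))
		 *		continue;
		 */

		/* Emit the entries that so far exist only in the delayed tree. */
		ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir,
						      &ins_list);

		btrfs_put_delayed_items(&ins_list, &del_list);
		return ret;
	}
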
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h new file mode 100644 index 000000000000..eb7d240aa648 --- /dev/null +++ b/fs/btrfs/delayed-inode.h | |||
@@ -0,0 +1,141 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 Fujitsu. All rights reserved. | ||
3 | * Written by Miao Xie <miaox@cn.fujitsu.com> | ||
4 | * | ||
5 | * This program is free software; you can redistribute it and/or | ||
6 | * modify it under the terms of the GNU General Public | ||
7 | * License v2 as published by the Free Software Foundation. | ||
8 | * | ||
9 | * This program is distributed in the hope that it will be useful, | ||
10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
12 | * General Public License for more details. | ||
13 | * | ||
14 | * You should have received a copy of the GNU General Public | ||
15 | * License along with this program; if not, write to the | ||
16 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
17 | * Boston, MA 021110-1307, USA. | ||
18 | */ | ||
19 | |||
20 | #ifndef __DELAYED_TREE_OPERATION_H | ||
21 | #define __DELAYED_TREE_OPERATION_H | ||
22 | |||
23 | #include <linux/rbtree.h> | ||
24 | #include <linux/spinlock.h> | ||
25 | #include <linux/mutex.h> | ||
26 | #include <linux/list.h> | ||
27 | #include <linux/wait.h> | ||
28 | #include <asm/atomic.h> | ||
29 | |||
30 | #include "ctree.h" | ||
31 | |||
32 | /* types of the delayed item */ | ||
33 | #define BTRFS_DELAYED_INSERTION_ITEM 1 | ||
34 | #define BTRFS_DELAYED_DELETION_ITEM 2 | ||
35 | |||
36 | struct btrfs_delayed_root { | ||
37 | spinlock_t lock; | ||
38 | struct list_head node_list; | ||
39 | /* | ||
40 | * Used for delayed nodes that are waiting to be dealt with by the | ||
41 | * worker. If the delayed node is inserted into the work queue, we | ||
42 | * drop it from this list. | ||
43 | */ | ||
44 | struct list_head prepare_list; | ||
45 | atomic_t items; /* for delayed items */ | ||
46 | int nodes; /* for delayed nodes */ | ||
47 | wait_queue_head_t wait; | ||
48 | }; | ||
49 | |||
50 | struct btrfs_delayed_node { | ||
51 | u64 inode_id; | ||
52 | u64 bytes_reserved; | ||
53 | struct btrfs_root *root; | ||
54 | /* Used to add the node into the delayed root's node list. */ | ||
55 | struct list_head n_list; | ||
56 | /* | ||
57 | * Used to add the node into the prepare list; the nodes in this list | ||
58 | * are waiting to be dealt with by the async worker. | ||
59 | */ | ||
60 | struct list_head p_list; | ||
61 | struct rb_root ins_root; | ||
62 | struct rb_root del_root; | ||
63 | struct mutex mutex; | ||
64 | struct btrfs_inode_item inode_item; | ||
65 | atomic_t refs; | ||
66 | u64 index_cnt; | ||
67 | bool in_list; | ||
68 | bool inode_dirty; | ||
69 | int count; | ||
70 | }; | ||
71 | |||
72 | struct btrfs_delayed_item { | ||
73 | struct rb_node rb_node; | ||
74 | struct btrfs_key key; | ||
75 | struct list_head tree_list; /* used for batch insert/delete items */ | ||
76 | struct list_head readdir_list; /* used for readdir items */ | ||
77 | u64 bytes_reserved; | ||
78 | struct btrfs_block_rsv *block_rsv; | ||
79 | struct btrfs_delayed_node *delayed_node; | ||
80 | atomic_t refs; | ||
81 | int ins_or_del; | ||
82 | u32 data_len; | ||
83 | char data[0]; | ||
84 | }; | ||
85 | |||
86 | static inline void btrfs_init_delayed_root( | ||
87 | struct btrfs_delayed_root *delayed_root) | ||
88 | { | ||
89 | atomic_set(&delayed_root->items, 0); | ||
90 | delayed_root->nodes = 0; | ||
91 | spin_lock_init(&delayed_root->lock); | ||
92 | init_waitqueue_head(&delayed_root->wait); | ||
93 | INIT_LIST_HEAD(&delayed_root->node_list); | ||
94 | INIT_LIST_HEAD(&delayed_root->prepare_list); | ||
95 | } | ||
96 | |||
97 | int btrfs_insert_delayed_dir_index(struct btrfs_trans_handle *trans, | ||
98 | struct btrfs_root *root, const char *name, | ||
99 | int name_len, struct inode *dir, | ||
100 | struct btrfs_disk_key *disk_key, u8 type, | ||
101 | u64 index); | ||
102 | |||
103 | int btrfs_delete_delayed_dir_index(struct btrfs_trans_handle *trans, | ||
104 | struct btrfs_root *root, struct inode *dir, | ||
105 | u64 index); | ||
106 | |||
107 | int btrfs_inode_delayed_dir_index_count(struct inode *inode); | ||
108 | |||
109 | int btrfs_run_delayed_items(struct btrfs_trans_handle *trans, | ||
110 | struct btrfs_root *root); | ||
111 | |||
112 | void btrfs_balance_delayed_items(struct btrfs_root *root); | ||
113 | |||
114 | int btrfs_commit_inode_delayed_items(struct btrfs_trans_handle *trans, | ||
115 | struct inode *inode); | ||
116 | /* Used for evicting the inode. */ | ||
117 | void btrfs_remove_delayed_node(struct inode *inode); | ||
118 | void btrfs_kill_delayed_inode_items(struct inode *inode); | ||
119 | |||
120 | |||
121 | int btrfs_delayed_update_inode(struct btrfs_trans_handle *trans, | ||
122 | struct btrfs_root *root, struct inode *inode); | ||
123 | |||
124 | /* Used for dropping a dead root */ | ||
125 | void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); | ||
126 | |||
127 | /* Used for readdir() */ | ||
128 | void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, | ||
129 | struct list_head *del_list); | ||
130 | void btrfs_put_delayed_items(struct list_head *ins_list, | ||
131 | struct list_head *del_list); | ||
132 | int btrfs_should_delete_dir_index(struct list_head *del_list, | ||
133 | u64 index); | ||
134 | int btrfs_readdir_delayed_dir_index(struct file *filp, void *dirent, | ||
135 | filldir_t filldir, | ||
136 | struct list_head *ins_list); | ||
137 | |||
138 | /* for init */ | ||
139 | int __init btrfs_delayed_inode_init(void); | ||
140 | void btrfs_delayed_inode_exit(void); | ||
141 | #endif | ||
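
The header above is the whole interface the rest of btrfs uses for delayed inode and dir-index work. As a rough illustration of the intended call pattern, the sketch below queues a new directory index through the delayed tree and then lets the throttle decide whether background flushing is needed; the wrapper name, the BTRFS_FT_REG_FILE type and the error handling are assumptions made for the example (the in-tree user of btrfs_insert_delayed_dir_index is btrfs_insert_dir_item() in fs/btrfs/dir-item.c, changed further below):

	/* Sketch only: illustrates the delayed dir-index API, not a real caller. */
	static int queue_dir_index_sketch(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct inode *dir, const char *name,
					  int name_len,
					  struct btrfs_disk_key *location,
					  u64 index)
	{
		int ret;

		/* Record the BTRFS_DIR_INDEX_KEY item in the delayed node
		 * instead of inserting it into the b-tree right away. */
		ret = btrfs_insert_delayed_dir_index(trans, root, name, name_len,
						     dir, location,
						     BTRFS_FT_REG_FILE, index);
		if (ret)
			return ret;

		/* If enough delayed items have piled up, kick the async
		 * workers and possibly wait for them to drain a little. */
		btrfs_balance_delayed_items(root);
		return 0;
	}
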
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c index bce28f653899..125cf76fcd08 100644 --- a/fs/btrfs/delayed-ref.c +++ b/fs/btrfs/delayed-ref.c | |||
@@ -281,44 +281,6 @@ again: | |||
281 | } | 281 | } |
282 | 282 | ||
283 | /* | 283 | /* |
284 | * This checks to see if there are any delayed refs in the | ||
285 | * btree for a given bytenr. It returns one if it finds any | ||
286 | * and zero otherwise. | ||
287 | * | ||
288 | * If it only finds a head node, it returns 0. | ||
289 | * | ||
290 | * The idea is to use this when deciding if you can safely delete an | ||
291 | * extent from the extent allocation tree. There may be a pending | ||
292 | * ref in the rbtree that adds or removes references, so as long as this | ||
293 | * returns one you need to leave the BTRFS_EXTENT_ITEM in the extent | ||
294 | * allocation tree. | ||
295 | */ | ||
296 | int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr) | ||
297 | { | ||
298 | struct btrfs_delayed_ref_node *ref; | ||
299 | struct btrfs_delayed_ref_root *delayed_refs; | ||
300 | struct rb_node *prev_node; | ||
301 | int ret = 0; | ||
302 | |||
303 | delayed_refs = &trans->transaction->delayed_refs; | ||
304 | spin_lock(&delayed_refs->lock); | ||
305 | |||
306 | ref = find_ref_head(&delayed_refs->root, bytenr, NULL); | ||
307 | if (ref) { | ||
308 | prev_node = rb_prev(&ref->rb_node); | ||
309 | if (!prev_node) | ||
310 | goto out; | ||
311 | ref = rb_entry(prev_node, struct btrfs_delayed_ref_node, | ||
312 | rb_node); | ||
313 | if (ref->bytenr == bytenr) | ||
314 | ret = 1; | ||
315 | } | ||
316 | out: | ||
317 | spin_unlock(&delayed_refs->lock); | ||
318 | return ret; | ||
319 | } | ||
320 | |||
321 | /* | ||
322 | * helper function to update an extent delayed ref in the | 284 | * helper function to update an extent delayed ref in the |
323 | * rbtree. existing and update must both have the same | 285 | * rbtree. existing and update must both have the same |
324 | * bytenr and parent | 286 | * bytenr and parent |
@@ -747,79 +709,3 @@ btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr) | |||
747 | return btrfs_delayed_node_to_head(ref); | 709 | return btrfs_delayed_node_to_head(ref); |
748 | return NULL; | 710 | return NULL; |
749 | } | 711 | } |
750 | |||
751 | /* | ||
752 | * add a delayed ref to the tree. This does all of the accounting required | ||
753 | * to make sure the delayed ref is eventually processed before this | ||
754 | * transaction commits. | ||
755 | * | ||
756 | * The main point of this call is to add and remove a backreference in a single | ||
757 | * shot, taking the lock only once, and only searching for the head node once. | ||
758 | * | ||
759 | * It is the same as doing a ref add and delete in two separate calls. | ||
760 | */ | ||
761 | #if 0 | ||
762 | int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans, | ||
763 | u64 bytenr, u64 num_bytes, u64 orig_parent, | ||
764 | u64 parent, u64 orig_ref_root, u64 ref_root, | ||
765 | u64 orig_ref_generation, u64 ref_generation, | ||
766 | u64 owner_objectid, int pin) | ||
767 | { | ||
768 | struct btrfs_delayed_ref *ref; | ||
769 | struct btrfs_delayed_ref *old_ref; | ||
770 | struct btrfs_delayed_ref_head *head_ref; | ||
771 | struct btrfs_delayed_ref_root *delayed_refs; | ||
772 | int ret; | ||
773 | |||
774 | ref = kmalloc(sizeof(*ref), GFP_NOFS); | ||
775 | if (!ref) | ||
776 | return -ENOMEM; | ||
777 | |||
778 | old_ref = kmalloc(sizeof(*old_ref), GFP_NOFS); | ||
779 | if (!old_ref) { | ||
780 | kfree(ref); | ||
781 | return -ENOMEM; | ||
782 | } | ||
783 | |||
784 | /* | ||
785 | * the parent = 0 case comes from cases where we don't actually | ||
786 | * know the parent yet. It will get updated later via a add/drop | ||
787 | * pair. | ||
788 | */ | ||
789 | if (parent == 0) | ||
790 | parent = bytenr; | ||
791 | if (orig_parent == 0) | ||
792 | orig_parent = bytenr; | ||
793 | |||
794 | head_ref = kmalloc(sizeof(*head_ref), GFP_NOFS); | ||
795 | if (!head_ref) { | ||
796 | kfree(ref); | ||
797 | kfree(old_ref); | ||
798 | return -ENOMEM; | ||
799 | } | ||
800 | delayed_refs = &trans->transaction->delayed_refs; | ||
801 | spin_lock(&delayed_refs->lock); | ||
802 | |||
803 | /* | ||
804 | * insert both the head node and the new ref without dropping | ||
805 | * the spin lock | ||
806 | */ | ||
807 | ret = __btrfs_add_delayed_ref(trans, &head_ref->node, bytenr, num_bytes, | ||
808 | (u64)-1, 0, 0, 0, | ||
809 | BTRFS_UPDATE_DELAYED_HEAD, 0); | ||
810 | BUG_ON(ret); | ||
811 | |||
812 | ret = __btrfs_add_delayed_ref(trans, &ref->node, bytenr, num_bytes, | ||
813 | parent, ref_root, ref_generation, | ||
814 | owner_objectid, BTRFS_ADD_DELAYED_REF, 0); | ||
815 | BUG_ON(ret); | ||
816 | |||
817 | ret = __btrfs_add_delayed_ref(trans, &old_ref->node, bytenr, num_bytes, | ||
818 | orig_parent, orig_ref_root, | ||
819 | orig_ref_generation, owner_objectid, | ||
820 | BTRFS_DROP_DELAYED_REF, pin); | ||
821 | BUG_ON(ret); | ||
822 | spin_unlock(&delayed_refs->lock); | ||
823 | return 0; | ||
824 | } | ||
825 | #endif | ||
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h index 50e3cf92fbda..e287e3b0eab0 100644 --- a/fs/btrfs/delayed-ref.h +++ b/fs/btrfs/delayed-ref.h | |||
@@ -166,12 +166,6 @@ int btrfs_add_delayed_extent_op(struct btrfs_trans_handle *trans, | |||
166 | 166 | ||
167 | struct btrfs_delayed_ref_head * | 167 | struct btrfs_delayed_ref_head * |
168 | btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); | 168 | btrfs_find_delayed_ref_head(struct btrfs_trans_handle *trans, u64 bytenr); |
169 | int btrfs_delayed_ref_pending(struct btrfs_trans_handle *trans, u64 bytenr); | ||
170 | int btrfs_update_delayed_ref(struct btrfs_trans_handle *trans, | ||
171 | u64 bytenr, u64 num_bytes, u64 orig_parent, | ||
172 | u64 parent, u64 orig_ref_root, u64 ref_root, | ||
173 | u64 orig_ref_generation, u64 ref_generation, | ||
174 | u64 owner_objectid, int pin); | ||
175 | int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, | 169 | int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans, |
176 | struct btrfs_delayed_ref_head *head); | 170 | struct btrfs_delayed_ref_head *head); |
177 | int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans, | 171 | int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans, |
diff --git a/fs/btrfs/dir-item.c b/fs/btrfs/dir-item.c index c62f02f6ae69..685f2593c4f0 100644 --- a/fs/btrfs/dir-item.c +++ b/fs/btrfs/dir-item.c | |||
@@ -50,7 +50,6 @@ static struct btrfs_dir_item *insert_with_overflow(struct btrfs_trans_handle | |||
50 | if (di) | 50 | if (di) |
51 | return ERR_PTR(-EEXIST); | 51 | return ERR_PTR(-EEXIST); |
52 | ret = btrfs_extend_item(trans, root, path, data_size); | 52 | ret = btrfs_extend_item(trans, root, path, data_size); |
53 | WARN_ON(ret > 0); | ||
54 | } | 53 | } |
55 | if (ret < 0) | 54 | if (ret < 0) |
56 | return ERR_PTR(ret); | 55 | return ERR_PTR(ret); |
@@ -124,8 +123,9 @@ int btrfs_insert_xattr_item(struct btrfs_trans_handle *trans, | |||
124 | * to use for the second index (if one is created). | 123 | * to use for the second index (if one is created). |
125 | */ | 124 | */ |
126 | int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root | 125 | int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root |
127 | *root, const char *name, int name_len, u64 dir, | 126 | *root, const char *name, int name_len, |
128 | struct btrfs_key *location, u8 type, u64 index) | 127 | struct inode *dir, struct btrfs_key *location, |
128 | u8 type, u64 index) | ||
129 | { | 129 | { |
130 | int ret = 0; | 130 | int ret = 0; |
131 | int ret2 = 0; | 131 | int ret2 = 0; |
@@ -137,13 +137,17 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root | |||
137 | struct btrfs_disk_key disk_key; | 137 | struct btrfs_disk_key disk_key; |
138 | u32 data_size; | 138 | u32 data_size; |
139 | 139 | ||
140 | key.objectid = dir; | 140 | key.objectid = btrfs_ino(dir); |
141 | btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); | 141 | btrfs_set_key_type(&key, BTRFS_DIR_ITEM_KEY); |
142 | key.offset = btrfs_name_hash(name, name_len); | 142 | key.offset = btrfs_name_hash(name, name_len); |
143 | 143 | ||
144 | path = btrfs_alloc_path(); | 144 | path = btrfs_alloc_path(); |
145 | if (!path) | ||
146 | return -ENOMEM; | ||
145 | path->leave_spinning = 1; | 147 | path->leave_spinning = 1; |
146 | 148 | ||
149 | btrfs_cpu_key_to_disk(&disk_key, location); | ||
150 | |||
147 | data_size = sizeof(*dir_item) + name_len; | 151 | data_size = sizeof(*dir_item) + name_len; |
148 | dir_item = insert_with_overflow(trans, root, path, &key, data_size, | 152 | dir_item = insert_with_overflow(trans, root, path, &key, data_size, |
149 | name, name_len); | 153 | name, name_len); |
@@ -155,7 +159,6 @@ int btrfs_insert_dir_item(struct btrfs_trans_handle *trans, struct btrfs_root | |||
155 | } | 159 | } |
156 | 160 | ||
157 | leaf = path->nodes[0]; | 161 | leaf = path->nodes[0]; |
158 | btrfs_cpu_key_to_disk(&disk_key, location); | ||
159 | btrfs_set_dir_item_key(leaf, dir_item, &disk_key); | 162 | btrfs_set_dir_item_key(leaf, dir_item, &disk_key); |
160 | btrfs_set_dir_type(leaf, dir_item, type); | 163 | btrfs_set_dir_type(leaf, dir_item, type); |
161 | btrfs_set_dir_data_len(leaf, dir_item, 0); | 164 | btrfs_set_dir_data_len(leaf, dir_item, 0); |
@@ -172,29 +175,11 @@ second_insert: | |||
172 | ret = 0; | 175 | ret = 0; |
173 | goto out_free; | 176 | goto out_free; |
174 | } | 177 | } |
175 | btrfs_release_path(root, path); | 178 | btrfs_release_path(path); |
176 | |||
177 | btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); | ||
178 | key.offset = index; | ||
179 | dir_item = insert_with_overflow(trans, root, path, &key, data_size, | ||
180 | name, name_len); | ||
181 | if (IS_ERR(dir_item)) { | ||
182 | ret2 = PTR_ERR(dir_item); | ||
183 | goto out_free; | ||
184 | } | ||
185 | leaf = path->nodes[0]; | ||
186 | btrfs_cpu_key_to_disk(&disk_key, location); | ||
187 | btrfs_set_dir_item_key(leaf, dir_item, &disk_key); | ||
188 | btrfs_set_dir_type(leaf, dir_item, type); | ||
189 | btrfs_set_dir_data_len(leaf, dir_item, 0); | ||
190 | btrfs_set_dir_name_len(leaf, dir_item, name_len); | ||
191 | btrfs_set_dir_transid(leaf, dir_item, trans->transid); | ||
192 | name_ptr = (unsigned long)(dir_item + 1); | ||
193 | write_extent_buffer(leaf, name, name_ptr, name_len); | ||
194 | btrfs_mark_buffer_dirty(leaf); | ||
195 | 179 | ||
180 | ret2 = btrfs_insert_delayed_dir_index(trans, root, name, name_len, dir, | ||
181 | &disk_key, type, index); | ||
196 | out_free: | 182 | out_free: |
197 | |||
198 | btrfs_free_path(path); | 183 | btrfs_free_path(path); |
199 | if (ret) | 184 | if (ret) |
200 | return ret; | 185 | return ret; |
@@ -452,7 +437,7 @@ int verify_dir_item(struct btrfs_root *root, | |||
452 | namelen = XATTR_NAME_MAX; | 437 | namelen = XATTR_NAME_MAX; |
453 | 438 | ||
454 | if (btrfs_dir_name_len(leaf, dir_item) > namelen) { | 439 | if (btrfs_dir_name_len(leaf, dir_item) > namelen) { |
455 | printk(KERN_CRIT "btrfS: invalid dir item name len: %u\n", | 440 | printk(KERN_CRIT "btrfs: invalid dir item name len: %u\n", |
456 | (unsigned)btrfs_dir_data_len(leaf, dir_item)); | 441 | (unsigned)btrfs_dir_data_len(leaf, dir_item)); |
457 | return 1; | 442 | return 1; |
458 | } | 443 | } |
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index 228cf36ece83..98b6a71decba 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/crc32c.h> | 29 | #include <linux/crc32c.h> |
30 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
31 | #include <linux/migrate.h> | 31 | #include <linux/migrate.h> |
32 | #include <linux/ratelimit.h> | ||
32 | #include <asm/unaligned.h> | 33 | #include <asm/unaligned.h> |
33 | #include "compat.h" | 34 | #include "compat.h" |
34 | #include "ctree.h" | 35 | #include "ctree.h" |
@@ -41,6 +42,7 @@ | |||
41 | #include "locking.h" | 42 | #include "locking.h" |
42 | #include "tree-log.h" | 43 | #include "tree-log.h" |
43 | #include "free-space-cache.h" | 44 | #include "free-space-cache.h" |
45 | #include "inode-map.h" | ||
44 | 46 | ||
45 | static struct extent_io_ops btree_extent_io_ops; | 47 | static struct extent_io_ops btree_extent_io_ops; |
46 | static void end_workqueue_fn(struct btrfs_work *work); | 48 | static void end_workqueue_fn(struct btrfs_work *work); |
@@ -137,7 +139,7 @@ static const char *btrfs_eb_name[BTRFS_MAX_LEVEL + 1] = { | |||
137 | * that covers the entire device | 139 | * that covers the entire device |
138 | */ | 140 | */ |
139 | static struct extent_map *btree_get_extent(struct inode *inode, | 141 | static struct extent_map *btree_get_extent(struct inode *inode, |
140 | struct page *page, size_t page_offset, u64 start, u64 len, | 142 | struct page *page, size_t pg_offset, u64 start, u64 len, |
141 | int create) | 143 | int create) |
142 | { | 144 | { |
143 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | 145 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
@@ -154,7 +156,7 @@ static struct extent_map *btree_get_extent(struct inode *inode, | |||
154 | } | 156 | } |
155 | read_unlock(&em_tree->lock); | 157 | read_unlock(&em_tree->lock); |
156 | 158 | ||
157 | em = alloc_extent_map(GFP_NOFS); | 159 | em = alloc_extent_map(); |
158 | if (!em) { | 160 | if (!em) { |
159 | em = ERR_PTR(-ENOMEM); | 161 | em = ERR_PTR(-ENOMEM); |
160 | goto out; | 162 | goto out; |
@@ -254,14 +256,12 @@ static int csum_tree_block(struct btrfs_root *root, struct extent_buffer *buf, | |||
254 | memcpy(&found, result, csum_size); | 256 | memcpy(&found, result, csum_size); |
255 | 257 | ||
256 | read_extent_buffer(buf, &val, 0, csum_size); | 258 | read_extent_buffer(buf, &val, 0, csum_size); |
257 | if (printk_ratelimit()) { | 259 | printk_ratelimited(KERN_INFO "btrfs: %s checksum verify " |
258 | printk(KERN_INFO "btrfs: %s checksum verify " | ||
259 | "failed on %llu wanted %X found %X " | 260 | "failed on %llu wanted %X found %X " |
260 | "level %d\n", | 261 | "level %d\n", |
261 | root->fs_info->sb->s_id, | 262 | root->fs_info->sb->s_id, |
262 | (unsigned long long)buf->start, val, found, | 263 | (unsigned long long)buf->start, val, found, |
263 | btrfs_header_level(buf)); | 264 | btrfs_header_level(buf)); |
264 | } | ||
265 | if (result != (char *)&inline_result) | 265 | if (result != (char *)&inline_result) |
266 | kfree(result); | 266 | kfree(result); |
267 | return 1; | 267 | return 1; |
@@ -296,13 +296,11 @@ static int verify_parent_transid(struct extent_io_tree *io_tree, | |||
296 | ret = 0; | 296 | ret = 0; |
297 | goto out; | 297 | goto out; |
298 | } | 298 | } |
299 | if (printk_ratelimit()) { | 299 | printk_ratelimited("parent transid verify failed on %llu wanted %llu " |
300 | printk("parent transid verify failed on %llu wanted %llu " | ||
301 | "found %llu\n", | 300 | "found %llu\n", |
302 | (unsigned long long)eb->start, | 301 | (unsigned long long)eb->start, |
303 | (unsigned long long)parent_transid, | 302 | (unsigned long long)parent_transid, |
304 | (unsigned long long)btrfs_header_generation(eb)); | 303 | (unsigned long long)btrfs_header_generation(eb)); |
305 | } | ||
306 | ret = 1; | 304 | ret = 1; |
307 | clear_extent_buffer_uptodate(io_tree, eb, &cached_state); | 305 | clear_extent_buffer_uptodate(io_tree, eb, &cached_state); |
308 | out: | 306 | out: |
@@ -380,7 +378,7 @@ static int csum_dirty_buffer(struct btrfs_root *root, struct page *page) | |||
380 | len = page->private >> 2; | 378 | len = page->private >> 2; |
381 | WARN_ON(len == 0); | 379 | WARN_ON(len == 0); |
382 | 380 | ||
383 | eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); | 381 | eb = alloc_extent_buffer(tree, start, len, page); |
384 | if (eb == NULL) { | 382 | if (eb == NULL) { |
385 | WARN_ON(1); | 383 | WARN_ON(1); |
386 | goto out; | 384 | goto out; |
@@ -525,7 +523,7 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, | |||
525 | len = page->private >> 2; | 523 | len = page->private >> 2; |
526 | WARN_ON(len == 0); | 524 | WARN_ON(len == 0); |
527 | 525 | ||
528 | eb = alloc_extent_buffer(tree, start, len, page, GFP_NOFS); | 526 | eb = alloc_extent_buffer(tree, start, len, page); |
529 | if (eb == NULL) { | 527 | if (eb == NULL) { |
530 | ret = -EIO; | 528 | ret = -EIO; |
531 | goto out; | 529 | goto out; |
@@ -533,12 +531,10 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, | |||
533 | 531 | ||
534 | found_start = btrfs_header_bytenr(eb); | 532 | found_start = btrfs_header_bytenr(eb); |
535 | if (found_start != start) { | 533 | if (found_start != start) { |
536 | if (printk_ratelimit()) { | 534 | printk_ratelimited(KERN_INFO "btrfs bad tree block start " |
537 | printk(KERN_INFO "btrfs bad tree block start " | ||
538 | "%llu %llu\n", | 535 | "%llu %llu\n", |
539 | (unsigned long long)found_start, | 536 | (unsigned long long)found_start, |
540 | (unsigned long long)eb->start); | 537 | (unsigned long long)eb->start); |
541 | } | ||
542 | ret = -EIO; | 538 | ret = -EIO; |
543 | goto err; | 539 | goto err; |
544 | } | 540 | } |
@@ -550,10 +546,8 @@ static int btree_readpage_end_io_hook(struct page *page, u64 start, u64 end, | |||
550 | goto err; | 546 | goto err; |
551 | } | 547 | } |
552 | if (check_tree_block_fsid(root, eb)) { | 548 | if (check_tree_block_fsid(root, eb)) { |
553 | if (printk_ratelimit()) { | 549 | printk_ratelimited(KERN_INFO "btrfs bad fsid on block %llu\n", |
554 | printk(KERN_INFO "btrfs bad fsid on block %llu\n", | ||
555 | (unsigned long long)eb->start); | 550 | (unsigned long long)eb->start); |
556 | } | ||
557 | ret = -EIO; | 551 | ret = -EIO; |
558 | goto err; | 552 | goto err; |
559 | } | 553 | } |
@@ -650,12 +644,6 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info) | |||
650 | return 256 * limit; | 644 | return 256 * limit; |
651 | } | 645 | } |
652 | 646 | ||
653 | int btrfs_congested_async(struct btrfs_fs_info *info, int iodone) | ||
654 | { | ||
655 | return atomic_read(&info->nr_async_bios) > | ||
656 | btrfs_async_submit_limit(info); | ||
657 | } | ||
658 | |||
659 | static void run_one_async_start(struct btrfs_work *work) | 647 | static void run_one_async_start(struct btrfs_work *work) |
660 | { | 648 | { |
661 | struct async_submit_bio *async; | 649 | struct async_submit_bio *async; |
@@ -963,7 +951,7 @@ struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, | |||
963 | struct inode *btree_inode = root->fs_info->btree_inode; | 951 | struct inode *btree_inode = root->fs_info->btree_inode; |
964 | struct extent_buffer *eb; | 952 | struct extent_buffer *eb; |
965 | eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree, | 953 | eb = find_extent_buffer(&BTRFS_I(btree_inode)->io_tree, |
966 | bytenr, blocksize, GFP_NOFS); | 954 | bytenr, blocksize); |
967 | return eb; | 955 | return eb; |
968 | } | 956 | } |
969 | 957 | ||
@@ -974,7 +962,7 @@ struct extent_buffer *btrfs_find_create_tree_block(struct btrfs_root *root, | |||
974 | struct extent_buffer *eb; | 962 | struct extent_buffer *eb; |
975 | 963 | ||
976 | eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree, | 964 | eb = alloc_extent_buffer(&BTRFS_I(btree_inode)->io_tree, |
977 | bytenr, blocksize, NULL, GFP_NOFS); | 965 | bytenr, blocksize, NULL); |
978 | return eb; | 966 | return eb; |
979 | } | 967 | } |
980 | 968 | ||
@@ -1058,13 +1046,13 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, | |||
1058 | root->name = NULL; | 1046 | root->name = NULL; |
1059 | root->in_sysfs = 0; | 1047 | root->in_sysfs = 0; |
1060 | root->inode_tree = RB_ROOT; | 1048 | root->inode_tree = RB_ROOT; |
1049 | INIT_RADIX_TREE(&root->delayed_nodes_tree, GFP_ATOMIC); | ||
1061 | root->block_rsv = NULL; | 1050 | root->block_rsv = NULL; |
1062 | root->orphan_block_rsv = NULL; | 1051 | root->orphan_block_rsv = NULL; |
1063 | 1052 | ||
1064 | INIT_LIST_HEAD(&root->dirty_list); | 1053 | INIT_LIST_HEAD(&root->dirty_list); |
1065 | INIT_LIST_HEAD(&root->orphan_list); | 1054 | INIT_LIST_HEAD(&root->orphan_list); |
1066 | INIT_LIST_HEAD(&root->root_list); | 1055 | INIT_LIST_HEAD(&root->root_list); |
1067 | spin_lock_init(&root->node_lock); | ||
1068 | spin_lock_init(&root->orphan_lock); | 1056 | spin_lock_init(&root->orphan_lock); |
1069 | spin_lock_init(&root->inode_lock); | 1057 | spin_lock_init(&root->inode_lock); |
1070 | spin_lock_init(&root->accounting_lock); | 1058 | spin_lock_init(&root->accounting_lock); |
@@ -1080,7 +1068,7 @@ static int __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize, | |||
1080 | root->log_transid = 0; | 1068 | root->log_transid = 0; |
1081 | root->last_log_commit = 0; | 1069 | root->last_log_commit = 0; |
1082 | extent_io_tree_init(&root->dirty_log_pages, | 1070 | extent_io_tree_init(&root->dirty_log_pages, |
1083 | fs_info->btree_inode->i_mapping, GFP_NOFS); | 1071 | fs_info->btree_inode->i_mapping); |
1084 | 1072 | ||
1085 | memset(&root->root_key, 0, sizeof(root->root_key)); | 1073 | memset(&root->root_key, 0, sizeof(root->root_key)); |
1086 | memset(&root->root_item, 0, sizeof(root->root_item)); | 1074 | memset(&root->root_item, 0, sizeof(root->root_item)); |
@@ -1283,21 +1271,6 @@ out: | |||
1283 | return root; | 1271 | return root; |
1284 | } | 1272 | } |
1285 | 1273 | ||
1286 | struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, | ||
1287 | u64 root_objectid) | ||
1288 | { | ||
1289 | struct btrfs_root *root; | ||
1290 | |||
1291 | if (root_objectid == BTRFS_ROOT_TREE_OBJECTID) | ||
1292 | return fs_info->tree_root; | ||
1293 | if (root_objectid == BTRFS_EXTENT_TREE_OBJECTID) | ||
1294 | return fs_info->extent_root; | ||
1295 | |||
1296 | root = radix_tree_lookup(&fs_info->fs_roots_radix, | ||
1297 | (unsigned long)root_objectid); | ||
1298 | return root; | ||
1299 | } | ||
1300 | |||
1301 | struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, | 1274 | struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, |
1302 | struct btrfs_key *location) | 1275 | struct btrfs_key *location) |
1303 | { | 1276 | { |
@@ -1326,6 +1299,19 @@ again: | |||
1326 | if (IS_ERR(root)) | 1299 | if (IS_ERR(root)) |
1327 | return root; | 1300 | return root; |
1328 | 1301 | ||
1302 | root->free_ino_ctl = kzalloc(sizeof(*root->free_ino_ctl), GFP_NOFS); | ||
1303 | if (!root->free_ino_ctl) | ||
1304 | goto fail; | ||
1305 | root->free_ino_pinned = kzalloc(sizeof(*root->free_ino_pinned), | ||
1306 | GFP_NOFS); | ||
1307 | if (!root->free_ino_pinned) | ||
1308 | goto fail; | ||
1309 | |||
1310 | btrfs_init_free_ino_ctl(root); | ||
1311 | mutex_init(&root->fs_commit_mutex); | ||
1312 | spin_lock_init(&root->cache_lock); | ||
1313 | init_waitqueue_head(&root->cache_wait); | ||
1314 | |||
1329 | set_anon_super(&root->anon_super, NULL); | 1315 | set_anon_super(&root->anon_super, NULL); |
1330 | 1316 | ||
1331 | if (btrfs_root_refs(&root->root_item) == 0) { | 1317 | if (btrfs_root_refs(&root->root_item) == 0) { |
@@ -1369,41 +1355,6 @@ fail: | |||
1369 | return ERR_PTR(ret); | 1355 | return ERR_PTR(ret); |
1370 | } | 1356 | } |
1371 | 1357 | ||
1372 | struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, | ||
1373 | struct btrfs_key *location, | ||
1374 | const char *name, int namelen) | ||
1375 | { | ||
1376 | return btrfs_read_fs_root_no_name(fs_info, location); | ||
1377 | #if 0 | ||
1378 | struct btrfs_root *root; | ||
1379 | int ret; | ||
1380 | |||
1381 | root = btrfs_read_fs_root_no_name(fs_info, location); | ||
1382 | if (!root) | ||
1383 | return NULL; | ||
1384 | |||
1385 | if (root->in_sysfs) | ||
1386 | return root; | ||
1387 | |||
1388 | ret = btrfs_set_root_name(root, name, namelen); | ||
1389 | if (ret) { | ||
1390 | free_extent_buffer(root->node); | ||
1391 | kfree(root); | ||
1392 | return ERR_PTR(ret); | ||
1393 | } | ||
1394 | |||
1395 | ret = btrfs_sysfs_add_root(root); | ||
1396 | if (ret) { | ||
1397 | free_extent_buffer(root->node); | ||
1398 | kfree(root->name); | ||
1399 | kfree(root); | ||
1400 | return ERR_PTR(ret); | ||
1401 | } | ||
1402 | root->in_sysfs = 1; | ||
1403 | return root; | ||
1404 | #endif | ||
1405 | } | ||
1406 | |||
1407 | static int btrfs_congested_fn(void *congested_data, int bdi_bits) | 1358 | static int btrfs_congested_fn(void *congested_data, int bdi_bits) |
1408 | { | 1359 | { |
1409 | struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; | 1360 | struct btrfs_fs_info *info = (struct btrfs_fs_info *)congested_data; |
@@ -1411,7 +1362,8 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) | |||
1411 | struct btrfs_device *device; | 1362 | struct btrfs_device *device; |
1412 | struct backing_dev_info *bdi; | 1363 | struct backing_dev_info *bdi; |
1413 | 1364 | ||
1414 | list_for_each_entry(device, &info->fs_devices->devices, dev_list) { | 1365 | rcu_read_lock(); |
1366 | list_for_each_entry_rcu(device, &info->fs_devices->devices, dev_list) { | ||
1415 | if (!device->bdev) | 1367 | if (!device->bdev) |
1416 | continue; | 1368 | continue; |
1417 | bdi = blk_get_backing_dev_info(device->bdev); | 1369 | bdi = blk_get_backing_dev_info(device->bdev); |
@@ -1420,6 +1372,7 @@ static int btrfs_congested_fn(void *congested_data, int bdi_bits) | |||
1420 | break; | 1372 | break; |
1421 | } | 1373 | } |
1422 | } | 1374 | } |
1375 | rcu_read_unlock(); | ||
1423 | return ret; | 1376 | return ret; |
1424 | } | 1377 | } |
1425 | 1378 | ||
@@ -1522,6 +1475,7 @@ static int cleaner_kthread(void *arg) | |||
1522 | btrfs_run_delayed_iputs(root); | 1475 | btrfs_run_delayed_iputs(root); |
1523 | btrfs_clean_old_snapshots(root); | 1476 | btrfs_clean_old_snapshots(root); |
1524 | mutex_unlock(&root->fs_info->cleaner_mutex); | 1477 | mutex_unlock(&root->fs_info->cleaner_mutex); |
1478 | btrfs_run_defrag_inodes(root->fs_info); | ||
1525 | } | 1479 | } |
1526 | 1480 | ||
1527 | if (freezing(current)) { | 1481 | if (freezing(current)) { |
@@ -1611,7 +1565,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1611 | struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), | 1565 | struct btrfs_root *csum_root = kzalloc(sizeof(struct btrfs_root), |
1612 | GFP_NOFS); | 1566 | GFP_NOFS); |
1613 | struct btrfs_root *tree_root = btrfs_sb(sb); | 1567 | struct btrfs_root *tree_root = btrfs_sb(sb); |
1614 | struct btrfs_fs_info *fs_info = tree_root->fs_info; | 1568 | struct btrfs_fs_info *fs_info = NULL; |
1615 | struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), | 1569 | struct btrfs_root *chunk_root = kzalloc(sizeof(struct btrfs_root), |
1616 | GFP_NOFS); | 1570 | GFP_NOFS); |
1617 | struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), | 1571 | struct btrfs_root *dev_root = kzalloc(sizeof(struct btrfs_root), |
@@ -1623,11 +1577,12 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1623 | 1577 | ||
1624 | struct btrfs_super_block *disk_super; | 1578 | struct btrfs_super_block *disk_super; |
1625 | 1579 | ||
1626 | if (!extent_root || !tree_root || !fs_info || | 1580 | if (!extent_root || !tree_root || !tree_root->fs_info || |
1627 | !chunk_root || !dev_root || !csum_root) { | 1581 | !chunk_root || !dev_root || !csum_root) { |
1628 | err = -ENOMEM; | 1582 | err = -ENOMEM; |
1629 | goto fail; | 1583 | goto fail; |
1630 | } | 1584 | } |
1585 | fs_info = tree_root->fs_info; | ||
1631 | 1586 | ||
1632 | ret = init_srcu_struct(&fs_info->subvol_srcu); | 1587 | ret = init_srcu_struct(&fs_info->subvol_srcu); |
1633 | if (ret) { | 1588 | if (ret) { |
@@ -1662,6 +1617,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1662 | spin_lock_init(&fs_info->ref_cache_lock); | 1617 | spin_lock_init(&fs_info->ref_cache_lock); |
1663 | spin_lock_init(&fs_info->fs_roots_radix_lock); | 1618 | spin_lock_init(&fs_info->fs_roots_radix_lock); |
1664 | spin_lock_init(&fs_info->delayed_iput_lock); | 1619 | spin_lock_init(&fs_info->delayed_iput_lock); |
1620 | spin_lock_init(&fs_info->defrag_inodes_lock); | ||
1665 | 1621 | ||
1666 | init_completion(&fs_info->kobj_unregister); | 1622 | init_completion(&fs_info->kobj_unregister); |
1667 | fs_info->tree_root = tree_root; | 1623 | fs_info->tree_root = tree_root; |
@@ -1684,15 +1640,35 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1684 | atomic_set(&fs_info->async_delalloc_pages, 0); | 1640 | atomic_set(&fs_info->async_delalloc_pages, 0); |
1685 | atomic_set(&fs_info->async_submit_draining, 0); | 1641 | atomic_set(&fs_info->async_submit_draining, 0); |
1686 | atomic_set(&fs_info->nr_async_bios, 0); | 1642 | atomic_set(&fs_info->nr_async_bios, 0); |
1643 | atomic_set(&fs_info->defrag_running, 0); | ||
1687 | fs_info->sb = sb; | 1644 | fs_info->sb = sb; |
1688 | fs_info->max_inline = 8192 * 1024; | 1645 | fs_info->max_inline = 8192 * 1024; |
1689 | fs_info->metadata_ratio = 0; | 1646 | fs_info->metadata_ratio = 0; |
1647 | fs_info->defrag_inodes = RB_ROOT; | ||
1690 | 1648 | ||
1691 | fs_info->thread_pool_size = min_t(unsigned long, | 1649 | fs_info->thread_pool_size = min_t(unsigned long, |
1692 | num_online_cpus() + 2, 8); | 1650 | num_online_cpus() + 2, 8); |
1693 | 1651 | ||
1694 | INIT_LIST_HEAD(&fs_info->ordered_extents); | 1652 | INIT_LIST_HEAD(&fs_info->ordered_extents); |
1695 | spin_lock_init(&fs_info->ordered_extent_lock); | 1653 | spin_lock_init(&fs_info->ordered_extent_lock); |
1654 | fs_info->delayed_root = kmalloc(sizeof(struct btrfs_delayed_root), | ||
1655 | GFP_NOFS); | ||
1656 | if (!fs_info->delayed_root) { | ||
1657 | err = -ENOMEM; | ||
1658 | goto fail_iput; | ||
1659 | } | ||
1660 | btrfs_init_delayed_root(fs_info->delayed_root); | ||
1661 | |||
1662 | mutex_init(&fs_info->scrub_lock); | ||
1663 | atomic_set(&fs_info->scrubs_running, 0); | ||
1664 | atomic_set(&fs_info->scrub_pause_req, 0); | ||
1665 | atomic_set(&fs_info->scrubs_paused, 0); | ||
1666 | atomic_set(&fs_info->scrub_cancel_req, 0); | ||
1667 | init_waitqueue_head(&fs_info->scrub_pause_wait); | ||
1668 | init_rwsem(&fs_info->scrub_super_lock); | ||
1669 | fs_info->scrub_workers_refcnt = 0; | ||
1670 | btrfs_init_workers(&fs_info->scrub_workers, "scrub", | ||
1671 | fs_info->thread_pool_size, &fs_info->generic_worker); | ||
1696 | 1672 | ||
1697 | sb->s_blocksize = 4096; | 1673 | sb->s_blocksize = 4096; |
1698 | sb->s_blocksize_bits = blksize_bits(4096); | 1674 | sb->s_blocksize_bits = blksize_bits(4096); |
@@ -1711,10 +1687,8 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1711 | 1687 | ||
1712 | RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); | 1688 | RB_CLEAR_NODE(&BTRFS_I(fs_info->btree_inode)->rb_node); |
1713 | extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, | 1689 | extent_io_tree_init(&BTRFS_I(fs_info->btree_inode)->io_tree, |
1714 | fs_info->btree_inode->i_mapping, | 1690 | fs_info->btree_inode->i_mapping); |
1715 | GFP_NOFS); | 1691 | extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree); |
1716 | extent_map_tree_init(&BTRFS_I(fs_info->btree_inode)->extent_tree, | ||
1717 | GFP_NOFS); | ||
1718 | 1692 | ||
1719 | BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops; | 1693 | BTRFS_I(fs_info->btree_inode)->io_tree.ops = &btree_extent_io_ops; |
1720 | 1694 | ||
@@ -1728,9 +1702,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1728 | fs_info->block_group_cache_tree = RB_ROOT; | 1702 | fs_info->block_group_cache_tree = RB_ROOT; |
1729 | 1703 | ||
1730 | extent_io_tree_init(&fs_info->freed_extents[0], | 1704 | extent_io_tree_init(&fs_info->freed_extents[0], |
1731 | fs_info->btree_inode->i_mapping, GFP_NOFS); | 1705 | fs_info->btree_inode->i_mapping); |
1732 | extent_io_tree_init(&fs_info->freed_extents[1], | 1706 | extent_io_tree_init(&fs_info->freed_extents[1], |
1733 | fs_info->btree_inode->i_mapping, GFP_NOFS); | 1707 | fs_info->btree_inode->i_mapping); |
1734 | fs_info->pinned_extents = &fs_info->freed_extents[0]; | 1708 | fs_info->pinned_extents = &fs_info->freed_extents[0]; |
1735 | fs_info->do_barriers = 1; | 1709 | fs_info->do_barriers = 1; |
1736 | 1710 | ||
@@ -1760,7 +1734,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1760 | bh = btrfs_read_dev_super(fs_devices->latest_bdev); | 1734 | bh = btrfs_read_dev_super(fs_devices->latest_bdev); |
1761 | if (!bh) { | 1735 | if (!bh) { |
1762 | err = -EINVAL; | 1736 | err = -EINVAL; |
1763 | goto fail_iput; | 1737 | goto fail_alloc; |
1764 | } | 1738 | } |
1765 | 1739 | ||
1766 | memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy)); | 1740 | memcpy(&fs_info->super_copy, bh->b_data, sizeof(fs_info->super_copy)); |
@@ -1772,7 +1746,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1772 | 1746 | ||
1773 | disk_super = &fs_info->super_copy; | 1747 | disk_super = &fs_info->super_copy; |
1774 | if (!btrfs_super_root(disk_super)) | 1748 | if (!btrfs_super_root(disk_super)) |
1775 | goto fail_iput; | 1749 | goto fail_alloc; |
1776 | 1750 | ||
1777 | /* check FS state, whether FS is broken. */ | 1751 | /* check FS state, whether FS is broken. */ |
1778 | fs_info->fs_state |= btrfs_super_flags(disk_super); | 1752 | fs_info->fs_state |= btrfs_super_flags(disk_super); |
@@ -1788,7 +1762,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1788 | ret = btrfs_parse_options(tree_root, options); | 1762 | ret = btrfs_parse_options(tree_root, options); |
1789 | if (ret) { | 1763 | if (ret) { |
1790 | err = ret; | 1764 | err = ret; |
1791 | goto fail_iput; | 1765 | goto fail_alloc; |
1792 | } | 1766 | } |
1793 | 1767 | ||
1794 | features = btrfs_super_incompat_flags(disk_super) & | 1768 | features = btrfs_super_incompat_flags(disk_super) & |
@@ -1798,7 +1772,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1798 | "unsupported optional features (%Lx).\n", | 1772 | "unsupported optional features (%Lx).\n", |
1799 | (unsigned long long)features); | 1773 | (unsigned long long)features); |
1800 | err = -EINVAL; | 1774 | err = -EINVAL; |
1801 | goto fail_iput; | 1775 | goto fail_alloc; |
1802 | } | 1776 | } |
1803 | 1777 | ||
1804 | features = btrfs_super_incompat_flags(disk_super); | 1778 | features = btrfs_super_incompat_flags(disk_super); |
@@ -1814,7 +1788,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1814 | "unsupported option features (%Lx).\n", | 1788 | "unsupported option features (%Lx).\n", |
1815 | (unsigned long long)features); | 1789 | (unsigned long long)features); |
1816 | err = -EINVAL; | 1790 | err = -EINVAL; |
1817 | goto fail_iput; | 1791 | goto fail_alloc; |
1818 | } | 1792 | } |
1819 | 1793 | ||
1820 | btrfs_init_workers(&fs_info->generic_worker, | 1794 | btrfs_init_workers(&fs_info->generic_worker, |
@@ -1861,6 +1835,9 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1861 | &fs_info->generic_worker); | 1835 | &fs_info->generic_worker); |
1862 | btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write", | 1836 | btrfs_init_workers(&fs_info->endio_freespace_worker, "freespace-write", |
1863 | 1, &fs_info->generic_worker); | 1837 | 1, &fs_info->generic_worker); |
1838 | btrfs_init_workers(&fs_info->delayed_workers, "delayed-meta", | ||
1839 | fs_info->thread_pool_size, | ||
1840 | &fs_info->generic_worker); | ||
1864 | 1841 | ||
1865 | /* | 1842 | /* |
1866 | * endios are largely parallel and should have a very | 1843 | * endios are largely parallel and should have a very |
@@ -1882,6 +1859,7 @@ struct btrfs_root *open_ctree(struct super_block *sb, | |||
1882 | btrfs_start_workers(&fs_info->endio_meta_write_workers, 1); | 1859 | btrfs_start_workers(&fs_info->endio_meta_write_workers, 1); |
1883 | btrfs_start_workers(&fs_info->endio_write_workers, 1); | 1860 | btrfs_start_workers(&fs_info->endio_write_workers, 1); |
1884 | btrfs_start_workers(&fs_info->endio_freespace_worker, 1); | 1861 | btrfs_start_workers(&fs_info->endio_freespace_worker, 1); |
1862 | btrfs_start_workers(&fs_info->delayed_workers, 1); | ||
1885 | 1863 | ||
1886 | fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); | 1864 | fs_info->bdi.ra_pages *= btrfs_super_num_devices(disk_super); |
1887 | fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, | 1865 | fs_info->bdi.ra_pages = max(fs_info->bdi.ra_pages, |
@@ -2138,6 +2116,9 @@ fail_sb_buffer: | |||
2138 | btrfs_stop_workers(&fs_info->endio_write_workers); | 2116 | btrfs_stop_workers(&fs_info->endio_write_workers); |
2139 | btrfs_stop_workers(&fs_info->endio_freespace_worker); | 2117 | btrfs_stop_workers(&fs_info->endio_freespace_worker); |
2140 | btrfs_stop_workers(&fs_info->submit_workers); | 2118 | btrfs_stop_workers(&fs_info->submit_workers); |
2119 | btrfs_stop_workers(&fs_info->delayed_workers); | ||
2120 | fail_alloc: | ||
2121 | kfree(fs_info->delayed_root); | ||
2141 | fail_iput: | 2122 | fail_iput: |
2142 | invalidate_inode_pages2(fs_info->btree_inode->i_mapping); | 2123 | invalidate_inode_pages2(fs_info->btree_inode->i_mapping); |
2143 | iput(fs_info->btree_inode); | 2124 | iput(fs_info->btree_inode); |
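Note on the hunk above: it inserts a new fail_alloc label ahead of fail_iput so the freshly allocated delayed_root is freed on every failure path that can reach it, and the earlier open_ctree() hunks retarget their goto statements from fail_iput to fail_alloc accordingly. This is the usual kernel unwind idiom: error labels appear in reverse order of acquisition, and each failure jumps to the label that undoes exactly what has been set up so far. A minimal self-contained sketch of the idiom follows; every name in it is invented for illustration and is not part of the patch.

#include <linux/slab.h>

struct demo_ctx {
	void *delayed;
	void *super;
};

/* Stand-in for a later setup step that may fail. */
static int demo_register(struct demo_ctx *ctx)
{
	return 0;
}

static int demo_setup(struct demo_ctx *ctx)
{
	int err;

	ctx->delayed = kzalloc(128, GFP_NOFS);
	if (!ctx->delayed)
		return -ENOMEM;			/* nothing to unwind yet */

	ctx->super = kzalloc(4096, GFP_NOFS);
	if (!ctx->super) {
		err = -ENOMEM;
		goto fail_delayed;		/* free only what exists so far */
	}

	err = demo_register(ctx);
	if (err)
		goto fail_super;		/* unwind both allocations */

	return 0;

fail_super:
	kfree(ctx->super);
fail_delayed:
	kfree(ctx->delayed);
	return err;
}

Jumping to the deepest applicable label, as the retargeted gotos in open_ctree() do, keeps each cleanup statement written exactly once.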
@@ -2165,11 +2146,9 @@ static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) | |||
2165 | if (uptodate) { | 2146 | if (uptodate) { |
2166 | set_buffer_uptodate(bh); | 2147 | set_buffer_uptodate(bh); |
2167 | } else { | 2148 | } else { |
2168 | if (printk_ratelimit()) { | 2149 | printk_ratelimited(KERN_WARNING "lost page write due to " |
2169 | printk(KERN_WARNING "lost page write due to " | ||
2170 | "I/O error on %s\n", | 2150 | "I/O error on %s\n", |
2171 | bdevname(bh->b_bdev, b)); | 2151 | bdevname(bh->b_bdev, b)); |
2172 | } | ||
2173 | /* note, we dont' set_buffer_write_io_error because we have | 2152 | /* note, we dont' set_buffer_write_io_error because we have |
2174 | * our own ways of dealing with the IO errors | 2153 | * our own ways of dealing with the IO errors |
2175 | */ | 2154 | */ |
@@ -2333,7 +2312,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) | |||
2333 | 2312 | ||
2334 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | 2313 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); |
2335 | head = &root->fs_info->fs_devices->devices; | 2314 | head = &root->fs_info->fs_devices->devices; |
2336 | list_for_each_entry(dev, head, dev_list) { | 2315 | list_for_each_entry_rcu(dev, head, dev_list) { |
2337 | if (!dev->bdev) { | 2316 | if (!dev->bdev) { |
2338 | total_errors++; | 2317 | total_errors++; |
2339 | continue; | 2318 | continue; |
@@ -2366,7 +2345,7 @@ int write_all_supers(struct btrfs_root *root, int max_mirrors) | |||
2366 | } | 2345 | } |
2367 | 2346 | ||
2368 | total_errors = 0; | 2347 | total_errors = 0; |
2369 | list_for_each_entry(dev, head, dev_list) { | 2348 | list_for_each_entry_rcu(dev, head, dev_list) { |
2370 | if (!dev->bdev) | 2349 | if (!dev->bdev) |
2371 | continue; | 2350 | continue; |
2372 | if (!dev->in_fs_metadata || !dev->writeable) | 2351 | if (!dev->in_fs_metadata || !dev->writeable) |
@@ -2404,12 +2383,15 @@ int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) | |||
2404 | if (btrfs_root_refs(&root->root_item) == 0) | 2383 | if (btrfs_root_refs(&root->root_item) == 0) |
2405 | synchronize_srcu(&fs_info->subvol_srcu); | 2384 | synchronize_srcu(&fs_info->subvol_srcu); |
2406 | 2385 | ||
2386 | __btrfs_remove_free_space_cache(root->free_ino_pinned); | ||
2387 | __btrfs_remove_free_space_cache(root->free_ino_ctl); | ||
2407 | free_fs_root(root); | 2388 | free_fs_root(root); |
2408 | return 0; | 2389 | return 0; |
2409 | } | 2390 | } |
2410 | 2391 | ||
2411 | static void free_fs_root(struct btrfs_root *root) | 2392 | static void free_fs_root(struct btrfs_root *root) |
2412 | { | 2393 | { |
2394 | iput(root->cache_inode); | ||
2413 | WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); | 2395 | WARN_ON(!RB_EMPTY_ROOT(&root->inode_tree)); |
2414 | if (root->anon_super.s_dev) { | 2396 | if (root->anon_super.s_dev) { |
2415 | down_write(&root->anon_super.s_umount); | 2397 | down_write(&root->anon_super.s_umount); |
@@ -2417,6 +2399,8 @@ static void free_fs_root(struct btrfs_root *root) | |||
2417 | } | 2399 | } |
2418 | free_extent_buffer(root->node); | 2400 | free_extent_buffer(root->node); |
2419 | free_extent_buffer(root->commit_root); | 2401 | free_extent_buffer(root->commit_root); |
2402 | kfree(root->free_ino_ctl); | ||
2403 | kfree(root->free_ino_pinned); | ||
2420 | kfree(root->name); | 2404 | kfree(root->name); |
2421 | kfree(root); | 2405 | kfree(root); |
2422 | } | 2406 | } |
@@ -2520,6 +2504,15 @@ int close_ctree(struct btrfs_root *root) | |||
2520 | fs_info->closing = 1; | 2504 | fs_info->closing = 1; |
2521 | smp_mb(); | 2505 | smp_mb(); |
2522 | 2506 | ||
2507 | btrfs_scrub_cancel(root); | ||
2508 | |||
2509 | /* wait for any defraggers to finish */ | ||
2510 | wait_event(fs_info->transaction_wait, | ||
2511 | (atomic_read(&fs_info->defrag_running) == 0)); | ||
2512 | |||
2513 | /* clear out the rbtree of defraggable inodes */ | ||
2514 | btrfs_run_defrag_inodes(root->fs_info); | ||
2515 | |||
2523 | btrfs_put_block_group_cache(fs_info); | 2516 | btrfs_put_block_group_cache(fs_info); |
2524 | 2517 | ||
2525 | /* | 2518 | /* |
@@ -2578,6 +2571,7 @@ int close_ctree(struct btrfs_root *root) | |||
2578 | del_fs_roots(fs_info); | 2571 | del_fs_roots(fs_info); |
2579 | 2572 | ||
2580 | iput(fs_info->btree_inode); | 2573 | iput(fs_info->btree_inode); |
2574 | kfree(fs_info->delayed_root); | ||
2581 | 2575 | ||
2582 | btrfs_stop_workers(&fs_info->generic_worker); | 2576 | btrfs_stop_workers(&fs_info->generic_worker); |
2583 | btrfs_stop_workers(&fs_info->fixup_workers); | 2577 | btrfs_stop_workers(&fs_info->fixup_workers); |
@@ -2589,6 +2583,7 @@ int close_ctree(struct btrfs_root *root) | |||
2589 | btrfs_stop_workers(&fs_info->endio_write_workers); | 2583 | btrfs_stop_workers(&fs_info->endio_write_workers); |
2590 | btrfs_stop_workers(&fs_info->endio_freespace_worker); | 2584 | btrfs_stop_workers(&fs_info->endio_freespace_worker); |
2591 | btrfs_stop_workers(&fs_info->submit_workers); | 2585 | btrfs_stop_workers(&fs_info->submit_workers); |
2586 | btrfs_stop_workers(&fs_info->delayed_workers); | ||
2592 | 2587 | ||
2593 | btrfs_close_devices(fs_info->fs_devices); | 2588 | btrfs_close_devices(fs_info->fs_devices); |
2594 | btrfs_mapping_tree_free(&fs_info->mapping_tree); | 2589 | btrfs_mapping_tree_free(&fs_info->mapping_tree); |
@@ -2665,6 +2660,29 @@ void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) | |||
2665 | if (current->flags & PF_MEMALLOC) | 2660 | if (current->flags & PF_MEMALLOC) |
2666 | return; | 2661 | return; |
2667 | 2662 | ||
2663 | btrfs_balance_delayed_items(root); | ||
2664 | |||
2665 | num_dirty = root->fs_info->dirty_metadata_bytes; | ||
2666 | |||
2667 | if (num_dirty > thresh) { | ||
2668 | balance_dirty_pages_ratelimited_nr( | ||
2669 | root->fs_info->btree_inode->i_mapping, 1); | ||
2670 | } | ||
2671 | return; | ||
2672 | } | ||
2673 | |||
2674 | void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr) | ||
2675 | { | ||
2676 | /* | ||
2677 | * looks as though older kernels can get into trouble with | ||
2678 | * this code, they end up stuck in balance_dirty_pages forever | ||
2679 | */ | ||
2680 | u64 num_dirty; | ||
2681 | unsigned long thresh = 32 * 1024 * 1024; | ||
2682 | |||
2683 | if (current->flags & PF_MEMALLOC) | ||
2684 | return; | ||
2685 | |||
2668 | num_dirty = root->fs_info->dirty_metadata_bytes; | 2686 | num_dirty = root->fs_info->dirty_metadata_bytes; |
2669 | 2687 | ||
2670 | if (num_dirty > thresh) { | 2688 | if (num_dirty > thresh) { |
@@ -2697,7 +2715,7 @@ int btree_lock_page_hook(struct page *page) | |||
2697 | goto out; | 2715 | goto out; |
2698 | 2716 | ||
2699 | len = page->private >> 2; | 2717 | len = page->private >> 2; |
2700 | eb = find_extent_buffer(io_tree, bytenr, len, GFP_NOFS); | 2718 | eb = find_extent_buffer(io_tree, bytenr, len); |
2701 | if (!eb) | 2719 | if (!eb) |
2702 | goto out; | 2720 | goto out; |
2703 | 2721 | ||
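Two of the disk-io.c hunks above move the device-list walk onto RCU: btrfs_congested_fn() gains an rcu_read_unlock() before returning, and write_all_supers() iterates with list_for_each_entry_rcu() instead of list_for_each_entry(). The reader-side shape of that pattern, reduced to a self-contained sketch (the struct and function names here are illustrative, not from the patch):

#include <linux/list.h>
#include <linux/rcupdate.h>

struct demo_device {
	struct list_head dev_list;
	int writeable;
};

/* Count writeable devices while another thread may be adding or removing
 * entries with list_add_rcu()/list_del_rcu() under the writers' lock.
 * The read side only needs rcu_read_lock() and must not sleep inside it. */
static int demo_count_writeable(struct list_head *head)
{
	struct demo_device *dev;
	int n = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(dev, head, dev_list)
		if (dev->writeable)
			n++;
	rcu_read_unlock();
	return n;
}

Writers keep their existing mutual exclusion (device_list_mutex in the hunks above); RCU only removes the locking requirement from readers.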
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h index 07b20dc2fd95..a0b610a67aae 100644 --- a/fs/btrfs/disk-io.h +++ b/fs/btrfs/disk-io.h | |||
@@ -55,35 +55,20 @@ int btrfs_commit_super(struct btrfs_root *root); | |||
55 | int btrfs_error_commit_super(struct btrfs_root *root); | 55 | int btrfs_error_commit_super(struct btrfs_root *root); |
56 | struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, | 56 | struct extent_buffer *btrfs_find_tree_block(struct btrfs_root *root, |
57 | u64 bytenr, u32 blocksize); | 57 | u64 bytenr, u32 blocksize); |
58 | struct btrfs_root *btrfs_lookup_fs_root(struct btrfs_fs_info *fs_info, | ||
59 | u64 root_objectid); | ||
60 | struct btrfs_root *btrfs_read_fs_root(struct btrfs_fs_info *fs_info, | ||
61 | struct btrfs_key *location, | ||
62 | const char *name, int namelen); | ||
63 | struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, | 58 | struct btrfs_root *btrfs_read_fs_root_no_radix(struct btrfs_root *tree_root, |
64 | struct btrfs_key *location); | 59 | struct btrfs_key *location); |
65 | struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, | 60 | struct btrfs_root *btrfs_read_fs_root_no_name(struct btrfs_fs_info *fs_info, |
66 | struct btrfs_key *location); | 61 | struct btrfs_key *location); |
67 | int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info); | 62 | int btrfs_cleanup_fs_roots(struct btrfs_fs_info *fs_info); |
68 | int btrfs_insert_dev_radix(struct btrfs_root *root, | ||
69 | struct block_device *bdev, | ||
70 | u64 device_id, | ||
71 | u64 block_start, | ||
72 | u64 num_blocks); | ||
73 | void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); | 63 | void btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); |
64 | void __btrfs_btree_balance_dirty(struct btrfs_root *root, unsigned long nr); | ||
74 | int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); | 65 | int btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root); |
75 | void btrfs_mark_buffer_dirty(struct extent_buffer *buf); | 66 | void btrfs_mark_buffer_dirty(struct extent_buffer *buf); |
76 | void btrfs_mark_buffer_dirty_nonblocking(struct extent_buffer *buf); | ||
77 | int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid); | 67 | int btrfs_buffer_uptodate(struct extent_buffer *buf, u64 parent_transid); |
78 | int btrfs_set_buffer_uptodate(struct extent_buffer *buf); | 68 | int btrfs_set_buffer_uptodate(struct extent_buffer *buf); |
79 | int wait_on_tree_block_writeback(struct btrfs_root *root, | ||
80 | struct extent_buffer *buf); | ||
81 | int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); | 69 | int btrfs_read_buffer(struct extent_buffer *buf, u64 parent_transid); |
82 | u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len); | 70 | u32 btrfs_csum_data(struct btrfs_root *root, char *data, u32 seed, size_t len); |
83 | void btrfs_csum_final(u32 crc, char *result); | 71 | void btrfs_csum_final(u32 crc, char *result); |
84 | int btrfs_open_device(struct btrfs_device *dev); | ||
85 | int btrfs_verify_block_csum(struct btrfs_root *root, | ||
86 | struct extent_buffer *buf); | ||
87 | int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, | 72 | int btrfs_bio_wq_end_io(struct btrfs_fs_info *info, struct bio *bio, |
88 | int metadata); | 73 | int metadata); |
89 | int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | 74 | int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, |
@@ -91,8 +76,6 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode, | |||
91 | unsigned long bio_flags, u64 bio_offset, | 76 | unsigned long bio_flags, u64 bio_offset, |
92 | extent_submit_bio_hook_t *submit_bio_start, | 77 | extent_submit_bio_hook_t *submit_bio_start, |
93 | extent_submit_bio_hook_t *submit_bio_done); | 78 | extent_submit_bio_hook_t *submit_bio_done); |
94 | |||
95 | int btrfs_congested_async(struct btrfs_fs_info *info, int iodone); | ||
96 | unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); | 79 | unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info); |
97 | int btrfs_write_tree_block(struct extent_buffer *buf); | 80 | int btrfs_write_tree_block(struct extent_buffer *buf); |
98 | int btrfs_wait_tree_block_writeback(struct extent_buffer *buf); | 81 | int btrfs_wait_tree_block_writeback(struct extent_buffer *buf); |
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c index b4ffad859adb..1b8dc33778f9 100644 --- a/fs/btrfs/export.c +++ b/fs/btrfs/export.c | |||
@@ -32,7 +32,7 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, | |||
32 | len = BTRFS_FID_SIZE_NON_CONNECTABLE; | 32 | len = BTRFS_FID_SIZE_NON_CONNECTABLE; |
33 | type = FILEID_BTRFS_WITHOUT_PARENT; | 33 | type = FILEID_BTRFS_WITHOUT_PARENT; |
34 | 34 | ||
35 | fid->objectid = inode->i_ino; | 35 | fid->objectid = btrfs_ino(inode); |
36 | fid->root_objectid = BTRFS_I(inode)->root->objectid; | 36 | fid->root_objectid = BTRFS_I(inode)->root->objectid; |
37 | fid->gen = inode->i_generation; | 37 | fid->gen = inode->i_generation; |
38 | 38 | ||
@@ -178,13 +178,13 @@ static struct dentry *btrfs_get_parent(struct dentry *child) | |||
178 | if (!path) | 178 | if (!path) |
179 | return ERR_PTR(-ENOMEM); | 179 | return ERR_PTR(-ENOMEM); |
180 | 180 | ||
181 | if (dir->i_ino == BTRFS_FIRST_FREE_OBJECTID) { | 181 | if (btrfs_ino(dir) == BTRFS_FIRST_FREE_OBJECTID) { |
182 | key.objectid = root->root_key.objectid; | 182 | key.objectid = root->root_key.objectid; |
183 | key.type = BTRFS_ROOT_BACKREF_KEY; | 183 | key.type = BTRFS_ROOT_BACKREF_KEY; |
184 | key.offset = (u64)-1; | 184 | key.offset = (u64)-1; |
185 | root = root->fs_info->tree_root; | 185 | root = root->fs_info->tree_root; |
186 | } else { | 186 | } else { |
187 | key.objectid = dir->i_ino; | 187 | key.objectid = btrfs_ino(dir); |
188 | key.type = BTRFS_INODE_REF_KEY; | 188 | key.type = BTRFS_INODE_REF_KEY; |
189 | key.offset = (u64)-1; | 189 | key.offset = (u64)-1; |
190 | } | 190 | } |
@@ -244,6 +244,7 @@ static int btrfs_get_name(struct dentry *parent, char *name, | |||
244 | struct btrfs_key key; | 244 | struct btrfs_key key; |
245 | int name_len; | 245 | int name_len; |
246 | int ret; | 246 | int ret; |
247 | u64 ino; | ||
247 | 248 | ||
248 | if (!dir || !inode) | 249 | if (!dir || !inode) |
249 | return -EINVAL; | 250 | return -EINVAL; |
@@ -251,19 +252,21 @@ static int btrfs_get_name(struct dentry *parent, char *name, | |||
251 | if (!S_ISDIR(dir->i_mode)) | 252 | if (!S_ISDIR(dir->i_mode)) |
252 | return -EINVAL; | 253 | return -EINVAL; |
253 | 254 | ||
255 | ino = btrfs_ino(inode); | ||
256 | |||
254 | path = btrfs_alloc_path(); | 257 | path = btrfs_alloc_path(); |
255 | if (!path) | 258 | if (!path) |
256 | return -ENOMEM; | 259 | return -ENOMEM; |
257 | path->leave_spinning = 1; | 260 | path->leave_spinning = 1; |
258 | 261 | ||
259 | if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { | 262 | if (ino == BTRFS_FIRST_FREE_OBJECTID) { |
260 | key.objectid = BTRFS_I(inode)->root->root_key.objectid; | 263 | key.objectid = BTRFS_I(inode)->root->root_key.objectid; |
261 | key.type = BTRFS_ROOT_BACKREF_KEY; | 264 | key.type = BTRFS_ROOT_BACKREF_KEY; |
262 | key.offset = (u64)-1; | 265 | key.offset = (u64)-1; |
263 | root = root->fs_info->tree_root; | 266 | root = root->fs_info->tree_root; |
264 | } else { | 267 | } else { |
265 | key.objectid = inode->i_ino; | 268 | key.objectid = ino; |
266 | key.offset = dir->i_ino; | 269 | key.offset = btrfs_ino(dir); |
267 | key.type = BTRFS_INODE_REF_KEY; | 270 | key.type = BTRFS_INODE_REF_KEY; |
268 | } | 271 | } |
269 | 272 | ||
@@ -272,7 +275,7 @@ static int btrfs_get_name(struct dentry *parent, char *name, | |||
272 | btrfs_free_path(path); | 275 | btrfs_free_path(path); |
273 | return ret; | 276 | return ret; |
274 | } else if (ret > 0) { | 277 | } else if (ret > 0) { |
275 | if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { | 278 | if (ino == BTRFS_FIRST_FREE_OBJECTID) { |
276 | path->slots[0]--; | 279 | path->slots[0]--; |
277 | } else { | 280 | } else { |
278 | btrfs_free_path(path); | 281 | btrfs_free_path(path); |
@@ -281,11 +284,11 @@ static int btrfs_get_name(struct dentry *parent, char *name, | |||
281 | } | 284 | } |
282 | leaf = path->nodes[0]; | 285 | leaf = path->nodes[0]; |
283 | 286 | ||
284 | if (inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) { | 287 | if (ino == BTRFS_FIRST_FREE_OBJECTID) { |
285 | rref = btrfs_item_ptr(leaf, path->slots[0], | 288 | rref = btrfs_item_ptr(leaf, path->slots[0], |
286 | struct btrfs_root_ref); | 289 | struct btrfs_root_ref); |
287 | name_ptr = (unsigned long)(rref + 1); | 290 | name_ptr = (unsigned long)(rref + 1); |
288 | name_len = btrfs_root_ref_name_len(leaf, rref); | 291 | name_len = btrfs_root_ref_name_len(leaf, rref); |
289 | } else { | 292 | } else { |
290 | iref = btrfs_item_ptr(leaf, path->slots[0], | 293 | iref = btrfs_item_ptr(leaf, path->slots[0], |
291 | struct btrfs_inode_ref); | 294 | struct btrfs_inode_ref); |
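The export.c hunks replace every direct read of inode->i_ino with the btrfs_ino() accessor. The diff itself suggests why: this series adds per-root cache inodes (note the free_ino_ctl/free_ino_pinned handling in free_fs_root() above and the BTRFS_FREE_INO_OBJECTID check added to btrfs_check_data_free_space() below) that use reserved object ids, so code that wants the on-disk objectid is funnelled through one helper rather than trusting i_ino. As a rough illustration of what such an accessor can look like — a hedged sketch only, not the exact helper from btrfs_inode.h:

/* Illustrative sketch (assumes btrfs_inode.h context): return the btrfs
 * object id for an inode, using the key recorded at inode load time and
 * falling back to the VFS inode number when none has been recorded. */
static inline u64 demo_ino(struct inode *inode)
{
	u64 ino = BTRFS_I(inode)->location.objectid;

	if (ino == 0)
		ino = inode->i_ino;
	return ino;
}

Keeping one accessor means later changes to how the objectid is stored touch a single place instead of every i_ino user.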
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c index 9ee6bd55e16c..169bd62ce776 100644 --- a/fs/btrfs/extent-tree.c +++ b/fs/btrfs/extent-tree.c | |||
@@ -94,7 +94,7 @@ static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits) | |||
94 | return (cache->flags & bits) == bits; | 94 | return (cache->flags & bits) == bits; |
95 | } | 95 | } |
96 | 96 | ||
97 | void btrfs_get_block_group(struct btrfs_block_group_cache *cache) | 97 | static void btrfs_get_block_group(struct btrfs_block_group_cache *cache) |
98 | { | 98 | { |
99 | atomic_inc(&cache->count); | 99 | atomic_inc(&cache->count); |
100 | } | 100 | } |
@@ -105,6 +105,7 @@ void btrfs_put_block_group(struct btrfs_block_group_cache *cache) | |||
105 | WARN_ON(cache->pinned > 0); | 105 | WARN_ON(cache->pinned > 0); |
106 | WARN_ON(cache->reserved > 0); | 106 | WARN_ON(cache->reserved > 0); |
107 | WARN_ON(cache->reserved_pinned > 0); | 107 | WARN_ON(cache->reserved_pinned > 0); |
108 | kfree(cache->free_space_ctl); | ||
108 | kfree(cache); | 109 | kfree(cache); |
109 | } | 110 | } |
110 | } | 111 | } |
@@ -379,7 +380,7 @@ again: | |||
379 | break; | 380 | break; |
380 | 381 | ||
381 | caching_ctl->progress = last; | 382 | caching_ctl->progress = last; |
382 | btrfs_release_path(extent_root, path); | 383 | btrfs_release_path(path); |
383 | up_read(&fs_info->extent_commit_sem); | 384 | up_read(&fs_info->extent_commit_sem); |
384 | mutex_unlock(&caching_ctl->mutex); | 385 | mutex_unlock(&caching_ctl->mutex); |
385 | if (btrfs_transaction_in_commit(fs_info)) | 386 | if (btrfs_transaction_in_commit(fs_info)) |
@@ -754,8 +755,12 @@ again: | |||
754 | atomic_inc(&head->node.refs); | 755 | atomic_inc(&head->node.refs); |
755 | spin_unlock(&delayed_refs->lock); | 756 | spin_unlock(&delayed_refs->lock); |
756 | 757 | ||
757 | btrfs_release_path(root->fs_info->extent_root, path); | 758 | btrfs_release_path(path); |
758 | 759 | ||
760 | /* | ||
761 | * Mutex was contended, block until it's released and try | ||
762 | * again | ||
763 | */ | ||
759 | mutex_lock(&head->mutex); | 764 | mutex_lock(&head->mutex); |
760 | mutex_unlock(&head->mutex); | 765 | mutex_unlock(&head->mutex); |
761 | btrfs_put_delayed_ref(&head->node); | 766 | btrfs_put_delayed_ref(&head->node); |
@@ -934,7 +939,7 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans, | |||
934 | break; | 939 | break; |
935 | } | 940 | } |
936 | } | 941 | } |
937 | btrfs_release_path(root, path); | 942 | btrfs_release_path(path); |
938 | 943 | ||
939 | if (owner < BTRFS_FIRST_FREE_OBJECTID) | 944 | if (owner < BTRFS_FIRST_FREE_OBJECTID) |
940 | new_size += sizeof(*bi); | 945 | new_size += sizeof(*bi); |
@@ -947,7 +952,6 @@ static int convert_extent_item_v0(struct btrfs_trans_handle *trans, | |||
947 | BUG_ON(ret); | 952 | BUG_ON(ret); |
948 | 953 | ||
949 | ret = btrfs_extend_item(trans, root, path, new_size); | 954 | ret = btrfs_extend_item(trans, root, path, new_size); |
950 | BUG_ON(ret); | ||
951 | 955 | ||
952 | leaf = path->nodes[0]; | 956 | leaf = path->nodes[0]; |
953 | item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); | 957 | item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); |
@@ -1042,7 +1046,7 @@ again: | |||
1042 | return 0; | 1046 | return 0; |
1043 | #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 | 1047 | #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 |
1044 | key.type = BTRFS_EXTENT_REF_V0_KEY; | 1048 | key.type = BTRFS_EXTENT_REF_V0_KEY; |
1045 | btrfs_release_path(root, path); | 1049 | btrfs_release_path(path); |
1046 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | 1050 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); |
1047 | if (ret < 0) { | 1051 | if (ret < 0) { |
1048 | err = ret; | 1052 | err = ret; |
@@ -1080,7 +1084,7 @@ again: | |||
1080 | if (match_extent_data_ref(leaf, ref, root_objectid, | 1084 | if (match_extent_data_ref(leaf, ref, root_objectid, |
1081 | owner, offset)) { | 1085 | owner, offset)) { |
1082 | if (recow) { | 1086 | if (recow) { |
1083 | btrfs_release_path(root, path); | 1087 | btrfs_release_path(path); |
1084 | goto again; | 1088 | goto again; |
1085 | } | 1089 | } |
1086 | err = 0; | 1090 | err = 0; |
@@ -1141,7 +1145,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, | |||
1141 | if (match_extent_data_ref(leaf, ref, root_objectid, | 1145 | if (match_extent_data_ref(leaf, ref, root_objectid, |
1142 | owner, offset)) | 1146 | owner, offset)) |
1143 | break; | 1147 | break; |
1144 | btrfs_release_path(root, path); | 1148 | btrfs_release_path(path); |
1145 | key.offset++; | 1149 | key.offset++; |
1146 | ret = btrfs_insert_empty_item(trans, root, path, &key, | 1150 | ret = btrfs_insert_empty_item(trans, root, path, &key, |
1147 | size); | 1151 | size); |
@@ -1167,7 +1171,7 @@ static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans, | |||
1167 | btrfs_mark_buffer_dirty(leaf); | 1171 | btrfs_mark_buffer_dirty(leaf); |
1168 | ret = 0; | 1172 | ret = 0; |
1169 | fail: | 1173 | fail: |
1170 | btrfs_release_path(root, path); | 1174 | btrfs_release_path(path); |
1171 | return ret; | 1175 | return ret; |
1172 | } | 1176 | } |
1173 | 1177 | ||
@@ -1293,7 +1297,7 @@ static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans, | |||
1293 | ret = -ENOENT; | 1297 | ret = -ENOENT; |
1294 | #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 | 1298 | #ifdef BTRFS_COMPAT_EXTENT_TREE_V0 |
1295 | if (ret == -ENOENT && parent) { | 1299 | if (ret == -ENOENT && parent) { |
1296 | btrfs_release_path(root, path); | 1300 | btrfs_release_path(path); |
1297 | key.type = BTRFS_EXTENT_REF_V0_KEY; | 1301 | key.type = BTRFS_EXTENT_REF_V0_KEY; |
1298 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | 1302 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); |
1299 | if (ret > 0) | 1303 | if (ret > 0) |
@@ -1322,7 +1326,7 @@ static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans, | |||
1322 | } | 1326 | } |
1323 | 1327 | ||
1324 | ret = btrfs_insert_empty_item(trans, root, path, &key, 0); | 1328 | ret = btrfs_insert_empty_item(trans, root, path, &key, 0); |
1325 | btrfs_release_path(root, path); | 1329 | btrfs_release_path(path); |
1326 | return ret; | 1330 | return ret; |
1327 | } | 1331 | } |
1328 | 1332 | ||
@@ -1555,7 +1559,6 @@ int setup_inline_extent_backref(struct btrfs_trans_handle *trans, | |||
1555 | size = btrfs_extent_inline_ref_size(type); | 1559 | size = btrfs_extent_inline_ref_size(type); |
1556 | 1560 | ||
1557 | ret = btrfs_extend_item(trans, root, path, size); | 1561 | ret = btrfs_extend_item(trans, root, path, size); |
1558 | BUG_ON(ret); | ||
1559 | 1562 | ||
1560 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); | 1563 | ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item); |
1561 | refs = btrfs_extent_refs(leaf, ei); | 1564 | refs = btrfs_extent_refs(leaf, ei); |
@@ -1608,7 +1611,7 @@ static int lookup_extent_backref(struct btrfs_trans_handle *trans, | |||
1608 | if (ret != -ENOENT) | 1611 | if (ret != -ENOENT) |
1609 | return ret; | 1612 | return ret; |
1610 | 1613 | ||
1611 | btrfs_release_path(root, path); | 1614 | btrfs_release_path(path); |
1612 | *ref_ret = NULL; | 1615 | *ref_ret = NULL; |
1613 | 1616 | ||
1614 | if (owner < BTRFS_FIRST_FREE_OBJECTID) { | 1617 | if (owner < BTRFS_FIRST_FREE_OBJECTID) { |
@@ -1684,7 +1687,6 @@ int update_inline_extent_backref(struct btrfs_trans_handle *trans, | |||
1684 | end - ptr - size); | 1687 | end - ptr - size); |
1685 | item_size -= size; | 1688 | item_size -= size; |
1686 | ret = btrfs_truncate_item(trans, root, path, item_size, 1); | 1689 | ret = btrfs_truncate_item(trans, root, path, item_size, 1); |
1687 | BUG_ON(ret); | ||
1688 | } | 1690 | } |
1689 | btrfs_mark_buffer_dirty(leaf); | 1691 | btrfs_mark_buffer_dirty(leaf); |
1690 | return 0; | 1692 | return 0; |
@@ -1862,7 +1864,7 @@ static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans, | |||
1862 | __run_delayed_extent_op(extent_op, leaf, item); | 1864 | __run_delayed_extent_op(extent_op, leaf, item); |
1863 | 1865 | ||
1864 | btrfs_mark_buffer_dirty(leaf); | 1866 | btrfs_mark_buffer_dirty(leaf); |
1865 | btrfs_release_path(root->fs_info->extent_root, path); | 1867 | btrfs_release_path(path); |
1866 | 1868 | ||
1867 | path->reada = 1; | 1869 | path->reada = 1; |
1868 | path->leave_spinning = 1; | 1870 | path->leave_spinning = 1; |
@@ -2297,6 +2299,10 @@ again: | |||
2297 | atomic_inc(&ref->refs); | 2299 | atomic_inc(&ref->refs); |
2298 | 2300 | ||
2299 | spin_unlock(&delayed_refs->lock); | 2301 | spin_unlock(&delayed_refs->lock); |
2302 | /* | ||
2303 | * Mutex was contended, block until it's | ||
2304 | * released and try again | ||
2305 | */ | ||
2300 | mutex_lock(&head->mutex); | 2306 | mutex_lock(&head->mutex); |
2301 | mutex_unlock(&head->mutex); | 2307 | mutex_unlock(&head->mutex); |
2302 | 2308 | ||
@@ -2361,8 +2367,12 @@ static noinline int check_delayed_ref(struct btrfs_trans_handle *trans, | |||
2361 | atomic_inc(&head->node.refs); | 2367 | atomic_inc(&head->node.refs); |
2362 | spin_unlock(&delayed_refs->lock); | 2368 | spin_unlock(&delayed_refs->lock); |
2363 | 2369 | ||
2364 | btrfs_release_path(root->fs_info->extent_root, path); | 2370 | btrfs_release_path(path); |
2365 | 2371 | ||
2372 | /* | ||
2373 | * Mutex was contended, block until it's released and let | ||
2374 | * caller try again | ||
2375 | */ | ||
2366 | mutex_lock(&head->mutex); | 2376 | mutex_lock(&head->mutex); |
2367 | mutex_unlock(&head->mutex); | 2377 | mutex_unlock(&head->mutex); |
2368 | btrfs_put_delayed_ref(&head->node); | 2378 | btrfs_put_delayed_ref(&head->node); |
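The two hunks above add comments explaining an easy-to-misread mutex_lock()/mutex_unlock() pair on the delayed-ref head: nothing is done while the mutex is held; acquiring it simply blocks the caller until the current holder finishes with the ref head, after which the caller retries its lookup. Reduced to a sketch with invented names:

#include <linux/mutex.h>

/* Block until the current holder of 'lock' releases it. No work is done
 * under the lock; the acquire/release pair is purely a wait-for-holder
 * barrier, after which the caller goes back and retries. */
static void demo_wait_for_holder(struct mutex *lock)
{
	mutex_lock(lock);
	mutex_unlock(lock);
}

As in the hunks above, the caller drops its own spinlock and releases the btree path before blocking here, so it cannot deadlock against whoever holds the mutex.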
@@ -2510,126 +2520,6 @@ out: | |||
2510 | return ret; | 2520 | return ret; |
2511 | } | 2521 | } |
2512 | 2522 | ||
2513 | #if 0 | ||
2514 | int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root, | ||
2515 | struct extent_buffer *buf, u32 nr_extents) | ||
2516 | { | ||
2517 | struct btrfs_key key; | ||
2518 | struct btrfs_file_extent_item *fi; | ||
2519 | u64 root_gen; | ||
2520 | u32 nritems; | ||
2521 | int i; | ||
2522 | int level; | ||
2523 | int ret = 0; | ||
2524 | int shared = 0; | ||
2525 | |||
2526 | if (!root->ref_cows) | ||
2527 | return 0; | ||
2528 | |||
2529 | if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) { | ||
2530 | shared = 0; | ||
2531 | root_gen = root->root_key.offset; | ||
2532 | } else { | ||
2533 | shared = 1; | ||
2534 | root_gen = trans->transid - 1; | ||
2535 | } | ||
2536 | |||
2537 | level = btrfs_header_level(buf); | ||
2538 | nritems = btrfs_header_nritems(buf); | ||
2539 | |||
2540 | if (level == 0) { | ||
2541 | struct btrfs_leaf_ref *ref; | ||
2542 | struct btrfs_extent_info *info; | ||
2543 | |||
2544 | ref = btrfs_alloc_leaf_ref(root, nr_extents); | ||
2545 | if (!ref) { | ||
2546 | ret = -ENOMEM; | ||
2547 | goto out; | ||
2548 | } | ||
2549 | |||
2550 | ref->root_gen = root_gen; | ||
2551 | ref->bytenr = buf->start; | ||
2552 | ref->owner = btrfs_header_owner(buf); | ||
2553 | ref->generation = btrfs_header_generation(buf); | ||
2554 | ref->nritems = nr_extents; | ||
2555 | info = ref->extents; | ||
2556 | |||
2557 | for (i = 0; nr_extents > 0 && i < nritems; i++) { | ||
2558 | u64 disk_bytenr; | ||
2559 | btrfs_item_key_to_cpu(buf, &key, i); | ||
2560 | if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) | ||
2561 | continue; | ||
2562 | fi = btrfs_item_ptr(buf, i, | ||
2563 | struct btrfs_file_extent_item); | ||
2564 | if (btrfs_file_extent_type(buf, fi) == | ||
2565 | BTRFS_FILE_EXTENT_INLINE) | ||
2566 | continue; | ||
2567 | disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi); | ||
2568 | if (disk_bytenr == 0) | ||
2569 | continue; | ||
2570 | |||
2571 | info->bytenr = disk_bytenr; | ||
2572 | info->num_bytes = | ||
2573 | btrfs_file_extent_disk_num_bytes(buf, fi); | ||
2574 | info->objectid = key.objectid; | ||
2575 | info->offset = key.offset; | ||
2576 | info++; | ||
2577 | } | ||
2578 | |||
2579 | ret = btrfs_add_leaf_ref(root, ref, shared); | ||
2580 | if (ret == -EEXIST && shared) { | ||
2581 | struct btrfs_leaf_ref *old; | ||
2582 | old = btrfs_lookup_leaf_ref(root, ref->bytenr); | ||
2583 | BUG_ON(!old); | ||
2584 | btrfs_remove_leaf_ref(root, old); | ||
2585 | btrfs_free_leaf_ref(root, old); | ||
2586 | ret = btrfs_add_leaf_ref(root, ref, shared); | ||
2587 | } | ||
2588 | WARN_ON(ret); | ||
2589 | btrfs_free_leaf_ref(root, ref); | ||
2590 | } | ||
2591 | out: | ||
2592 | return ret; | ||
2593 | } | ||
2594 | |||
2595 | /* when a block goes through cow, we update the reference counts of | ||
2596 | * everything that block points to. The internal pointers of the block | ||
2597 | * can be in just about any order, and it is likely to have clusters of | ||
2598 | * things that are close together and clusters of things that are not. | ||
2599 | * | ||
2600 | * To help reduce the seeks that come with updating all of these reference | ||
2601 | * counts, sort them by byte number before actual updates are done. | ||
2602 | * | ||
2603 | * struct refsort is used to match byte number to slot in the btree block. | ||
2604 | * we sort based on the byte number and then use the slot to actually | ||
2605 | * find the item. | ||
2606 | * | ||
2607 | * struct refsort is smaller than strcut btrfs_item and smaller than | ||
2608 | * struct btrfs_key_ptr. Since we're currently limited to the page size | ||
2609 | * for a btree block, there's no way for a kmalloc of refsorts for a | ||
2610 | * single node to be bigger than a page. | ||
2611 | */ | ||
2612 | struct refsort { | ||
2613 | u64 bytenr; | ||
2614 | u32 slot; | ||
2615 | }; | ||
2616 | |||
2617 | /* | ||
2618 | * for passing into sort() | ||
2619 | */ | ||
2620 | static int refsort_cmp(const void *a_void, const void *b_void) | ||
2621 | { | ||
2622 | const struct refsort *a = a_void; | ||
2623 | const struct refsort *b = b_void; | ||
2624 | |||
2625 | if (a->bytenr < b->bytenr) | ||
2626 | return -1; | ||
2627 | if (a->bytenr > b->bytenr) | ||
2628 | return 1; | ||
2629 | return 0; | ||
2630 | } | ||
2631 | #endif | ||
2632 | |||
2633 | static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, | 2523 | static int __btrfs_mod_ref(struct btrfs_trans_handle *trans, |
2634 | struct btrfs_root *root, | 2524 | struct btrfs_root *root, |
2635 | struct extent_buffer *buf, | 2525 | struct extent_buffer *buf, |
@@ -2732,7 +2622,7 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans, | |||
2732 | bi = btrfs_item_ptr_offset(leaf, path->slots[0]); | 2622 | bi = btrfs_item_ptr_offset(leaf, path->slots[0]); |
2733 | write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); | 2623 | write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item)); |
2734 | btrfs_mark_buffer_dirty(leaf); | 2624 | btrfs_mark_buffer_dirty(leaf); |
2735 | btrfs_release_path(extent_root, path); | 2625 | btrfs_release_path(path); |
2736 | fail: | 2626 | fail: |
2737 | if (ret) | 2627 | if (ret) |
2738 | return ret; | 2628 | return ret; |
@@ -2785,7 +2675,7 @@ again: | |||
2785 | inode = lookup_free_space_inode(root, block_group, path); | 2675 | inode = lookup_free_space_inode(root, block_group, path); |
2786 | if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { | 2676 | if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { |
2787 | ret = PTR_ERR(inode); | 2677 | ret = PTR_ERR(inode); |
2788 | btrfs_release_path(root, path); | 2678 | btrfs_release_path(path); |
2789 | goto out; | 2679 | goto out; |
2790 | } | 2680 | } |
2791 | 2681 | ||
@@ -2854,7 +2744,7 @@ again: | |||
2854 | out_put: | 2744 | out_put: |
2855 | iput(inode); | 2745 | iput(inode); |
2856 | out_free: | 2746 | out_free: |
2857 | btrfs_release_path(root, path); | 2747 | btrfs_release_path(path); |
2858 | out: | 2748 | out: |
2859 | spin_lock(&block_group->lock); | 2749 | spin_lock(&block_group->lock); |
2860 | block_group->disk_cache_state = dcs; | 2750 | block_group->disk_cache_state = dcs; |
@@ -3144,7 +3034,8 @@ int btrfs_check_data_free_space(struct inode *inode, u64 bytes) | |||
3144 | /* make sure bytes are sectorsize aligned */ | 3034 | /* make sure bytes are sectorsize aligned */ |
3145 | bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); | 3035 | bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1); |
3146 | 3036 | ||
3147 | if (root == root->fs_info->tree_root) { | 3037 | if (root == root->fs_info->tree_root || |
3038 | BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) { | ||
3148 | alloc_chunk = 0; | 3039 | alloc_chunk = 0; |
3149 | committed = 1; | 3040 | committed = 1; |
3150 | } | 3041 | } |
@@ -3211,18 +3102,6 @@ commit_trans: | |||
3211 | goto again; | 3102 | goto again; |
3212 | } | 3103 | } |
3213 | 3104 | ||
3214 | #if 0 /* I hope we never need this code again, just in case */ | ||
3215 | printk(KERN_ERR "no space left, need %llu, %llu bytes_used, " | ||
3216 | "%llu bytes_reserved, " "%llu bytes_pinned, " | ||
3217 | "%llu bytes_readonly, %llu may use %llu total\n", | ||
3218 | (unsigned long long)bytes, | ||
3219 | (unsigned long long)data_sinfo->bytes_used, | ||
3220 | (unsigned long long)data_sinfo->bytes_reserved, | ||
3221 | (unsigned long long)data_sinfo->bytes_pinned, | ||
3222 | (unsigned long long)data_sinfo->bytes_readonly, | ||
3223 | (unsigned long long)data_sinfo->bytes_may_use, | ||
3224 | (unsigned long long)data_sinfo->total_bytes); | ||
3225 | #endif | ||
3226 | return -ENOSPC; | 3105 | return -ENOSPC; |
3227 | } | 3106 | } |
3228 | data_sinfo->bytes_may_use += bytes; | 3107 | data_sinfo->bytes_may_use += bytes; |
@@ -3425,6 +3304,10 @@ static int shrink_delalloc(struct btrfs_trans_handle *trans, | |||
3425 | if (reserved == 0) | 3304 | if (reserved == 0) |
3426 | return 0; | 3305 | return 0; |
3427 | 3306 | ||
3307 | /* nothing to shrink - nothing to reclaim */ | ||
3308 | if (root->fs_info->delalloc_bytes == 0) | ||
3309 | return 0; | ||
3310 | |||
3428 | max_reclaim = min(reserved, to_reclaim); | 3311 | max_reclaim = min(reserved, to_reclaim); |
3429 | 3312 | ||
3430 | while (loops < 1024) { | 3313 | while (loops < 1024) { |
@@ -3651,8 +3534,8 @@ static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv, | |||
3651 | spin_unlock(&block_rsv->lock); | 3534 | spin_unlock(&block_rsv->lock); |
3652 | } | 3535 | } |
3653 | 3536 | ||
3654 | void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, | 3537 | static void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv, |
3655 | struct btrfs_block_rsv *dest, u64 num_bytes) | 3538 | struct btrfs_block_rsv *dest, u64 num_bytes) |
3656 | { | 3539 | { |
3657 | struct btrfs_space_info *space_info = block_rsv->space_info; | 3540 | struct btrfs_space_info *space_info = block_rsv->space_info; |
3658 | 3541 | ||
@@ -3855,23 +3738,7 @@ static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info) | |||
3855 | u64 meta_used; | 3738 | u64 meta_used; |
3856 | u64 data_used; | 3739 | u64 data_used; |
3857 | int csum_size = btrfs_super_csum_size(&fs_info->super_copy); | 3740 | int csum_size = btrfs_super_csum_size(&fs_info->super_copy); |
3858 | #if 0 | ||
3859 | /* | ||
3860 | * per tree used space accounting can be inaccuracy, so we | ||
3861 | * can't rely on it. | ||
3862 | */ | ||
3863 | spin_lock(&fs_info->extent_root->accounting_lock); | ||
3864 | num_bytes = btrfs_root_used(&fs_info->extent_root->root_item); | ||
3865 | spin_unlock(&fs_info->extent_root->accounting_lock); | ||
3866 | |||
3867 | spin_lock(&fs_info->csum_root->accounting_lock); | ||
3868 | num_bytes += btrfs_root_used(&fs_info->csum_root->root_item); | ||
3869 | spin_unlock(&fs_info->csum_root->accounting_lock); | ||
3870 | 3741 | ||
3871 | spin_lock(&fs_info->tree_root->accounting_lock); | ||
3872 | num_bytes += btrfs_root_used(&fs_info->tree_root->root_item); | ||
3873 | spin_unlock(&fs_info->tree_root->accounting_lock); | ||
3874 | #endif | ||
3875 | sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); | 3742 | sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA); |
3876 | spin_lock(&sinfo->lock); | 3743 | spin_lock(&sinfo->lock); |
3877 | data_used = sinfo->bytes_used; | 3744 | data_used = sinfo->bytes_used; |
@@ -3924,10 +3791,7 @@ static void update_global_block_rsv(struct btrfs_fs_info *fs_info) | |||
3924 | block_rsv->reserved = block_rsv->size; | 3791 | block_rsv->reserved = block_rsv->size; |
3925 | block_rsv->full = 1; | 3792 | block_rsv->full = 1; |
3926 | } | 3793 | } |
3927 | #if 0 | 3794 | |
3928 | printk(KERN_INFO"global block rsv size %llu reserved %llu\n", | ||
3929 | block_rsv->size, block_rsv->reserved); | ||
3930 | #endif | ||
3931 | spin_unlock(&sinfo->lock); | 3795 | spin_unlock(&sinfo->lock); |
3932 | spin_unlock(&block_rsv->lock); | 3796 | spin_unlock(&block_rsv->lock); |
3933 | } | 3797 | } |
@@ -3973,12 +3837,6 @@ static void release_global_block_rsv(struct btrfs_fs_info *fs_info) | |||
3973 | WARN_ON(fs_info->chunk_block_rsv.reserved > 0); | 3837 | WARN_ON(fs_info->chunk_block_rsv.reserved > 0); |
3974 | } | 3838 | } |
3975 | 3839 | ||
3976 | static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items) | ||
3977 | { | ||
3978 | return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) * | ||
3979 | 3 * num_items; | ||
3980 | } | ||
3981 | |||
3982 | int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, | 3840 | int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, |
3983 | struct btrfs_root *root, | 3841 | struct btrfs_root *root, |
3984 | int num_items) | 3842 | int num_items) |
@@ -3989,7 +3847,7 @@ int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans, | |||
3989 | if (num_items == 0 || root->fs_info->chunk_root == root) | 3847 | if (num_items == 0 || root->fs_info->chunk_root == root) |
3990 | return 0; | 3848 | return 0; |
3991 | 3849 | ||
3992 | num_bytes = calc_trans_metadata_size(root, num_items); | 3850 | num_bytes = btrfs_calc_trans_metadata_size(root, num_items); |
3993 | ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv, | 3851 | ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv, |
3994 | num_bytes); | 3852 | num_bytes); |
3995 | if (!ret) { | 3853 | if (!ret) { |
@@ -4028,14 +3886,14 @@ int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans, | |||
4028 | * If all of the metadata space is used, we can commit | 3886 | * If all of the metadata space is used, we can commit |
4029 | * transaction and use space it freed. | 3887 | * transaction and use space it freed. |
4030 | */ | 3888 | */ |
4031 | u64 num_bytes = calc_trans_metadata_size(root, 4); | 3889 | u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); |
4032 | return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); | 3890 | return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); |
4033 | } | 3891 | } |
4034 | 3892 | ||
4035 | void btrfs_orphan_release_metadata(struct inode *inode) | 3893 | void btrfs_orphan_release_metadata(struct inode *inode) |
4036 | { | 3894 | { |
4037 | struct btrfs_root *root = BTRFS_I(inode)->root; | 3895 | struct btrfs_root *root = BTRFS_I(inode)->root; |
4038 | u64 num_bytes = calc_trans_metadata_size(root, 4); | 3896 | u64 num_bytes = btrfs_calc_trans_metadata_size(root, 4); |
4039 | btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); | 3897 | btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes); |
4040 | } | 3898 | } |
4041 | 3899 | ||
@@ -4049,7 +3907,7 @@ int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans, | |||
4049 | * two for root back/forward refs, two for directory entries | 3907 | * two for root back/forward refs, two for directory entries |
4050 | * and one for root of the snapshot. | 3908 | * and one for root of the snapshot. |
4051 | */ | 3909 | */ |
4052 | u64 num_bytes = calc_trans_metadata_size(root, 5); | 3910 | u64 num_bytes = btrfs_calc_trans_metadata_size(root, 5); |
4053 | dst_rsv->space_info = src_rsv->space_info; | 3911 | dst_rsv->space_info = src_rsv->space_info; |
4054 | return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); | 3912 | return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes); |
4055 | } | 3913 | } |
@@ -4078,7 +3936,7 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes) | |||
4078 | 3936 | ||
4079 | if (nr_extents > reserved_extents) { | 3937 | if (nr_extents > reserved_extents) { |
4080 | nr_extents -= reserved_extents; | 3938 | nr_extents -= reserved_extents; |
4081 | to_reserve = calc_trans_metadata_size(root, nr_extents); | 3939 | to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents); |
4082 | } else { | 3940 | } else { |
4083 | nr_extents = 0; | 3941 | nr_extents = 0; |
4084 | to_reserve = 0; | 3942 | to_reserve = 0; |
@@ -4132,7 +3990,7 @@ void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes) | |||
4132 | 3990 | ||
4133 | to_free = calc_csum_metadata_size(inode, num_bytes); | 3991 | to_free = calc_csum_metadata_size(inode, num_bytes); |
4134 | if (nr_extents > 0) | 3992 | if (nr_extents > 0) |
4135 | to_free += calc_trans_metadata_size(root, nr_extents); | 3993 | to_free += btrfs_calc_trans_metadata_size(root, nr_extents); |
4136 | 3994 | ||
4137 | btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv, | 3995 | btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv, |
4138 | to_free); | 3996 | to_free); |
@@ -4541,7 +4399,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
4541 | NULL, refs_to_drop, | 4399 | NULL, refs_to_drop, |
4542 | is_data); | 4400 | is_data); |
4543 | BUG_ON(ret); | 4401 | BUG_ON(ret); |
4544 | btrfs_release_path(extent_root, path); | 4402 | btrfs_release_path(path); |
4545 | path->leave_spinning = 1; | 4403 | path->leave_spinning = 1; |
4546 | 4404 | ||
4547 | key.objectid = bytenr; | 4405 | key.objectid = bytenr; |
@@ -4580,7 +4438,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
4580 | owner_objectid, 0); | 4438 | owner_objectid, 0); |
4581 | BUG_ON(ret < 0); | 4439 | BUG_ON(ret < 0); |
4582 | 4440 | ||
4583 | btrfs_release_path(extent_root, path); | 4441 | btrfs_release_path(path); |
4584 | path->leave_spinning = 1; | 4442 | path->leave_spinning = 1; |
4585 | 4443 | ||
4586 | key.objectid = bytenr; | 4444 | key.objectid = bytenr; |
@@ -4650,7 +4508,7 @@ static int __btrfs_free_extent(struct btrfs_trans_handle *trans, | |||
4650 | ret = btrfs_del_items(trans, extent_root, path, path->slots[0], | 4508 | ret = btrfs_del_items(trans, extent_root, path, path->slots[0], |
4651 | num_to_del); | 4509 | num_to_del); |
4652 | BUG_ON(ret); | 4510 | BUG_ON(ret); |
4653 | btrfs_release_path(extent_root, path); | 4511 | btrfs_release_path(path); |
4654 | 4512 | ||
4655 | if (is_data) { | 4513 | if (is_data) { |
4656 | ret = btrfs_del_csums(trans, root, bytenr, num_bytes); | 4514 | ret = btrfs_del_csums(trans, root, bytenr, num_bytes); |
@@ -4893,7 +4751,7 @@ wait_block_group_cache_progress(struct btrfs_block_group_cache *cache, | |||
4893 | return 0; | 4751 | return 0; |
4894 | 4752 | ||
4895 | wait_event(caching_ctl->wait, block_group_cache_done(cache) || | 4753 | wait_event(caching_ctl->wait, block_group_cache_done(cache) || |
4896 | (cache->free_space >= num_bytes)); | 4754 | (cache->free_space_ctl->free_space >= num_bytes)); |
4897 | 4755 | ||
4898 | put_caching_control(caching_ctl); | 4756 | put_caching_control(caching_ctl); |
4899 | return 0; | 4757 | return 0; |
@@ -6480,7 +6338,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root, | |||
6480 | trans->block_rsv = block_rsv; | 6338 | trans->block_rsv = block_rsv; |
6481 | } | 6339 | } |
6482 | } | 6340 | } |
6483 | btrfs_release_path(root, path); | 6341 | btrfs_release_path(path); |
6484 | BUG_ON(err); | 6342 | BUG_ON(err); |
6485 | 6343 | ||
6486 | ret = btrfs_del_root(trans, tree_root, &root->root_key); | 6344 | ret = btrfs_del_root(trans, tree_root, &root->root_key); |
@@ -6584,1514 +6442,6 @@ int btrfs_drop_subtree(struct btrfs_trans_handle *trans, | |||
6584 | return ret; | 6442 | return ret; |
6585 | } | 6443 | } |
6586 | 6444 | ||
6587 | #if 0 | ||
6588 | static unsigned long calc_ra(unsigned long start, unsigned long last, | ||
6589 | unsigned long nr) | ||
6590 | { | ||
6591 | return min(last, start + nr - 1); | ||
6592 | } | ||
6593 | |||
6594 | static noinline int relocate_inode_pages(struct inode *inode, u64 start, | ||
6595 | u64 len) | ||
6596 | { | ||
6597 | u64 page_start; | ||
6598 | u64 page_end; | ||
6599 | unsigned long first_index; | ||
6600 | unsigned long last_index; | ||
6601 | unsigned long i; | ||
6602 | struct page *page; | ||
6603 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | ||
6604 | struct file_ra_state *ra; | ||
6605 | struct btrfs_ordered_extent *ordered; | ||
6606 | unsigned int total_read = 0; | ||
6607 | unsigned int total_dirty = 0; | ||
6608 | int ret = 0; | ||
6609 | |||
6610 | ra = kzalloc(sizeof(*ra), GFP_NOFS); | ||
6611 | if (!ra) | ||
6612 | return -ENOMEM; | ||
6613 | |||
6614 | mutex_lock(&inode->i_mutex); | ||
6615 | first_index = start >> PAGE_CACHE_SHIFT; | ||
6616 | last_index = (start + len - 1) >> PAGE_CACHE_SHIFT; | ||
6617 | |||
6618 | /* make sure the dirty trick played by the caller work */ | ||
6619 | ret = invalidate_inode_pages2_range(inode->i_mapping, | ||
6620 | first_index, last_index); | ||
6621 | if (ret) | ||
6622 | goto out_unlock; | ||
6623 | |||
6624 | file_ra_state_init(ra, inode->i_mapping); | ||
6625 | |||
6626 | for (i = first_index ; i <= last_index; i++) { | ||
6627 | if (total_read % ra->ra_pages == 0) { | ||
6628 | btrfs_force_ra(inode->i_mapping, ra, NULL, i, | ||
6629 | calc_ra(i, last_index, ra->ra_pages)); | ||
6630 | } | ||
6631 | total_read++; | ||
6632 | again: | ||
6633 | if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode)) | ||
6634 | BUG_ON(1); | ||
6635 | page = grab_cache_page(inode->i_mapping, i); | ||
6636 | if (!page) { | ||
6637 | ret = -ENOMEM; | ||
6638 | goto out_unlock; | ||
6639 | } | ||
6640 | if (!PageUptodate(page)) { | ||
6641 | btrfs_readpage(NULL, page); | ||
6642 | lock_page(page); | ||
6643 | if (!PageUptodate(page)) { | ||
6644 | unlock_page(page); | ||
6645 | page_cache_release(page); | ||
6646 | ret = -EIO; | ||
6647 | goto out_unlock; | ||
6648 | } | ||
6649 | } | ||
6650 | wait_on_page_writeback(page); | ||
6651 | |||
6652 | page_start = (u64)page->index << PAGE_CACHE_SHIFT; | ||
6653 | page_end = page_start + PAGE_CACHE_SIZE - 1; | ||
6654 | lock_extent(io_tree, page_start, page_end, GFP_NOFS); | ||
6655 | |||
6656 | ordered = btrfs_lookup_ordered_extent(inode, page_start); | ||
6657 | if (ordered) { | ||
6658 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); | ||
6659 | unlock_page(page); | ||
6660 | page_cache_release(page); | ||
6661 | btrfs_start_ordered_extent(inode, ordered, 1); | ||
6662 | btrfs_put_ordered_extent(ordered); | ||
6663 | goto again; | ||
6664 | } | ||
6665 | set_page_extent_mapped(page); | ||
6666 | |||
6667 | if (i == first_index) | ||
6668 | set_extent_bits(io_tree, page_start, page_end, | ||
6669 | EXTENT_BOUNDARY, GFP_NOFS); | ||
6670 | btrfs_set_extent_delalloc(inode, page_start, page_end); | ||
6671 | |||
6672 | set_page_dirty(page); | ||
6673 | total_dirty++; | ||
6674 | |||
6675 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); | ||
6676 | unlock_page(page); | ||
6677 | page_cache_release(page); | ||
6678 | } | ||
6679 | |||
6680 | out_unlock: | ||
6681 | kfree(ra); | ||
6682 | mutex_unlock(&inode->i_mutex); | ||
6683 | balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty); | ||
6684 | return ret; | ||
6685 | } | ||
6686 | |||
6687 | static noinline int relocate_data_extent(struct inode *reloc_inode, | ||
6688 | struct btrfs_key *extent_key, | ||
6689 | u64 offset) | ||
6690 | { | ||
6691 | struct btrfs_root *root = BTRFS_I(reloc_inode)->root; | ||
6692 | struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree; | ||
6693 | struct extent_map *em; | ||
6694 | u64 start = extent_key->objectid - offset; | ||
6695 | u64 end = start + extent_key->offset - 1; | ||
6696 | |||
6697 | em = alloc_extent_map(GFP_NOFS); | ||
6698 | BUG_ON(!em); | ||
6699 | |||
6700 | em->start = start; | ||
6701 | em->len = extent_key->offset; | ||
6702 | em->block_len = extent_key->offset; | ||
6703 | em->block_start = extent_key->objectid; | ||
6704 | em->bdev = root->fs_info->fs_devices->latest_bdev; | ||
6705 | set_bit(EXTENT_FLAG_PINNED, &em->flags); | ||
6706 | |||
6707 | /* setup extent map to cheat btrfs_readpage */ | ||
6708 | lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); | ||
6709 | while (1) { | ||
6710 | int ret; | ||
6711 | write_lock(&em_tree->lock); | ||
6712 | ret = add_extent_mapping(em_tree, em); | ||
6713 | write_unlock(&em_tree->lock); | ||
6714 | if (ret != -EEXIST) { | ||
6715 | free_extent_map(em); | ||
6716 | break; | ||
6717 | } | ||
6718 | btrfs_drop_extent_cache(reloc_inode, start, end, 0); | ||
6719 | } | ||
6720 | unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS); | ||
6721 | |||
6722 | return relocate_inode_pages(reloc_inode, start, extent_key->offset); | ||
6723 | } | ||
6724 | |||
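relocate_data_extent() above keeps calling add_extent_mapping() and, whenever it gets -EEXIST, drops the conflicting cached range before retrying the insert. A self-contained userspace sketch of that insert-or-evict-and-retry loop is below; the toy one-slot cache and its helpers are assumptions made for the example, not a btrfs API.

    #include <errno.h>
    #include <stdio.h>

    struct one_slot_cache { int occupied; long key; };

    static int cache_insert(struct one_slot_cache *c, long key)
    {
            if (c->occupied)
                    return -EEXIST;
            c->occupied = 1;
            c->key = key;
            return 0;
    }

    static void cache_drop(struct one_slot_cache *c) { c->occupied = 0; }

    static void insert_or_replace(struct one_slot_cache *c, long key)
    {
            while (1) {
                    int ret = cache_insert(c, key);

                    if (ret != -EEXIST)
                            break;          /* inserted */
                    cache_drop(c);          /* evict the stale entry, retry */
            }
    }

    int main(void)
    {
            struct one_slot_cache c = { 1, 42 };

            insert_or_replace(&c, 7);
            printf("key=%ld\n", c.key);
            return 0;
    }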
6725 | struct btrfs_ref_path { | ||
6726 | u64 extent_start; | ||
6727 | u64 nodes[BTRFS_MAX_LEVEL]; | ||
6728 | u64 root_objectid; | ||
6729 | u64 root_generation; | ||
6730 | u64 owner_objectid; | ||
6731 | u32 num_refs; | ||
6732 | int lowest_level; | ||
6733 | int current_level; | ||
6734 | int shared_level; | ||
6735 | |||
6736 | struct btrfs_key node_keys[BTRFS_MAX_LEVEL]; | ||
6737 | u64 new_nodes[BTRFS_MAX_LEVEL]; | ||
6738 | }; | ||
6739 | |||
6740 | struct disk_extent { | ||
6741 | u64 ram_bytes; | ||
6742 | u64 disk_bytenr; | ||
6743 | u64 disk_num_bytes; | ||
6744 | u64 offset; | ||
6745 | u64 num_bytes; | ||
6746 | u8 compression; | ||
6747 | u8 encryption; | ||
6748 | u16 other_encoding; | ||
6749 | }; | ||
6750 | |||
6751 | static int is_cowonly_root(u64 root_objectid) | ||
6752 | { | ||
6753 | if (root_objectid == BTRFS_ROOT_TREE_OBJECTID || | ||
6754 | root_objectid == BTRFS_EXTENT_TREE_OBJECTID || | ||
6755 | root_objectid == BTRFS_CHUNK_TREE_OBJECTID || | ||
6756 | root_objectid == BTRFS_DEV_TREE_OBJECTID || | ||
6757 | root_objectid == BTRFS_TREE_LOG_OBJECTID || | ||
6758 | root_objectid == BTRFS_CSUM_TREE_OBJECTID) | ||
6759 | return 1; | ||
6760 | return 0; | ||
6761 | } | ||
6762 | |||
6763 | static noinline int __next_ref_path(struct btrfs_trans_handle *trans, | ||
6764 | struct btrfs_root *extent_root, | ||
6765 | struct btrfs_ref_path *ref_path, | ||
6766 | int first_time) | ||
6767 | { | ||
6768 | struct extent_buffer *leaf; | ||
6769 | struct btrfs_path *path; | ||
6770 | struct btrfs_extent_ref *ref; | ||
6771 | struct btrfs_key key; | ||
6772 | struct btrfs_key found_key; | ||
6773 | u64 bytenr; | ||
6774 | u32 nritems; | ||
6775 | int level; | ||
6776 | int ret = 1; | ||
6777 | |||
6778 | path = btrfs_alloc_path(); | ||
6779 | if (!path) | ||
6780 | return -ENOMEM; | ||
6781 | |||
6782 | if (first_time) { | ||
6783 | ref_path->lowest_level = -1; | ||
6784 | ref_path->current_level = -1; | ||
6785 | ref_path->shared_level = -1; | ||
6786 | goto walk_up; | ||
6787 | } | ||
6788 | walk_down: | ||
6789 | level = ref_path->current_level - 1; | ||
6790 | while (level >= -1) { | ||
6791 | u64 parent; | ||
6792 | if (level < ref_path->lowest_level) | ||
6793 | break; | ||
6794 | |||
6795 | if (level >= 0) | ||
6796 | bytenr = ref_path->nodes[level]; | ||
6797 | else | ||
6798 | bytenr = ref_path->extent_start; | ||
6799 | BUG_ON(bytenr == 0); | ||
6800 | |||
6801 | parent = ref_path->nodes[level + 1]; | ||
6802 | ref_path->nodes[level + 1] = 0; | ||
6803 | ref_path->current_level = level; | ||
6804 | BUG_ON(parent == 0); | ||
6805 | |||
6806 | key.objectid = bytenr; | ||
6807 | key.offset = parent + 1; | ||
6808 | key.type = BTRFS_EXTENT_REF_KEY; | ||
6809 | |||
6810 | ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0); | ||
6811 | if (ret < 0) | ||
6812 | goto out; | ||
6813 | BUG_ON(ret == 0); | ||
6814 | |||
6815 | leaf = path->nodes[0]; | ||
6816 | nritems = btrfs_header_nritems(leaf); | ||
6817 | if (path->slots[0] >= nritems) { | ||
6818 | ret = btrfs_next_leaf(extent_root, path); | ||
6819 | if (ret < 0) | ||
6820 | goto out; | ||
6821 | if (ret > 0) | ||
6822 | goto next; | ||
6823 | leaf = path->nodes[0]; | ||
6824 | } | ||
6825 | |||
6826 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | ||
6827 | if (found_key.objectid == bytenr && | ||
6828 | found_key.type == BTRFS_EXTENT_REF_KEY) { | ||
6829 | if (level < ref_path->shared_level) | ||
6830 | ref_path->shared_level = level; | ||
6831 | goto found; | ||
6832 | } | ||
6833 | next: | ||
6834 | level--; | ||
6835 | btrfs_release_path(extent_root, path); | ||
6836 | cond_resched(); | ||
6837 | } | ||
6838 | /* reached lowest level */ | ||
6839 | ret = 1; | ||
6840 | goto out; | ||
6841 | walk_up: | ||
6842 | level = ref_path->current_level; | ||
6843 | while (level < BTRFS_MAX_LEVEL - 1) { | ||
6844 | u64 ref_objectid; | ||
6845 | |||
6846 | if (level >= 0) | ||
6847 | bytenr = ref_path->nodes[level]; | ||
6848 | else | ||
6849 | bytenr = ref_path->extent_start; | ||
6850 | |||
6851 | BUG_ON(bytenr == 0); | ||
6852 | |||
6853 | key.objectid = bytenr; | ||
6854 | key.offset = 0; | ||
6855 | key.type = BTRFS_EXTENT_REF_KEY; | ||
6856 | |||
6857 | ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0); | ||
6858 | if (ret < 0) | ||
6859 | goto out; | ||
6860 | |||
6861 | leaf = path->nodes[0]; | ||
6862 | nritems = btrfs_header_nritems(leaf); | ||
6863 | if (path->slots[0] >= nritems) { | ||
6864 | ret = btrfs_next_leaf(extent_root, path); | ||
6865 | if (ret < 0) | ||
6866 | goto out; | ||
6867 | if (ret > 0) { | ||
6868 | /* the extent was freed by someone */ | ||
6869 | if (ref_path->lowest_level == level) | ||
6870 | goto out; | ||
6871 | btrfs_release_path(extent_root, path); | ||
6872 | goto walk_down; | ||
6873 | } | ||
6874 | leaf = path->nodes[0]; | ||
6875 | } | ||
6876 | |||
6877 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | ||
6878 | if (found_key.objectid != bytenr || | ||
6879 | found_key.type != BTRFS_EXTENT_REF_KEY) { | ||
6880 | /* the extent was freed by someone */ | ||
6881 | if (ref_path->lowest_level == level) { | ||
6882 | ret = 1; | ||
6883 | goto out; | ||
6884 | } | ||
6885 | btrfs_release_path(extent_root, path); | ||
6886 | goto walk_down; | ||
6887 | } | ||
6888 | found: | ||
6889 | ref = btrfs_item_ptr(leaf, path->slots[0], | ||
6890 | struct btrfs_extent_ref); | ||
6891 | ref_objectid = btrfs_ref_objectid(leaf, ref); | ||
6892 | if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) { | ||
6893 | if (first_time) { | ||
6894 | level = (int)ref_objectid; | ||
6895 | BUG_ON(level >= BTRFS_MAX_LEVEL); | ||
6896 | ref_path->lowest_level = level; | ||
6897 | ref_path->current_level = level; | ||
6898 | ref_path->nodes[level] = bytenr; | ||
6899 | } else { | ||
6900 | WARN_ON(ref_objectid != level); | ||
6901 | } | ||
6902 | } else { | ||
6903 | WARN_ON(level != -1); | ||
6904 | } | ||
6905 | first_time = 0; | ||
6906 | |||
6907 | if (ref_path->lowest_level == level) { | ||
6908 | ref_path->owner_objectid = ref_objectid; | ||
6909 | ref_path->num_refs = btrfs_ref_num_refs(leaf, ref); | ||
6910 | } | ||
6911 | |||
6912 | /* | ||
6913 | * the block is a tree root or the block isn't in a | ||
6914 | * reference counted tree. | ||
6915 | */ | ||
6916 | if (found_key.objectid == found_key.offset || | ||
6917 | is_cowonly_root(btrfs_ref_root(leaf, ref))) { | ||
6918 | ref_path->root_objectid = btrfs_ref_root(leaf, ref); | ||
6919 | ref_path->root_generation = | ||
6920 | btrfs_ref_generation(leaf, ref); | ||
6921 | if (level < 0) { | ||
6922 | /* special reference from the tree log */ | ||
6923 | ref_path->nodes[0] = found_key.offset; | ||
6924 | ref_path->current_level = 0; | ||
6925 | } | ||
6926 | ret = 0; | ||
6927 | goto out; | ||
6928 | } | ||
6929 | |||
6930 | level++; | ||
6931 | BUG_ON(ref_path->nodes[level] != 0); | ||
6932 | ref_path->nodes[level] = found_key.offset; | ||
6933 | ref_path->current_level = level; | ||
6934 | |||
6935 | /* | ||
6936 | * the reference was created in the running transaction, | ||
6937 | * no need to continue walking up. | ||
6938 | */ | ||
6939 | if (btrfs_ref_generation(leaf, ref) == trans->transid) { | ||
6940 | ref_path->root_objectid = btrfs_ref_root(leaf, ref); | ||
6941 | ref_path->root_generation = | ||
6942 | btrfs_ref_generation(leaf, ref); | ||
6943 | ret = 0; | ||
6944 | goto out; | ||
6945 | } | ||
6946 | |||
6947 | btrfs_release_path(extent_root, path); | ||
6948 | cond_resched(); | ||
6949 | } | ||
6950 | /* reached max tree level, but no tree root found. */ | ||
6951 | BUG(); | ||
6952 | out: | ||
6953 | btrfs_free_path(path); | ||
6954 | return ret; | ||
6955 | } | ||
6956 | |||
6957 | static int btrfs_first_ref_path(struct btrfs_trans_handle *trans, | ||
6958 | struct btrfs_root *extent_root, | ||
6959 | struct btrfs_ref_path *ref_path, | ||
6960 | u64 extent_start) | ||
6961 | { | ||
6962 | memset(ref_path, 0, sizeof(*ref_path)); | ||
6963 | ref_path->extent_start = extent_start; | ||
6964 | |||
6965 | return __next_ref_path(trans, extent_root, ref_path, 1); | ||
6966 | } | ||
6967 | |||
6968 | static int btrfs_next_ref_path(struct btrfs_trans_handle *trans, | ||
6969 | struct btrfs_root *extent_root, | ||
6970 | struct btrfs_ref_path *ref_path) | ||
6971 | { | ||
6972 | return __next_ref_path(trans, extent_root, ref_path, 0); | ||
6973 | } | ||
6974 | |||
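btrfs_first_ref_path()/btrfs_next_ref_path() expose the backref walk as an iterator: a return of 0 means a reference path was found, a positive return means the walk is finished, and a negative return is an error (the loop in relocate_one_extent() further down drives it exactly this way). Here is a hedged, userspace-only sketch of that calling convention; the iterator names and struct are invented for the example.

    #include <stdio.h>

    struct iter { int cur, max; };

    static int iter_first(struct iter *it, int max)
    {
            it->cur = 0;
            it->max = max;
            return it->max ? 0 : 1;         /* 0 = found, 1 = done */
    }

    static int iter_next(struct iter *it)
    {
            return ++it->cur < it->max ? 0 : 1;
    }

    int main(void)
    {
            struct iter it;
            int ret;

            for (ret = iter_first(&it, 3); ; ret = iter_next(&it)) {
                    if (ret < 0)
                            return 1;       /* hard error */
                    if (ret > 0)
                            break;          /* no more reference paths */
                    printf("visit %d\n", it.cur);
            }
            return 0;
    }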
6975 | static noinline int get_new_locations(struct inode *reloc_inode, | ||
6976 | struct btrfs_key *extent_key, | ||
6977 | u64 offset, int no_fragment, | ||
6978 | struct disk_extent **extents, | ||
6979 | int *nr_extents) | ||
6980 | { | ||
6981 | struct btrfs_root *root = BTRFS_I(reloc_inode)->root; | ||
6982 | struct btrfs_path *path; | ||
6983 | struct btrfs_file_extent_item *fi; | ||
6984 | struct extent_buffer *leaf; | ||
6985 | struct disk_extent *exts = *extents; | ||
6986 | struct btrfs_key found_key; | ||
6987 | u64 cur_pos; | ||
6988 | u64 last_byte; | ||
6989 | u32 nritems; | ||
6990 | int nr = 0; | ||
6991 | int max = *nr_extents; | ||
6992 | int ret; | ||
6993 | |||
6994 | WARN_ON(!no_fragment && *extents); | ||
6995 | if (!exts) { | ||
6996 | max = 1; | ||
6997 | exts = kmalloc(sizeof(*exts) * max, GFP_NOFS); | ||
6998 | if (!exts) | ||
6999 | return -ENOMEM; | ||
7000 | } | ||
7001 | |||
7002 | path = btrfs_alloc_path(); | ||
7003 | if (!path) { | ||
7004 | if (exts != *extents) | ||
7005 | kfree(exts); | ||
7006 | return -ENOMEM; | ||
7007 | } | ||
7008 | |||
7009 | cur_pos = extent_key->objectid - offset; | ||
7010 | last_byte = extent_key->objectid + extent_key->offset; | ||
7011 | ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, | ||
7012 | cur_pos, 0); | ||
7013 | if (ret < 0) | ||
7014 | goto out; | ||
7015 | if (ret > 0) { | ||
7016 | ret = -ENOENT; | ||
7017 | goto out; | ||
7018 | } | ||
7019 | |||
7020 | while (1) { | ||
7021 | leaf = path->nodes[0]; | ||
7022 | nritems = btrfs_header_nritems(leaf); | ||
7023 | if (path->slots[0] >= nritems) { | ||
7024 | ret = btrfs_next_leaf(root, path); | ||
7025 | if (ret < 0) | ||
7026 | goto out; | ||
7027 | if (ret > 0) | ||
7028 | break; | ||
7029 | leaf = path->nodes[0]; | ||
7030 | } | ||
7031 | |||
7032 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | ||
7033 | if (found_key.offset != cur_pos || | ||
7034 | found_key.type != BTRFS_EXTENT_DATA_KEY || | ||
7035 | found_key.objectid != reloc_inode->i_ino) | ||
7036 | break; | ||
7037 | |||
7038 | fi = btrfs_item_ptr(leaf, path->slots[0], | ||
7039 | struct btrfs_file_extent_item); | ||
7040 | if (btrfs_file_extent_type(leaf, fi) != | ||
7041 | BTRFS_FILE_EXTENT_REG || | ||
7042 | btrfs_file_extent_disk_bytenr(leaf, fi) == 0) | ||
7043 | break; | ||
7044 | |||
7045 | if (nr == max) { | ||
7046 | struct disk_extent *old = exts; | ||
7047 | max *= 2; | ||
7048 | exts = kzalloc(sizeof(*exts) * max, GFP_NOFS); | ||
7049 | if (!exts) { | ||
7050 | ret = -ENOMEM; | ||
7051 | goto out; | ||
7052 | } | ||
7053 | memcpy(exts, old, sizeof(*exts) * nr); | ||
7054 | if (old != *extents) | ||
7055 | kfree(old); | ||
7056 | } | ||
7057 | |||
7058 | exts[nr].disk_bytenr = | ||
7059 | btrfs_file_extent_disk_bytenr(leaf, fi); | ||
7060 | exts[nr].disk_num_bytes = | ||
7061 | btrfs_file_extent_disk_num_bytes(leaf, fi); | ||
7062 | exts[nr].offset = btrfs_file_extent_offset(leaf, fi); | ||
7063 | exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi); | ||
7064 | exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi); | ||
7065 | exts[nr].compression = btrfs_file_extent_compression(leaf, fi); | ||
7066 | exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi); | ||
7067 | exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf, | ||
7068 | fi); | ||
7069 | BUG_ON(exts[nr].offset > 0); | ||
7070 | BUG_ON(exts[nr].compression || exts[nr].encryption); | ||
7071 | BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes); | ||
7072 | |||
7073 | cur_pos += exts[nr].num_bytes; | ||
7074 | nr++; | ||
7075 | |||
7076 | if (cur_pos + offset >= last_byte) | ||
7077 | break; | ||
7078 | |||
7079 | if (no_fragment) { | ||
7080 | ret = 1; | ||
7081 | goto out; | ||
7082 | } | ||
7083 | path->slots[0]++; | ||
7084 | } | ||
7085 | |||
7086 | BUG_ON(cur_pos + offset > last_byte); | ||
7087 | if (cur_pos + offset < last_byte) { | ||
7088 | ret = -ENOENT; | ||
7089 | goto out; | ||
7090 | } | ||
7091 | ret = 0; | ||
7092 | out: | ||
7093 | btrfs_free_path(path); | ||
7094 | if (ret) { | ||
7095 | if (exts != *extents) | ||
7096 | kfree(exts); | ||
7097 | } else { | ||
7098 | *extents = exts; | ||
7099 | *nr_extents = nr; | ||
7100 | } | ||
7101 | return ret; | ||
7102 | } | ||
7103 | |||
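When get_new_locations() runs out of room it doubles the extent array, copies the old entries over, and frees the old buffer only if it was not the caller's original allocation. A small standalone C sketch of that grow-by-doubling rule is below; the types and helper names are made up for illustration.

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct ext { long bytenr, bytes; };

    /* grow by doubling, never freeing the caller's original buffer */
    static struct ext *grow(struct ext *cur, struct ext *caller_buf,
                            int nr, int *max)
    {
            struct ext *bigger;

            *max *= 2;
            bigger = calloc(*max, sizeof(*bigger));
            if (!bigger)
                    return NULL;
            memcpy(bigger, cur, sizeof(*cur) * nr);
            if (cur != caller_buf)
                    free(cur);
            return bigger;
    }

    int main(void)
    {
            int max = 1;
            struct ext first[1] = { { 1, 4096 } };
            struct ext *exts = first;

            exts = grow(exts, first, 1, &max);
            if (!exts)
                    return 1;
            printf("max=%d first=%ld\n", max, exts[0].bytenr);
            free(exts);
            return 0;
    }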
7104 | static noinline int replace_one_extent(struct btrfs_trans_handle *trans, | ||
7105 | struct btrfs_root *root, | ||
7106 | struct btrfs_path *path, | ||
7107 | struct btrfs_key *extent_key, | ||
7108 | struct btrfs_key *leaf_key, | ||
7109 | struct btrfs_ref_path *ref_path, | ||
7110 | struct disk_extent *new_extents, | ||
7111 | int nr_extents) | ||
7112 | { | ||
7113 | struct extent_buffer *leaf; | ||
7114 | struct btrfs_file_extent_item *fi; | ||
7115 | struct inode *inode = NULL; | ||
7116 | struct btrfs_key key; | ||
7117 | u64 lock_start = 0; | ||
7118 | u64 lock_end = 0; | ||
7119 | u64 num_bytes; | ||
7120 | u64 ext_offset; | ||
7121 | u64 search_end = (u64)-1; | ||
7122 | u32 nritems; | ||
7123 | int nr_scaned = 0; | ||
7124 | int extent_locked = 0; | ||
7125 | int extent_type; | ||
7126 | int ret; | ||
7127 | |||
7128 | memcpy(&key, leaf_key, sizeof(key)); | ||
7129 | if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { | ||
7130 | if (key.objectid < ref_path->owner_objectid || | ||
7131 | (key.objectid == ref_path->owner_objectid && | ||
7132 | key.type < BTRFS_EXTENT_DATA_KEY)) { | ||
7133 | key.objectid = ref_path->owner_objectid; | ||
7134 | key.type = BTRFS_EXTENT_DATA_KEY; | ||
7135 | key.offset = 0; | ||
7136 | } | ||
7137 | } | ||
7138 | |||
7139 | while (1) { | ||
7140 | ret = btrfs_search_slot(trans, root, &key, path, 0, 1); | ||
7141 | if (ret < 0) | ||
7142 | goto out; | ||
7143 | |||
7144 | leaf = path->nodes[0]; | ||
7145 | nritems = btrfs_header_nritems(leaf); | ||
7146 | next: | ||
7147 | if (extent_locked && ret > 0) { | ||
7148 | /* | ||
7149 | * the file extent item was modified by someone | ||
7150 | * before the extent got locked. | ||
7151 | */ | ||
7152 | unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, | ||
7153 | lock_end, GFP_NOFS); | ||
7154 | extent_locked = 0; | ||
7155 | } | ||
7156 | |||
7157 | if (path->slots[0] >= nritems) { | ||
7158 | if (++nr_scaned > 2) | ||
7159 | break; | ||
7160 | |||
7161 | BUG_ON(extent_locked); | ||
7162 | ret = btrfs_next_leaf(root, path); | ||
7163 | if (ret < 0) | ||
7164 | goto out; | ||
7165 | if (ret > 0) | ||
7166 | break; | ||
7167 | leaf = path->nodes[0]; | ||
7168 | nritems = btrfs_header_nritems(leaf); | ||
7169 | } | ||
7170 | |||
7171 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | ||
7172 | |||
7173 | if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) { | ||
7174 | if ((key.objectid > ref_path->owner_objectid) || | ||
7175 | (key.objectid == ref_path->owner_objectid && | ||
7176 | key.type > BTRFS_EXTENT_DATA_KEY) || | ||
7177 | key.offset >= search_end) | ||
7178 | break; | ||
7179 | } | ||
7180 | |||
7181 | if (inode && key.objectid != inode->i_ino) { | ||
7182 | BUG_ON(extent_locked); | ||
7183 | btrfs_release_path(root, path); | ||
7184 | mutex_unlock(&inode->i_mutex); | ||
7185 | iput(inode); | ||
7186 | inode = NULL; | ||
7187 | continue; | ||
7188 | } | ||
7189 | |||
7190 | if (key.type != BTRFS_EXTENT_DATA_KEY) { | ||
7191 | path->slots[0]++; | ||
7192 | ret = 1; | ||
7193 | goto next; | ||
7194 | } | ||
7195 | fi = btrfs_item_ptr(leaf, path->slots[0], | ||
7196 | struct btrfs_file_extent_item); | ||
7197 | extent_type = btrfs_file_extent_type(leaf, fi); | ||
7198 | if ((extent_type != BTRFS_FILE_EXTENT_REG && | ||
7199 | extent_type != BTRFS_FILE_EXTENT_PREALLOC) || | ||
7200 | (btrfs_file_extent_disk_bytenr(leaf, fi) != | ||
7201 | extent_key->objectid)) { | ||
7202 | path->slots[0]++; | ||
7203 | ret = 1; | ||
7204 | goto next; | ||
7205 | } | ||
7206 | |||
7207 | num_bytes = btrfs_file_extent_num_bytes(leaf, fi); | ||
7208 | ext_offset = btrfs_file_extent_offset(leaf, fi); | ||
7209 | |||
7210 | if (search_end == (u64)-1) { | ||
7211 | search_end = key.offset - ext_offset + | ||
7212 | btrfs_file_extent_ram_bytes(leaf, fi); | ||
7213 | } | ||
7214 | |||
7215 | if (!extent_locked) { | ||
7216 | lock_start = key.offset; | ||
7217 | lock_end = lock_start + num_bytes - 1; | ||
7218 | } else { | ||
7219 | if (lock_start > key.offset || | ||
7220 | lock_end + 1 < key.offset + num_bytes) { | ||
7221 | unlock_extent(&BTRFS_I(inode)->io_tree, | ||
7222 | lock_start, lock_end, GFP_NOFS); | ||
7223 | extent_locked = 0; | ||
7224 | } | ||
7225 | } | ||
7226 | |||
7227 | if (!inode) { | ||
7228 | btrfs_release_path(root, path); | ||
7229 | |||
7230 | inode = btrfs_iget_locked(root->fs_info->sb, | ||
7231 | key.objectid, root); | ||
7232 | if (inode->i_state & I_NEW) { | ||
7233 | BTRFS_I(inode)->root = root; | ||
7234 | BTRFS_I(inode)->location.objectid = | ||
7235 | key.objectid; | ||
7236 | BTRFS_I(inode)->location.type = | ||
7237 | BTRFS_INODE_ITEM_KEY; | ||
7238 | BTRFS_I(inode)->location.offset = 0; | ||
7239 | btrfs_read_locked_inode(inode); | ||
7240 | unlock_new_inode(inode); | ||
7241 | } | ||
7242 | /* | ||
7243 | * some code calls btrfs_commit_transaction while | ||
7244 | * holding the i_mutex, so we can't use mutex_lock | ||
7245 | * here. | ||
7246 | */ | ||
7247 | if (is_bad_inode(inode) || | ||
7248 | !mutex_trylock(&inode->i_mutex)) { | ||
7249 | iput(inode); | ||
7250 | inode = NULL; | ||
7251 | key.offset = (u64)-1; | ||
7252 | goto skip; | ||
7253 | } | ||
7254 | } | ||
7255 | |||
7256 | if (!extent_locked) { | ||
7257 | struct btrfs_ordered_extent *ordered; | ||
7258 | |||
7259 | btrfs_release_path(root, path); | ||
7260 | |||
7261 | lock_extent(&BTRFS_I(inode)->io_tree, lock_start, | ||
7262 | lock_end, GFP_NOFS); | ||
7263 | ordered = btrfs_lookup_first_ordered_extent(inode, | ||
7264 | lock_end); | ||
7265 | if (ordered && | ||
7266 | ordered->file_offset <= lock_end && | ||
7267 | ordered->file_offset + ordered->len > lock_start) { | ||
7268 | unlock_extent(&BTRFS_I(inode)->io_tree, | ||
7269 | lock_start, lock_end, GFP_NOFS); | ||
7270 | btrfs_start_ordered_extent(inode, ordered, 1); | ||
7271 | btrfs_put_ordered_extent(ordered); | ||
7272 | key.offset += num_bytes; | ||
7273 | goto skip; | ||
7274 | } | ||
7275 | if (ordered) | ||
7276 | btrfs_put_ordered_extent(ordered); | ||
7277 | |||
7278 | extent_locked = 1; | ||
7279 | continue; | ||
7280 | } | ||
7281 | |||
7282 | if (nr_extents == 1) { | ||
7283 | /* update extent pointer in place */ | ||
7284 | btrfs_set_file_extent_disk_bytenr(leaf, fi, | ||
7285 | new_extents[0].disk_bytenr); | ||
7286 | btrfs_set_file_extent_disk_num_bytes(leaf, fi, | ||
7287 | new_extents[0].disk_num_bytes); | ||
7288 | btrfs_mark_buffer_dirty(leaf); | ||
7289 | |||
7290 | btrfs_drop_extent_cache(inode, key.offset, | ||
7291 | key.offset + num_bytes - 1, 0); | ||
7292 | |||
7293 | ret = btrfs_inc_extent_ref(trans, root, | ||
7294 | new_extents[0].disk_bytenr, | ||
7295 | new_extents[0].disk_num_bytes, | ||
7296 | leaf->start, | ||
7297 | root->root_key.objectid, | ||
7298 | trans->transid, | ||
7299 | key.objectid); | ||
7300 | BUG_ON(ret); | ||
7301 | |||
7302 | ret = btrfs_free_extent(trans, root, | ||
7303 | extent_key->objectid, | ||
7304 | extent_key->offset, | ||
7305 | leaf->start, | ||
7306 | btrfs_header_owner(leaf), | ||
7307 | btrfs_header_generation(leaf), | ||
7308 | key.objectid, 0); | ||
7309 | BUG_ON(ret); | ||
7310 | |||
7311 | btrfs_release_path(root, path); | ||
7312 | key.offset += num_bytes; | ||
7313 | } else { | ||
7314 | BUG_ON(1); | ||
7315 | #if 0 | ||
7316 | u64 alloc_hint; | ||
7317 | u64 extent_len; | ||
7318 | int i; | ||
7319 | /* | ||
7320 | * drop the old extent pointer first, then insert the | ||
7321 | * new pointers one by one | ||
7322 | */ | ||
7323 | btrfs_release_path(root, path); | ||
7324 | ret = btrfs_drop_extents(trans, root, inode, key.offset, | ||
7325 | key.offset + num_bytes, | ||
7326 | key.offset, &alloc_hint); | ||
7327 | BUG_ON(ret); | ||
7328 | |||
7329 | for (i = 0; i < nr_extents; i++) { | ||
7330 | if (ext_offset >= new_extents[i].num_bytes) { | ||
7331 | ext_offset -= new_extents[i].num_bytes; | ||
7332 | continue; | ||
7333 | } | ||
7334 | extent_len = min(new_extents[i].num_bytes - | ||
7335 | ext_offset, num_bytes); | ||
7336 | |||
7337 | ret = btrfs_insert_empty_item(trans, root, | ||
7338 | path, &key, | ||
7339 | sizeof(*fi)); | ||
7340 | BUG_ON(ret); | ||
7341 | |||
7342 | leaf = path->nodes[0]; | ||
7343 | fi = btrfs_item_ptr(leaf, path->slots[0], | ||
7344 | struct btrfs_file_extent_item); | ||
7345 | btrfs_set_file_extent_generation(leaf, fi, | ||
7346 | trans->transid); | ||
7347 | btrfs_set_file_extent_type(leaf, fi, | ||
7348 | BTRFS_FILE_EXTENT_REG); | ||
7349 | btrfs_set_file_extent_disk_bytenr(leaf, fi, | ||
7350 | new_extents[i].disk_bytenr); | ||
7351 | btrfs_set_file_extent_disk_num_bytes(leaf, fi, | ||
7352 | new_extents[i].disk_num_bytes); | ||
7353 | btrfs_set_file_extent_ram_bytes(leaf, fi, | ||
7354 | new_extents[i].ram_bytes); | ||
7355 | |||
7356 | btrfs_set_file_extent_compression(leaf, fi, | ||
7357 | new_extents[i].compression); | ||
7358 | btrfs_set_file_extent_encryption(leaf, fi, | ||
7359 | new_extents[i].encryption); | ||
7360 | btrfs_set_file_extent_other_encoding(leaf, fi, | ||
7361 | new_extents[i].other_encoding); | ||
7362 | |||
7363 | btrfs_set_file_extent_num_bytes(leaf, fi, | ||
7364 | extent_len); | ||
7365 | ext_offset += new_extents[i].offset; | ||
7366 | btrfs_set_file_extent_offset(leaf, fi, | ||
7367 | ext_offset); | ||
7368 | btrfs_mark_buffer_dirty(leaf); | ||
7369 | |||
7370 | btrfs_drop_extent_cache(inode, key.offset, | ||
7371 | key.offset + extent_len - 1, 0); | ||
7372 | |||
7373 | ret = btrfs_inc_extent_ref(trans, root, | ||
7374 | new_extents[i].disk_bytenr, | ||
7375 | new_extents[i].disk_num_bytes, | ||
7376 | leaf->start, | ||
7377 | root->root_key.objectid, | ||
7378 | trans->transid, key.objectid); | ||
7379 | BUG_ON(ret); | ||
7380 | btrfs_release_path(root, path); | ||
7381 | |||
7382 | inode_add_bytes(inode, extent_len); | ||
7383 | |||
7384 | ext_offset = 0; | ||
7385 | num_bytes -= extent_len; | ||
7386 | key.offset += extent_len; | ||
7387 | |||
7388 | if (num_bytes == 0) | ||
7389 | break; | ||
7390 | } | ||
7391 | BUG_ON(i >= nr_extents); | ||
7392 | #endif | ||
7393 | } | ||
7394 | |||
7395 | if (extent_locked) { | ||
7396 | unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, | ||
7397 | lock_end, GFP_NOFS); | ||
7398 | extent_locked = 0; | ||
7399 | } | ||
7400 | skip: | ||
7401 | if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS && | ||
7402 | key.offset >= search_end) | ||
7403 | break; | ||
7404 | |||
7405 | cond_resched(); | ||
7406 | } | ||
7407 | ret = 0; | ||
7408 | out: | ||
7409 | btrfs_release_path(root, path); | ||
7410 | if (inode) { | ||
7411 | mutex_unlock(&inode->i_mutex); | ||
7412 | if (extent_locked) { | ||
7413 | unlock_extent(&BTRFS_I(inode)->io_tree, lock_start, | ||
7414 | lock_end, GFP_NOFS); | ||
7415 | } | ||
7416 | iput(inode); | ||
7417 | } | ||
7418 | return ret; | ||
7419 | } | ||
7420 | |||
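The comment inside replace_one_extent() notes that some paths commit a transaction while holding i_mutex, so the function uses mutex_trylock() and skips busy inodes rather than blocking and risking an ABBA deadlock. A minimal pthreads analogue of that deadlock-avoidance choice follows; it is a userspace stand-in, not kernel locking.

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t inode_lock = PTHREAD_MUTEX_INITIALIZER;

    /* inside a "transaction" we must not block on inode_lock, because
     * another path takes it before committing; skip on contention */
    static int process_inode_nonblocking(void)
    {
            if (pthread_mutex_trylock(&inode_lock) != 0)
                    return -1;              /* busy: skip, revisit later */
            /* ... work on the inode ... */
            pthread_mutex_unlock(&inode_lock);
            return 0;
    }

    int main(void)
    {
            printf("processed=%d\n", process_inode_nonblocking() == 0);
            return 0;
    }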
7421 | int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans, | ||
7422 | struct btrfs_root *root, | ||
7423 | struct extent_buffer *buf, u64 orig_start) | ||
7424 | { | ||
7425 | int level; | ||
7426 | int ret; | ||
7427 | |||
7428 | BUG_ON(btrfs_header_generation(buf) != trans->transid); | ||
7429 | BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID); | ||
7430 | |||
7431 | level = btrfs_header_level(buf); | ||
7432 | if (level == 0) { | ||
7433 | struct btrfs_leaf_ref *ref; | ||
7434 | struct btrfs_leaf_ref *orig_ref; | ||
7435 | |||
7436 | orig_ref = btrfs_lookup_leaf_ref(root, orig_start); | ||
7437 | if (!orig_ref) | ||
7438 | return -ENOENT; | ||
7439 | |||
7440 | ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems); | ||
7441 | if (!ref) { | ||
7442 | btrfs_free_leaf_ref(root, orig_ref); | ||
7443 | return -ENOMEM; | ||
7444 | } | ||
7445 | |||
7446 | ref->nritems = orig_ref->nritems; | ||
7447 | memcpy(ref->extents, orig_ref->extents, | ||
7448 | sizeof(ref->extents[0]) * ref->nritems); | ||
7449 | |||
7450 | btrfs_free_leaf_ref(root, orig_ref); | ||
7451 | |||
7452 | ref->root_gen = trans->transid; | ||
7453 | ref->bytenr = buf->start; | ||
7454 | ref->owner = btrfs_header_owner(buf); | ||
7455 | ref->generation = btrfs_header_generation(buf); | ||
7456 | |||
7457 | ret = btrfs_add_leaf_ref(root, ref, 0); | ||
7458 | WARN_ON(ret); | ||
7459 | btrfs_free_leaf_ref(root, ref); | ||
7460 | } | ||
7461 | return 0; | ||
7462 | } | ||
7463 | |||
7464 | static noinline int invalidate_extent_cache(struct btrfs_root *root, | ||
7465 | struct extent_buffer *leaf, | ||
7466 | struct btrfs_block_group_cache *group, | ||
7467 | struct btrfs_root *target_root) | ||
7468 | { | ||
7469 | struct btrfs_key key; | ||
7470 | struct inode *inode = NULL; | ||
7471 | struct btrfs_file_extent_item *fi; | ||
7472 | struct extent_state *cached_state = NULL; | ||
7473 | u64 num_bytes; | ||
7474 | u64 skip_objectid = 0; | ||
7475 | u32 nritems; | ||
7476 | u32 i; | ||
7477 | |||
7478 | nritems = btrfs_header_nritems(leaf); | ||
7479 | for (i = 0; i < nritems; i++) { | ||
7480 | btrfs_item_key_to_cpu(leaf, &key, i); | ||
7481 | if (key.objectid == skip_objectid || | ||
7482 | key.type != BTRFS_EXTENT_DATA_KEY) | ||
7483 | continue; | ||
7484 | fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); | ||
7485 | if (btrfs_file_extent_type(leaf, fi) == | ||
7486 | BTRFS_FILE_EXTENT_INLINE) | ||
7487 | continue; | ||
7488 | if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0) | ||
7489 | continue; | ||
7490 | if (!inode || inode->i_ino != key.objectid) { | ||
7491 | iput(inode); | ||
7492 | inode = btrfs_ilookup(target_root->fs_info->sb, | ||
7493 | key.objectid, target_root, 1); | ||
7494 | } | ||
7495 | if (!inode) { | ||
7496 | skip_objectid = key.objectid; | ||
7497 | continue; | ||
7498 | } | ||
7499 | num_bytes = btrfs_file_extent_num_bytes(leaf, fi); | ||
7500 | |||
7501 | lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset, | ||
7502 | key.offset + num_bytes - 1, 0, &cached_state, | ||
7503 | GFP_NOFS); | ||
7504 | btrfs_drop_extent_cache(inode, key.offset, | ||
7505 | key.offset + num_bytes - 1, 1); | ||
7506 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset, | ||
7507 | key.offset + num_bytes - 1, &cached_state, | ||
7508 | GFP_NOFS); | ||
7509 | cond_resched(); | ||
7510 | } | ||
7511 | iput(inode); | ||
7512 | return 0; | ||
7513 | } | ||
7514 | |||
7515 | static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans, | ||
7516 | struct btrfs_root *root, | ||
7517 | struct extent_buffer *leaf, | ||
7518 | struct btrfs_block_group_cache *group, | ||
7519 | struct inode *reloc_inode) | ||
7520 | { | ||
7521 | struct btrfs_key key; | ||
7522 | struct btrfs_key extent_key; | ||
7523 | struct btrfs_file_extent_item *fi; | ||
7524 | struct btrfs_leaf_ref *ref; | ||
7525 | struct disk_extent *new_extent; | ||
7526 | u64 bytenr; | ||
7527 | u64 num_bytes; | ||
7528 | u32 nritems; | ||
7529 | u32 i; | ||
7530 | int ext_index; | ||
7531 | int nr_extent; | ||
7532 | int ret; | ||
7533 | |||
7534 | new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS); | ||
7535 | if (!new_extent) | ||
7536 | return -ENOMEM; | ||
7537 | |||
7538 | ref = btrfs_lookup_leaf_ref(root, leaf->start); | ||
7539 | BUG_ON(!ref); | ||
7540 | |||
7541 | ext_index = -1; | ||
7542 | nritems = btrfs_header_nritems(leaf); | ||
7543 | for (i = 0; i < nritems; i++) { | ||
7544 | btrfs_item_key_to_cpu(leaf, &key, i); | ||
7545 | if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY) | ||
7546 | continue; | ||
7547 | fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item); | ||
7548 | if (btrfs_file_extent_type(leaf, fi) == | ||
7549 | BTRFS_FILE_EXTENT_INLINE) | ||
7550 | continue; | ||
7551 | bytenr = btrfs_file_extent_disk_bytenr(leaf, fi); | ||
7552 | num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi); | ||
7553 | if (bytenr == 0) | ||
7554 | continue; | ||
7555 | |||
7556 | ext_index++; | ||
7557 | if (bytenr >= group->key.objectid + group->key.offset || | ||
7558 | bytenr + num_bytes <= group->key.objectid) | ||
7559 | continue; | ||
7560 | |||
7561 | extent_key.objectid = bytenr; | ||
7562 | extent_key.offset = num_bytes; | ||
7563 | extent_key.type = BTRFS_EXTENT_ITEM_KEY; | ||
7564 | nr_extent = 1; | ||
7565 | ret = get_new_locations(reloc_inode, &extent_key, | ||
7566 | group->key.objectid, 1, | ||
7567 | &new_extent, &nr_extent); | ||
7568 | if (ret > 0) | ||
7569 | continue; | ||
7570 | BUG_ON(ret < 0); | ||
7571 | |||
7572 | BUG_ON(ref->extents[ext_index].bytenr != bytenr); | ||
7573 | BUG_ON(ref->extents[ext_index].num_bytes != num_bytes); | ||
7574 | ref->extents[ext_index].bytenr = new_extent->disk_bytenr; | ||
7575 | ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes; | ||
7576 | |||
7577 | btrfs_set_file_extent_disk_bytenr(leaf, fi, | ||
7578 | new_extent->disk_bytenr); | ||
7579 | btrfs_set_file_extent_disk_num_bytes(leaf, fi, | ||
7580 | new_extent->disk_num_bytes); | ||
7581 | btrfs_mark_buffer_dirty(leaf); | ||
7582 | |||
7583 | ret = btrfs_inc_extent_ref(trans, root, | ||
7584 | new_extent->disk_bytenr, | ||
7585 | new_extent->disk_num_bytes, | ||
7586 | leaf->start, | ||
7587 | root->root_key.objectid, | ||
7588 | trans->transid, key.objectid); | ||
7589 | BUG_ON(ret); | ||
7590 | |||
7591 | ret = btrfs_free_extent(trans, root, | ||
7592 | bytenr, num_bytes, leaf->start, | ||
7593 | btrfs_header_owner(leaf), | ||
7594 | btrfs_header_generation(leaf), | ||
7595 | key.objectid, 0); | ||
7596 | BUG_ON(ret); | ||
7597 | cond_resched(); | ||
7598 | } | ||
7599 | kfree(new_extent); | ||
7600 | BUG_ON(ext_index + 1 != ref->nritems); | ||
7601 | btrfs_free_leaf_ref(root, ref); | ||
7602 | return 0; | ||
7603 | } | ||
7604 | |||
7605 | int btrfs_free_reloc_root(struct btrfs_trans_handle *trans, | ||
7606 | struct btrfs_root *root) | ||
7607 | { | ||
7608 | struct btrfs_root *reloc_root; | ||
7609 | int ret; | ||
7610 | |||
7611 | if (root->reloc_root) { | ||
7612 | reloc_root = root->reloc_root; | ||
7613 | root->reloc_root = NULL; | ||
7614 | list_add(&reloc_root->dead_list, | ||
7615 | &root->fs_info->dead_reloc_roots); | ||
7616 | |||
7617 | btrfs_set_root_bytenr(&reloc_root->root_item, | ||
7618 | reloc_root->node->start); | ||
7619 | btrfs_set_root_level(&root->root_item, | ||
7620 | btrfs_header_level(reloc_root->node)); | ||
7621 | memset(&reloc_root->root_item.drop_progress, 0, | ||
7622 | sizeof(struct btrfs_disk_key)); | ||
7623 | reloc_root->root_item.drop_level = 0; | ||
7624 | |||
7625 | ret = btrfs_update_root(trans, root->fs_info->tree_root, | ||
7626 | &reloc_root->root_key, | ||
7627 | &reloc_root->root_item); | ||
7628 | BUG_ON(ret); | ||
7629 | } | ||
7630 | return 0; | ||
7631 | } | ||
7632 | |||
7633 | int btrfs_drop_dead_reloc_roots(struct btrfs_root *root) | ||
7634 | { | ||
7635 | struct btrfs_trans_handle *trans; | ||
7636 | struct btrfs_root *reloc_root; | ||
7637 | struct btrfs_root *prev_root = NULL; | ||
7638 | struct list_head dead_roots; | ||
7639 | int ret; | ||
7640 | unsigned long nr; | ||
7641 | |||
7642 | INIT_LIST_HEAD(&dead_roots); | ||
7643 | list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots); | ||
7644 | |||
7645 | while (!list_empty(&dead_roots)) { | ||
7646 | reloc_root = list_entry(dead_roots.prev, | ||
7647 | struct btrfs_root, dead_list); | ||
7648 | list_del_init(&reloc_root->dead_list); | ||
7649 | |||
7650 | BUG_ON(reloc_root->commit_root != NULL); | ||
7651 | while (1) { | ||
7652 | trans = btrfs_join_transaction(root, 1); | ||
7653 | BUG_ON(IS_ERR(trans)); | ||
7654 | |||
7655 | mutex_lock(&root->fs_info->drop_mutex); | ||
7656 | ret = btrfs_drop_snapshot(trans, reloc_root); | ||
7657 | if (ret != -EAGAIN) | ||
7658 | break; | ||
7659 | mutex_unlock(&root->fs_info->drop_mutex); | ||
7660 | |||
7661 | nr = trans->blocks_used; | ||
7662 | ret = btrfs_end_transaction(trans, root); | ||
7663 | BUG_ON(ret); | ||
7664 | btrfs_btree_balance_dirty(root, nr); | ||
7665 | } | ||
7666 | |||
7667 | free_extent_buffer(reloc_root->node); | ||
7668 | |||
7669 | ret = btrfs_del_root(trans, root->fs_info->tree_root, | ||
7670 | &reloc_root->root_key); | ||
7671 | BUG_ON(ret); | ||
7672 | mutex_unlock(&root->fs_info->drop_mutex); | ||
7673 | |||
7674 | nr = trans->blocks_used; | ||
7675 | ret = btrfs_end_transaction(trans, root); | ||
7676 | BUG_ON(ret); | ||
7677 | btrfs_btree_balance_dirty(root, nr); | ||
7678 | |||
7679 | kfree(prev_root); | ||
7680 | prev_root = reloc_root; | ||
7681 | } | ||
7682 | if (prev_root) { | ||
7683 | btrfs_remove_leaf_refs(prev_root, (u64)-1, 0); | ||
7684 | kfree(prev_root); | ||
7685 | } | ||
7686 | return 0; | ||
7687 | } | ||
7688 | |||
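btrfs_drop_dead_reloc_roots() above drops each dead reloc root in bounded batches: btrfs_drop_snapshot() returns -EAGAIN until it is finished, and the loop ends the transaction and balances dirty pages between batches. A tiny userspace sketch of that retry-until-not-EAGAIN shape is below; the helper is a stand-in, not the real call.

    #include <errno.h>
    #include <stdio.h>

    static int steps_left = 3;

    /* stand-in for btrfs_drop_snapshot(): does a bounded chunk of work
     * and returns -EAGAIN until the whole snapshot has been dropped */
    static int drop_some(void)
    {
            return --steps_left > 0 ? -EAGAIN : 0;
    }

    int main(void)
    {
            int ret;

            while (1) {
                    ret = drop_some();
                    if (ret != -EAGAIN)
                            break;
                    printf("batch done, starting a new transaction\n");
            }
            printf("snapshot fully dropped, ret=%d\n", ret);
            return 0;
    }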
7689 | int btrfs_add_dead_reloc_root(struct btrfs_root *root) | ||
7690 | { | ||
7691 | list_add(&root->dead_list, &root->fs_info->dead_reloc_roots); | ||
7692 | return 0; | ||
7693 | } | ||
7694 | |||
7695 | int btrfs_cleanup_reloc_trees(struct btrfs_root *root) | ||
7696 | { | ||
7697 | struct btrfs_root *reloc_root; | ||
7698 | struct btrfs_trans_handle *trans; | ||
7699 | struct btrfs_key location; | ||
7700 | int found; | ||
7701 | int ret; | ||
7702 | |||
7703 | mutex_lock(&root->fs_info->tree_reloc_mutex); | ||
7704 | ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL); | ||
7705 | BUG_ON(ret); | ||
7706 | found = !list_empty(&root->fs_info->dead_reloc_roots); | ||
7707 | mutex_unlock(&root->fs_info->tree_reloc_mutex); | ||
7708 | |||
7709 | if (found) { | ||
7710 | trans = btrfs_start_transaction(root, 1); | ||
7711 | BUG_ON(IS_ERR(trans)); | ||
7712 | ret = btrfs_commit_transaction(trans, root); | ||
7713 | BUG_ON(ret); | ||
7714 | } | ||
7715 | |||
7716 | location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID; | ||
7717 | location.offset = (u64)-1; | ||
7718 | location.type = BTRFS_ROOT_ITEM_KEY; | ||
7719 | |||
7720 | reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location); | ||
7721 | BUG_ON(!reloc_root); | ||
7722 | ret = btrfs_orphan_cleanup(reloc_root); | ||
7723 | BUG_ON(ret); | ||
7724 | return 0; | ||
7725 | } | ||
7726 | |||
7727 | static noinline int init_reloc_tree(struct btrfs_trans_handle *trans, | ||
7728 | struct btrfs_root *root) | ||
7729 | { | ||
7730 | struct btrfs_root *reloc_root; | ||
7731 | struct extent_buffer *eb; | ||
7732 | struct btrfs_root_item *root_item; | ||
7733 | struct btrfs_key root_key; | ||
7734 | int ret; | ||
7735 | |||
7736 | BUG_ON(!root->ref_cows); | ||
7737 | if (root->reloc_root) | ||
7738 | return 0; | ||
7739 | |||
7740 | root_item = kmalloc(sizeof(*root_item), GFP_NOFS); | ||
7741 | if (!root_item) | ||
7742 | return -ENOMEM; | ||
7743 | |||
7744 | ret = btrfs_copy_root(trans, root, root->commit_root, | ||
7745 | &eb, BTRFS_TREE_RELOC_OBJECTID); | ||
7746 | BUG_ON(ret); | ||
7747 | |||
7748 | root_key.objectid = BTRFS_TREE_RELOC_OBJECTID; | ||
7749 | root_key.offset = root->root_key.objectid; | ||
7750 | root_key.type = BTRFS_ROOT_ITEM_KEY; | ||
7751 | |||
7752 | memcpy(root_item, &root->root_item, sizeof(root_item)); | ||
7753 | btrfs_set_root_refs(root_item, 0); | ||
7754 | btrfs_set_root_bytenr(root_item, eb->start); | ||
7755 | btrfs_set_root_level(root_item, btrfs_header_level(eb)); | ||
7756 | btrfs_set_root_generation(root_item, trans->transid); | ||
7757 | |||
7758 | btrfs_tree_unlock(eb); | ||
7759 | free_extent_buffer(eb); | ||
7760 | |||
7761 | ret = btrfs_insert_root(trans, root->fs_info->tree_root, | ||
7762 | &root_key, root_item); | ||
7763 | BUG_ON(ret); | ||
7764 | kfree(root_item); | ||
7765 | |||
7766 | reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root, | ||
7767 | &root_key); | ||
7768 | BUG_ON(IS_ERR(reloc_root)); | ||
7769 | reloc_root->last_trans = trans->transid; | ||
7770 | reloc_root->commit_root = NULL; | ||
7771 | reloc_root->ref_tree = &root->fs_info->reloc_ref_tree; | ||
7772 | |||
7773 | root->reloc_root = reloc_root; | ||
7774 | return 0; | ||
7775 | } | ||
7776 | |||
7777 | /* | ||
7778 | * Core function of space balance. | ||
7779 | * | ||
7780 | * The idea is to use reloc trees to relocate tree blocks in reference | ||
7781 | * counted roots. There is one reloc tree for each subvol, and all | ||
7782 | * reloc trees share the same root key objectid. Reloc trees are | ||
7783 | * snapshots of the latest committed roots of subvols (root->commit_root). | ||
7784 | * | ||
7785 | * Relocating a tree block referenced by a subvol takes two steps: | ||
7786 | * COW the block through the subvol's reloc tree, then update the block | ||
7787 | * pointer in the subvol to point to the new block. Since all reloc trees | ||
7788 | * share the same root key objectid, special handling for tree blocks | ||
7789 | * owned by them is easy. Once a tree block has been COWed in one reloc | ||
7790 | * tree, we can use the resulting new block directly when the same block | ||
7791 | * needs to be COWed again through other reloc trees. This way, relocated | ||
7792 | * tree blocks are shared between reloc trees, and therefore also between | ||
7793 | * subvols (see the illustrative sketch after relocate_one_path below). | ||
7794 | */ | ||
7795 | static noinline int relocate_one_path(struct btrfs_trans_handle *trans, | ||
7796 | struct btrfs_root *root, | ||
7797 | struct btrfs_path *path, | ||
7798 | struct btrfs_key *first_key, | ||
7799 | struct btrfs_ref_path *ref_path, | ||
7800 | struct btrfs_block_group_cache *group, | ||
7801 | struct inode *reloc_inode) | ||
7802 | { | ||
7803 | struct btrfs_root *reloc_root; | ||
7804 | struct extent_buffer *eb = NULL; | ||
7805 | struct btrfs_key *keys; | ||
7806 | u64 *nodes; | ||
7807 | int level; | ||
7808 | int shared_level; | ||
7809 | int lowest_level = 0; | ||
7810 | int ret; | ||
7811 | |||
7812 | if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID) | ||
7813 | lowest_level = ref_path->owner_objectid; | ||
7814 | |||
7815 | if (!root->ref_cows) { | ||
7816 | path->lowest_level = lowest_level; | ||
7817 | ret = btrfs_search_slot(trans, root, first_key, path, 0, 1); | ||
7818 | BUG_ON(ret < 0); | ||
7819 | path->lowest_level = 0; | ||
7820 | btrfs_release_path(root, path); | ||
7821 | return 0; | ||
7822 | } | ||
7823 | |||
7824 | mutex_lock(&root->fs_info->tree_reloc_mutex); | ||
7825 | ret = init_reloc_tree(trans, root); | ||
7826 | BUG_ON(ret); | ||
7827 | reloc_root = root->reloc_root; | ||
7828 | |||
7829 | shared_level = ref_path->shared_level; | ||
7830 | ref_path->shared_level = BTRFS_MAX_LEVEL - 1; | ||
7831 | |||
7832 | keys = ref_path->node_keys; | ||
7833 | nodes = ref_path->new_nodes; | ||
7834 | memset(&keys[shared_level + 1], 0, | ||
7835 | sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1)); | ||
7836 | memset(&nodes[shared_level + 1], 0, | ||
7837 | sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1)); | ||
7838 | |||
7839 | if (nodes[lowest_level] == 0) { | ||
7840 | path->lowest_level = lowest_level; | ||
7841 | ret = btrfs_search_slot(trans, reloc_root, first_key, path, | ||
7842 | 0, 1); | ||
7843 | BUG_ON(ret); | ||
7844 | for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) { | ||
7845 | eb = path->nodes[level]; | ||
7846 | if (!eb || eb == reloc_root->node) | ||
7847 | break; | ||
7848 | nodes[level] = eb->start; | ||
7849 | if (level == 0) | ||
7850 | btrfs_item_key_to_cpu(eb, &keys[level], 0); | ||
7851 | else | ||
7852 | btrfs_node_key_to_cpu(eb, &keys[level], 0); | ||
7853 | } | ||
7854 | if (nodes[0] && | ||
7855 | ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { | ||
7856 | eb = path->nodes[0]; | ||
7857 | ret = replace_extents_in_leaf(trans, reloc_root, eb, | ||
7858 | group, reloc_inode); | ||
7859 | BUG_ON(ret); | ||
7860 | } | ||
7861 | btrfs_release_path(reloc_root, path); | ||
7862 | } else { | ||
7863 | ret = btrfs_merge_path(trans, reloc_root, keys, nodes, | ||
7864 | lowest_level); | ||
7865 | BUG_ON(ret); | ||
7866 | } | ||
7867 | |||
7868 | /* | ||
7869 | * replace tree blocks in the fs tree with tree blocks in | ||
7870 | * the reloc tree. | ||
7871 | */ | ||
7872 | ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level); | ||
7873 | BUG_ON(ret < 0); | ||
7874 | |||
7875 | if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { | ||
7876 | ret = btrfs_search_slot(trans, reloc_root, first_key, path, | ||
7877 | 0, 0); | ||
7878 | BUG_ON(ret); | ||
7879 | extent_buffer_get(path->nodes[0]); | ||
7880 | eb = path->nodes[0]; | ||
7881 | btrfs_release_path(reloc_root, path); | ||
7882 | ret = invalidate_extent_cache(reloc_root, eb, group, root); | ||
7883 | BUG_ON(ret); | ||
7884 | free_extent_buffer(eb); | ||
7885 | } | ||
7886 | |||
7887 | mutex_unlock(&root->fs_info->tree_reloc_mutex); | ||
7888 | path->lowest_level = 0; | ||
7889 | return 0; | ||
7890 | } | ||
7891 | |||
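The block comment before relocate_one_path() explains why reloc trees share relocated blocks: the first COW through any reloc tree produces the copy that every later reloc tree reuses. Below is a deliberately simplified userspace sketch of that memoized-COW idea; a plain lookup table stands in for the shared reloc-tree blocks, and none of it is btrfs code.

    #include <stdio.h>

    #define NBLOCKS 8

    /* old_to_new[i] != 0 means block i was already COWed through some
     * reloc tree; later reloc trees just reuse that copy */
    static long old_to_new[NBLOCKS];
    static long next_new = 100;

    static long relocate_block(int old)
    {
            if (!old_to_new[old])
                    old_to_new[old] = next_new++;   /* first COW */
            return old_to_new[old];                 /* shared afterwards */
    }

    int main(void)
    {
            /* two subvol reloc trees reach the same shared block 3 */
            printf("subvol A -> %ld\n", relocate_block(3));
            printf("subvol B -> %ld\n", relocate_block(3)); /* same copy */
            return 0;
    }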
7892 | static noinline int relocate_tree_block(struct btrfs_trans_handle *trans, | ||
7893 | struct btrfs_root *root, | ||
7894 | struct btrfs_path *path, | ||
7895 | struct btrfs_key *first_key, | ||
7896 | struct btrfs_ref_path *ref_path) | ||
7897 | { | ||
7898 | int ret; | ||
7899 | |||
7900 | ret = relocate_one_path(trans, root, path, first_key, | ||
7901 | ref_path, NULL, NULL); | ||
7902 | BUG_ON(ret); | ||
7903 | |||
7904 | return 0; | ||
7905 | } | ||
7906 | |||
7907 | static noinline int del_extent_zero(struct btrfs_trans_handle *trans, | ||
7908 | struct btrfs_root *extent_root, | ||
7909 | struct btrfs_path *path, | ||
7910 | struct btrfs_key *extent_key) | ||
7911 | { | ||
7912 | int ret; | ||
7913 | |||
7914 | ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1); | ||
7915 | if (ret) | ||
7916 | goto out; | ||
7917 | ret = btrfs_del_item(trans, extent_root, path); | ||
7918 | out: | ||
7919 | btrfs_release_path(extent_root, path); | ||
7920 | return ret; | ||
7921 | } | ||
7922 | |||
7923 | static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info, | ||
7924 | struct btrfs_ref_path *ref_path) | ||
7925 | { | ||
7926 | struct btrfs_key root_key; | ||
7927 | |||
7928 | root_key.objectid = ref_path->root_objectid; | ||
7929 | root_key.type = BTRFS_ROOT_ITEM_KEY; | ||
7930 | if (is_cowonly_root(ref_path->root_objectid)) | ||
7931 | root_key.offset = 0; | ||
7932 | else | ||
7933 | root_key.offset = (u64)-1; | ||
7934 | |||
7935 | return btrfs_read_fs_root_no_name(fs_info, &root_key); | ||
7936 | } | ||
7937 | |||
7938 | static noinline int relocate_one_extent(struct btrfs_root *extent_root, | ||
7939 | struct btrfs_path *path, | ||
7940 | struct btrfs_key *extent_key, | ||
7941 | struct btrfs_block_group_cache *group, | ||
7942 | struct inode *reloc_inode, int pass) | ||
7943 | { | ||
7944 | struct btrfs_trans_handle *trans; | ||
7945 | struct btrfs_root *found_root; | ||
7946 | struct btrfs_ref_path *ref_path = NULL; | ||
7947 | struct disk_extent *new_extents = NULL; | ||
7948 | int nr_extents = 0; | ||
7949 | int loops; | ||
7950 | int ret; | ||
7951 | int level; | ||
7952 | struct btrfs_key first_key; | ||
7953 | u64 prev_block = 0; | ||
7954 | |||
7955 | |||
7956 | trans = btrfs_start_transaction(extent_root, 1); | ||
7957 | BUG_ON(IS_ERR(trans)); | ||
7958 | |||
7959 | if (extent_key->objectid == 0) { | ||
7960 | ret = del_extent_zero(trans, extent_root, path, extent_key); | ||
7961 | goto out; | ||
7962 | } | ||
7963 | |||
7964 | ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS); | ||
7965 | if (!ref_path) { | ||
7966 | ret = -ENOMEM; | ||
7967 | goto out; | ||
7968 | } | ||
7969 | |||
7970 | for (loops = 0; ; loops++) { | ||
7971 | if (loops == 0) { | ||
7972 | ret = btrfs_first_ref_path(trans, extent_root, ref_path, | ||
7973 | extent_key->objectid); | ||
7974 | } else { | ||
7975 | ret = btrfs_next_ref_path(trans, extent_root, ref_path); | ||
7976 | } | ||
7977 | if (ret < 0) | ||
7978 | goto out; | ||
7979 | if (ret > 0) | ||
7980 | break; | ||
7981 | |||
7982 | if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID || | ||
7983 | ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID) | ||
7984 | continue; | ||
7985 | |||
7986 | found_root = read_ref_root(extent_root->fs_info, ref_path); | ||
7987 | BUG_ON(!found_root); | ||
7988 | /* | ||
7989 | * for a reference counted tree, only process reference paths | ||
7990 | * rooted at the latest committed root. | ||
7991 | */ | ||
7992 | if (found_root->ref_cows && | ||
7993 | ref_path->root_generation != found_root->root_key.offset) | ||
7994 | continue; | ||
7995 | |||
7996 | if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { | ||
7997 | if (pass == 0) { | ||
7998 | /* | ||
7999 | * copy data extents to new locations | ||
8000 | */ | ||
8001 | u64 group_start = group->key.objectid; | ||
8002 | ret = relocate_data_extent(reloc_inode, | ||
8003 | extent_key, | ||
8004 | group_start); | ||
8005 | if (ret < 0) | ||
8006 | goto out; | ||
8007 | break; | ||
8008 | } | ||
8009 | level = 0; | ||
8010 | } else { | ||
8011 | level = ref_path->owner_objectid; | ||
8012 | } | ||
8013 | |||
8014 | if (prev_block != ref_path->nodes[level]) { | ||
8015 | struct extent_buffer *eb; | ||
8016 | u64 block_start = ref_path->nodes[level]; | ||
8017 | u64 block_size = btrfs_level_size(found_root, level); | ||
8018 | |||
8019 | eb = read_tree_block(found_root, block_start, | ||
8020 | block_size, 0); | ||
8021 | if (!eb) { | ||
8022 | ret = -EIO; | ||
8023 | goto out; | ||
8024 | } | ||
8025 | btrfs_tree_lock(eb); | ||
8026 | BUG_ON(level != btrfs_header_level(eb)); | ||
8027 | |||
8028 | if (level == 0) | ||
8029 | btrfs_item_key_to_cpu(eb, &first_key, 0); | ||
8030 | else | ||
8031 | btrfs_node_key_to_cpu(eb, &first_key, 0); | ||
8032 | |||
8033 | btrfs_tree_unlock(eb); | ||
8034 | free_extent_buffer(eb); | ||
8035 | prev_block = block_start; | ||
8036 | } | ||
8037 | |||
8038 | mutex_lock(&extent_root->fs_info->trans_mutex); | ||
8039 | btrfs_record_root_in_trans(found_root); | ||
8040 | mutex_unlock(&extent_root->fs_info->trans_mutex); | ||
8041 | if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) { | ||
8042 | /* | ||
8043 | * try to update data extent references while | ||
8044 | * keeping metadata shared between snapshots. | ||
8045 | */ | ||
8046 | if (pass == 1) { | ||
8047 | ret = relocate_one_path(trans, found_root, | ||
8048 | path, &first_key, ref_path, | ||
8049 | group, reloc_inode); | ||
8050 | if (ret < 0) | ||
8051 | goto out; | ||
8052 | continue; | ||
8053 | } | ||
8054 | /* | ||
8055 | * use the fallback method to process the remaining | ||
8056 | * references. | ||
8057 | */ | ||
8058 | if (!new_extents) { | ||
8059 | u64 group_start = group->key.objectid; | ||
8060 | new_extents = kmalloc(sizeof(*new_extents), | ||
8061 | GFP_NOFS); | ||
8062 | if (!new_extents) { | ||
8063 | ret = -ENOMEM; | ||
8064 | goto out; | ||
8065 | } | ||
8066 | nr_extents = 1; | ||
8067 | ret = get_new_locations(reloc_inode, | ||
8068 | extent_key, | ||
8069 | group_start, 1, | ||
8070 | &new_extents, | ||
8071 | &nr_extents); | ||
8072 | if (ret) | ||
8073 | goto out; | ||
8074 | } | ||
8075 | ret = replace_one_extent(trans, found_root, | ||
8076 | path, extent_key, | ||
8077 | &first_key, ref_path, | ||
8078 | new_extents, nr_extents); | ||
8079 | } else { | ||
8080 | ret = relocate_tree_block(trans, found_root, path, | ||
8081 | &first_key, ref_path); | ||
8082 | } | ||
8083 | if (ret < 0) | ||
8084 | goto out; | ||
8085 | } | ||
8086 | ret = 0; | ||
8087 | out: | ||
8088 | btrfs_end_transaction(trans, extent_root); | ||
8089 | kfree(new_extents); | ||
8090 | kfree(ref_path); | ||
8091 | return ret; | ||
8092 | } | ||
8093 | #endif | ||
8094 | |||
8095 | static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) | 6445 | static u64 update_block_group_flags(struct btrfs_root *root, u64 flags) |
8096 | { | 6446 | { |
8097 | u64 num_devices; | 6447 | u64 num_devices; |
@@ -8555,10 +6905,16 @@ int btrfs_read_block_groups(struct btrfs_root *root) | |||
8555 | ret = -ENOMEM; | 6905 | ret = -ENOMEM; |
8556 | goto error; | 6906 | goto error; |
8557 | } | 6907 | } |
6908 | cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), | ||
6909 | GFP_NOFS); | ||
6910 | if (!cache->free_space_ctl) { | ||
6911 | kfree(cache); | ||
6912 | ret = -ENOMEM; | ||
6913 | goto error; | ||
6914 | } | ||
8558 | 6915 | ||
8559 | atomic_set(&cache->count, 1); | 6916 | atomic_set(&cache->count, 1); |
8560 | spin_lock_init(&cache->lock); | 6917 | spin_lock_init(&cache->lock); |
8561 | spin_lock_init(&cache->tree_lock); | ||
8562 | cache->fs_info = info; | 6918 | cache->fs_info = info; |
8563 | INIT_LIST_HEAD(&cache->list); | 6919 | INIT_LIST_HEAD(&cache->list); |
8564 | INIT_LIST_HEAD(&cache->cluster_list); | 6920 | INIT_LIST_HEAD(&cache->cluster_list); |
@@ -8566,24 +6922,18 @@ int btrfs_read_block_groups(struct btrfs_root *root) | |||
8566 | if (need_clear) | 6922 | if (need_clear) |
8567 | cache->disk_cache_state = BTRFS_DC_CLEAR; | 6923 | cache->disk_cache_state = BTRFS_DC_CLEAR; |
8568 | 6924 | ||
8569 | /* | ||
8570 | * we only want to have 32k of RAM per block group for keeping | ||
8571 | * track of free space, and if we pass 1/2 of that we want to | ||
8572 | * start converting things over to using bitmaps | ||
8573 | */ | ||
8574 | cache->extents_thresh = ((1024 * 32) / 2) / | ||
8575 | sizeof(struct btrfs_free_space); | ||
8576 | |||
8577 | read_extent_buffer(leaf, &cache->item, | 6925 | read_extent_buffer(leaf, &cache->item, |
8578 | btrfs_item_ptr_offset(leaf, path->slots[0]), | 6926 | btrfs_item_ptr_offset(leaf, path->slots[0]), |
8579 | sizeof(cache->item)); | 6927 | sizeof(cache->item)); |
8580 | memcpy(&cache->key, &found_key, sizeof(found_key)); | 6928 | memcpy(&cache->key, &found_key, sizeof(found_key)); |
8581 | 6929 | ||
8582 | key.objectid = found_key.objectid + found_key.offset; | 6930 | key.objectid = found_key.objectid + found_key.offset; |
8583 | btrfs_release_path(root, path); | 6931 | btrfs_release_path(path); |
8584 | cache->flags = btrfs_block_group_flags(&cache->item); | 6932 | cache->flags = btrfs_block_group_flags(&cache->item); |
8585 | cache->sectorsize = root->sectorsize; | 6933 | cache->sectorsize = root->sectorsize; |
8586 | 6934 | ||
6935 | btrfs_init_free_space_ctl(cache); | ||
6936 | |||
8587 | /* | 6937 | /* |
8588 | * We need to exclude the super stripes now so that the space | 6938 | * We need to exclude the super stripes now so that the space |
8589 | * info has super bytes accounted for, otherwise we'll think | 6939 | * info has super bytes accounted for, otherwise we'll think |
@@ -8670,6 +7020,12 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, | |||
8670 | cache = kzalloc(sizeof(*cache), GFP_NOFS); | 7020 | cache = kzalloc(sizeof(*cache), GFP_NOFS); |
8671 | if (!cache) | 7021 | if (!cache) |
8672 | return -ENOMEM; | 7022 | return -ENOMEM; |
7023 | cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl), | ||
7024 | GFP_NOFS); | ||
7025 | if (!cache->free_space_ctl) { | ||
7026 | kfree(cache); | ||
7027 | return -ENOMEM; | ||
7028 | } | ||
8673 | 7029 | ||
8674 | cache->key.objectid = chunk_offset; | 7030 | cache->key.objectid = chunk_offset; |
8675 | cache->key.offset = size; | 7031 | cache->key.offset = size; |
@@ -8677,19 +7033,13 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans, | |||
8677 | cache->sectorsize = root->sectorsize; | 7033 | cache->sectorsize = root->sectorsize; |
8678 | cache->fs_info = root->fs_info; | 7034 | cache->fs_info = root->fs_info; |
8679 | 7035 | ||
8680 | /* | ||
8681 | * we only want to have 32k of RAM per block group for keeping track | ||
8682 | * of free space, and if we pass 1/2 of that we want to start | ||
8683 | * converting things over to using bitmaps | ||
8684 | */ | ||
8685 | cache->extents_thresh = ((1024 * 32) / 2) / | ||
8686 | sizeof(struct btrfs_free_space); | ||
8687 | atomic_set(&cache->count, 1); | 7036 | atomic_set(&cache->count, 1); |
8688 | spin_lock_init(&cache->lock); | 7037 | spin_lock_init(&cache->lock); |
8689 | spin_lock_init(&cache->tree_lock); | ||
8690 | INIT_LIST_HEAD(&cache->list); | 7038 | INIT_LIST_HEAD(&cache->list); |
8691 | INIT_LIST_HEAD(&cache->cluster_list); | 7039 | INIT_LIST_HEAD(&cache->cluster_list); |
8692 | 7040 | ||
7041 | btrfs_init_free_space_ctl(cache); | ||
7042 | |||
8693 | btrfs_set_block_group_used(&cache->item, bytes_used); | 7043 | btrfs_set_block_group_used(&cache->item, bytes_used); |
8694 | btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid); | 7044 | btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid); |
8695 | cache->flags = type; | 7045 | cache->flags = type; |
@@ -8802,12 +7152,12 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans, | |||
8802 | if (ret < 0) | 7152 | if (ret < 0) |
8803 | goto out; | 7153 | goto out; |
8804 | if (ret > 0) | 7154 | if (ret > 0) |
8805 | btrfs_release_path(tree_root, path); | 7155 | btrfs_release_path(path); |
8806 | if (ret == 0) { | 7156 | if (ret == 0) { |
8807 | ret = btrfs_del_item(trans, tree_root, path); | 7157 | ret = btrfs_del_item(trans, tree_root, path); |
8808 | if (ret) | 7158 | if (ret) |
8809 | goto out; | 7159 | goto out; |
8810 | btrfs_release_path(tree_root, path); | 7160 | btrfs_release_path(path); |
8811 | } | 7161 | } |
8812 | 7162 | ||
8813 | spin_lock(&root->fs_info->block_group_cache_lock); | 7163 | spin_lock(&root->fs_info->block_group_cache_lock); |
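The hunks above move per-block-group free-space bookkeeping into a separately allocated free_space_ctl, so both btrfs_read_block_groups() and btrfs_make_block_group() now allocate two objects and must free the partially built group if the second allocation fails. A compact userspace sketch of that two-stage allocate-or-unwind pattern follows; the struct names are placeholders, not the kernel definitions.

    #include <stdio.h>
    #include <stdlib.h>

    struct free_space_ctl { int placeholder; };
    struct block_group { struct free_space_ctl *free_space_ctl; };

    /* if the embedded controller allocation fails, the partially built
     * group must be freed before reporting the failure */
    static struct block_group *alloc_group(void)
    {
            struct block_group *bg = calloc(1, sizeof(*bg));

            if (!bg)
                    return NULL;
            bg->free_space_ctl = calloc(1, sizeof(*bg->free_space_ctl));
            if (!bg->free_space_ctl) {
                    free(bg);
                    return NULL;
            }
            return bg;
    }

    int main(void)
    {
            struct block_group *bg = alloc_group();

            if (!bg)
                    return 1;
            printf("group ready\n");
            free(bg->free_space_ctl);
            free(bg);
            return 0;
    }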
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c index 96fcfa522dab..c5d9fbb92bc3 100644 --- a/fs/btrfs/extent_io.c +++ b/fs/btrfs/extent_io.c | |||
@@ -11,6 +11,7 @@ | |||
11 | #include <linux/writeback.h> | 11 | #include <linux/writeback.h> |
12 | #include <linux/pagevec.h> | 12 | #include <linux/pagevec.h> |
13 | #include <linux/prefetch.h> | 13 | #include <linux/prefetch.h> |
14 | #include <linux/cleancache.h> | ||
14 | #include "extent_io.h" | 15 | #include "extent_io.h" |
15 | #include "extent_map.h" | 16 | #include "extent_map.h" |
16 | #include "compat.h" | 17 | #include "compat.h" |
@@ -102,7 +103,7 @@ void extent_io_exit(void) | |||
102 | } | 103 | } |
103 | 104 | ||
104 | void extent_io_tree_init(struct extent_io_tree *tree, | 105 | void extent_io_tree_init(struct extent_io_tree *tree, |
105 | struct address_space *mapping, gfp_t mask) | 106 | struct address_space *mapping) |
106 | { | 107 | { |
107 | tree->state = RB_ROOT; | 108 | tree->state = RB_ROOT; |
108 | INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC); | 109 | INIT_RADIX_TREE(&tree->buffer, GFP_ATOMIC); |
@@ -440,6 +441,15 @@ static int clear_state_bit(struct extent_io_tree *tree, | |||
440 | return ret; | 441 | return ret; |
441 | } | 442 | } |
442 | 443 | ||
444 | static struct extent_state * | ||
445 | alloc_extent_state_atomic(struct extent_state *prealloc) | ||
446 | { | ||
447 | if (!prealloc) | ||
448 | prealloc = alloc_extent_state(GFP_ATOMIC); | ||
449 | |||
450 | return prealloc; | ||
451 | } | ||
452 | |||
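alloc_extent_state_atomic(), added above, reuses a preallocated extent_state when one is still around and otherwise falls back to a GFP_ATOMIC allocation, which is why its callers can assert BUG_ON(!prealloc) immediately afterwards. A small userspace stand-in for that reuse-or-allocate helper is shown below; malloc plays the role of the atomic allocation and the names are illustrative only.

    #include <stdio.h>
    #include <stdlib.h>

    struct state { int dummy; };

    /* reuse an earlier preallocation if we still have one, otherwise
     * fall back to an allocation that must not sleep */
    static struct state *get_prealloc(struct state *prealloc)
    {
            if (!prealloc)
                    prealloc = malloc(sizeof(*prealloc)); /* "GFP_ATOMIC" */
            return prealloc;
    }

    int main(void)
    {
            struct state *s = get_prealloc(NULL);

            printf("have prealloc: %d\n", s != NULL);
            free(s);
            return 0;
    }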
443 | /* | 453 | /* |
444 | * clear some bits on a range in the tree. This may require splitting | 454 | * clear some bits on a range in the tree. This may require splitting |
445 | * or inserting elements in the tree, so the gfp mask is used to | 455 | * or inserting elements in the tree, so the gfp mask is used to |
@@ -530,8 +540,8 @@ hit_next: | |||
530 | */ | 540 | */ |
531 | 541 | ||
532 | if (state->start < start) { | 542 | if (state->start < start) { |
533 | if (!prealloc) | 543 | prealloc = alloc_extent_state_atomic(prealloc); |
534 | prealloc = alloc_extent_state(GFP_ATOMIC); | 544 | BUG_ON(!prealloc); |
535 | err = split_state(tree, state, prealloc, start); | 545 | err = split_state(tree, state, prealloc, start); |
536 | BUG_ON(err == -EEXIST); | 546 | BUG_ON(err == -EEXIST); |
537 | prealloc = NULL; | 547 | prealloc = NULL; |
@@ -552,8 +562,8 @@ hit_next: | |||
552 | * on the first half | 562 | * on the first half |
553 | */ | 563 | */ |
554 | if (state->start <= end && state->end > end) { | 564 | if (state->start <= end && state->end > end) { |
555 | if (!prealloc) | 565 | prealloc = alloc_extent_state_atomic(prealloc); |
556 | prealloc = alloc_extent_state(GFP_ATOMIC); | 566 | BUG_ON(!prealloc); |
557 | err = split_state(tree, state, prealloc, end + 1); | 567 | err = split_state(tree, state, prealloc, end + 1); |
558 | BUG_ON(err == -EEXIST); | 568 | BUG_ON(err == -EEXIST); |
559 | if (wake) | 569 | if (wake) |
@@ -726,8 +736,7 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, | |||
726 | again: | 736 | again: |
727 | if (!prealloc && (mask & __GFP_WAIT)) { | 737 | if (!prealloc && (mask & __GFP_WAIT)) { |
728 | prealloc = alloc_extent_state(mask); | 738 | prealloc = alloc_extent_state(mask); |
729 | if (!prealloc) | 739 | BUG_ON(!prealloc); |
730 | return -ENOMEM; | ||
731 | } | 740 | } |
732 | 741 | ||
733 | spin_lock(&tree->lock); | 742 | spin_lock(&tree->lock); |
@@ -744,6 +753,8 @@ again: | |||
744 | */ | 753 | */ |
745 | node = tree_search(tree, start); | 754 | node = tree_search(tree, start); |
746 | if (!node) { | 755 | if (!node) { |
756 | prealloc = alloc_extent_state_atomic(prealloc); | ||
757 | BUG_ON(!prealloc); | ||
747 | err = insert_state(tree, prealloc, start, end, &bits); | 758 | err = insert_state(tree, prealloc, start, end, &bits); |
748 | prealloc = NULL; | 759 | prealloc = NULL; |
749 | BUG_ON(err == -EEXIST); | 760 | BUG_ON(err == -EEXIST); |
@@ -772,20 +783,18 @@ hit_next: | |||
772 | if (err) | 783 | if (err) |
773 | goto out; | 784 | goto out; |
774 | 785 | ||
786 | next_node = rb_next(node); | ||
775 | cache_state(state, cached_state); | 787 | cache_state(state, cached_state); |
776 | merge_state(tree, state); | 788 | merge_state(tree, state); |
777 | if (last_end == (u64)-1) | 789 | if (last_end == (u64)-1) |
778 | goto out; | 790 | goto out; |
779 | 791 | ||
780 | start = last_end + 1; | 792 | start = last_end + 1; |
781 | if (start < end && prealloc && !need_resched()) { | 793 | if (next_node && start < end && prealloc && !need_resched()) { |
782 | next_node = rb_next(node); | 794 | state = rb_entry(next_node, struct extent_state, |
783 | if (next_node) { | 795 | rb_node); |
784 | state = rb_entry(next_node, struct extent_state, | 796 | if (state->start == start) |
785 | rb_node); | 797 | goto hit_next; |
786 | if (state->start == start) | ||
787 | goto hit_next; | ||
788 | } | ||
789 | } | 798 | } |
790 | goto search_again; | 799 | goto search_again; |
791 | } | 800 | } |
@@ -812,6 +821,9 @@ hit_next: | |||
812 | err = -EEXIST; | 821 | err = -EEXIST; |
813 | goto out; | 822 | goto out; |
814 | } | 823 | } |
824 | |||
825 | prealloc = alloc_extent_state_atomic(prealloc); | ||
826 | BUG_ON(!prealloc); | ||
815 | err = split_state(tree, state, prealloc, start); | 827 | err = split_state(tree, state, prealloc, start); |
816 | BUG_ON(err == -EEXIST); | 828 | BUG_ON(err == -EEXIST); |
817 | prealloc = NULL; | 829 | prealloc = NULL; |
@@ -842,14 +854,25 @@ hit_next: | |||
842 | this_end = end; | 854 | this_end = end; |
843 | else | 855 | else |
844 | this_end = last_start - 1; | 856 | this_end = last_start - 1; |
857 | |||
858 | prealloc = alloc_extent_state_atomic(prealloc); | ||
859 | BUG_ON(!prealloc); | ||
860 | |||
861 | /* | ||
862 | * Avoid freeing 'prealloc' if it can be merged with | ||
863 | * the later extent. | ||
864 | */ | ||
865 | atomic_inc(&prealloc->refs); | ||
845 | err = insert_state(tree, prealloc, start, this_end, | 866 | err = insert_state(tree, prealloc, start, this_end, |
846 | &bits); | 867 | &bits); |
847 | BUG_ON(err == -EEXIST); | 868 | BUG_ON(err == -EEXIST); |
848 | if (err) { | 869 | if (err) { |
870 | free_extent_state(prealloc); | ||
849 | prealloc = NULL; | 871 | prealloc = NULL; |
850 | goto out; | 872 | goto out; |
851 | } | 873 | } |
852 | cache_state(prealloc, cached_state); | 874 | cache_state(prealloc, cached_state); |
875 | free_extent_state(prealloc); | ||
853 | prealloc = NULL; | 876 | prealloc = NULL; |
854 | start = this_end + 1; | 877 | start = this_end + 1; |
855 | goto search_again; | 878 | goto search_again; |
@@ -866,6 +889,9 @@ hit_next: | |||
866 | err = -EEXIST; | 889 | err = -EEXIST; |
867 | goto out; | 890 | goto out; |
868 | } | 891 | } |
892 | |||
893 | prealloc = alloc_extent_state_atomic(prealloc); | ||
894 | BUG_ON(!prealloc); | ||
869 | err = split_state(tree, state, prealloc, end + 1); | 895 | err = split_state(tree, state, prealloc, end + 1); |
870 | BUG_ON(err == -EEXIST); | 896 | BUG_ON(err == -EEXIST); |
871 | 897 | ||
@@ -942,13 +968,6 @@ int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, | |||
942 | NULL, mask); | 968 | NULL, mask); |
943 | } | 969 | } |
944 | 970 | ||
945 | static int clear_extent_new(struct extent_io_tree *tree, u64 start, u64 end, | ||
946 | gfp_t mask) | ||
947 | { | ||
948 | return clear_extent_bit(tree, start, end, EXTENT_NEW, 0, 0, | ||
949 | NULL, mask); | ||
950 | } | ||
951 | |||
952 | int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, | 971 | int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, |
953 | struct extent_state **cached_state, gfp_t mask) | 972 | struct extent_state **cached_state, gfp_t mask) |
954 | { | 973 | { |
@@ -964,11 +983,6 @@ static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, | |||
964 | cached_state, mask); | 983 | cached_state, mask); |
965 | } | 984 | } |
966 | 985 | ||
967 | int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end) | ||
968 | { | ||
969 | return wait_extent_bit(tree, start, end, EXTENT_WRITEBACK); | ||
970 | } | ||
971 | |||
972 | /* | 986 | /* |
973 | * either insert or lock state struct between start and end use mask to tell | 987 | * either insert or lock state struct between start and end use mask to tell |
974 | * us if waiting is desired. | 988 | * us if waiting is desired. |
@@ -1029,25 +1043,6 @@ int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end, gfp_t mask) | |||
1029 | } | 1043 | } |
1030 | 1044 | ||
1031 | /* | 1045 | /* |
1032 | * helper function to set pages and extents in the tree dirty | ||
1033 | */ | ||
1034 | int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end) | ||
1035 | { | ||
1036 | unsigned long index = start >> PAGE_CACHE_SHIFT; | ||
1037 | unsigned long end_index = end >> PAGE_CACHE_SHIFT; | ||
1038 | struct page *page; | ||
1039 | |||
1040 | while (index <= end_index) { | ||
1041 | page = find_get_page(tree->mapping, index); | ||
1042 | BUG_ON(!page); | ||
1043 | __set_page_dirty_nobuffers(page); | ||
1044 | page_cache_release(page); | ||
1045 | index++; | ||
1046 | } | ||
1047 | return 0; | ||
1048 | } | ||
1049 | |||
1050 | /* | ||
1051 | * helper function to set both pages and extents in the tree writeback | 1046 | * helper function to set both pages and extents in the tree writeback |
1052 | */ | 1047 | */ |
1053 | static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) | 1048 | static int set_range_writeback(struct extent_io_tree *tree, u64 start, u64 end) |
@@ -1820,46 +1815,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err) | |||
1820 | bio_put(bio); | 1815 | bio_put(bio); |
1821 | } | 1816 | } |
1822 | 1817 | ||
1823 | /* | ||
1824 | * IO done from prepare_write is pretty simple, we just unlock | ||
1825 | * the structs in the extent tree when done, and set the uptodate bits | ||
1826 | * as appropriate. | ||
1827 | */ | ||
1828 | static void end_bio_extent_preparewrite(struct bio *bio, int err) | ||
1829 | { | ||
1830 | const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); | ||
1831 | struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; | ||
1832 | struct extent_io_tree *tree; | ||
1833 | u64 start; | ||
1834 | u64 end; | ||
1835 | |||
1836 | do { | ||
1837 | struct page *page = bvec->bv_page; | ||
1838 | struct extent_state *cached = NULL; | ||
1839 | tree = &BTRFS_I(page->mapping->host)->io_tree; | ||
1840 | |||
1841 | start = ((u64)page->index << PAGE_CACHE_SHIFT) + | ||
1842 | bvec->bv_offset; | ||
1843 | end = start + bvec->bv_len - 1; | ||
1844 | |||
1845 | if (--bvec >= bio->bi_io_vec) | ||
1846 | prefetchw(&bvec->bv_page->flags); | ||
1847 | |||
1848 | if (uptodate) { | ||
1849 | set_extent_uptodate(tree, start, end, &cached, | ||
1850 | GFP_ATOMIC); | ||
1851 | } else { | ||
1852 | ClearPageUptodate(page); | ||
1853 | SetPageError(page); | ||
1854 | } | ||
1855 | |||
1856 | unlock_extent_cached(tree, start, end, &cached, GFP_ATOMIC); | ||
1857 | |||
1858 | } while (bvec >= bio->bi_io_vec); | ||
1859 | |||
1860 | bio_put(bio); | ||
1861 | } | ||
1862 | |||
1863 | struct bio * | 1818 | struct bio * |
1864 | btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, | 1819 | btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs, |
1865 | gfp_t gfp_flags) | 1820 | gfp_t gfp_flags) |
@@ -2008,7 +1963,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
2008 | struct btrfs_ordered_extent *ordered; | 1963 | struct btrfs_ordered_extent *ordered; |
2009 | int ret; | 1964 | int ret; |
2010 | int nr = 0; | 1965 | int nr = 0; |
2011 | size_t page_offset = 0; | 1966 | size_t pg_offset = 0; |
2012 | size_t iosize; | 1967 | size_t iosize; |
2013 | size_t disk_io_size; | 1968 | size_t disk_io_size; |
2014 | size_t blocksize = inode->i_sb->s_blocksize; | 1969 | size_t blocksize = inode->i_sb->s_blocksize; |
@@ -2016,6 +1971,13 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
2016 | 1971 | ||
2017 | set_page_extent_mapped(page); | 1972 | set_page_extent_mapped(page); |
2018 | 1973 | ||
1974 | if (!PageUptodate(page)) { | ||
1975 | if (cleancache_get_page(page) == 0) { | ||
1976 | BUG_ON(blocksize != PAGE_SIZE); | ||
1977 | goto out; | ||
1978 | } | ||
1979 | } | ||
1980 | |||
2019 | end = page_end; | 1981 | end = page_end; |
2020 | while (1) { | 1982 | while (1) { |
2021 | lock_extent(tree, start, end, GFP_NOFS); | 1983 | lock_extent(tree, start, end, GFP_NOFS); |
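The cleancache_get_page() hook added above lets the read path skip issuing a bio entirely when the kernel's transcendent-memory pool still holds a copy of the page. The general shape of such a hook, sketched outside btrfs; the helper name and the place it would be called from are illustrative, while the page-flag and cleancache calls are real kernel APIs:

#include <linux/cleancache.h>
#include <linux/pagemap.h>

/* hypothetical helper: try to satisfy a whole-page read from cleancache */
static int try_read_from_cleancache(struct page *page)
{
        if (!PageUptodate(page) && cleancache_get_page(page) == 0) {
                SetPageUptodate(page);
                unlock_page(page);
                return 0;               /* page filled from the pool, no I/O needed */
        }
        return -ENODATA;                /* fall back to a real read */
}

The counterpart operations live outside the read path: a filesystem opts in at mount time (cleancache_init_fs()) and the VM hands clean pages to the pool when they are dropped from the page cache (cleancache_put_page()), which is why the btrfs hunk only needs this single get-side check plus the BUG_ON restricting it to blocksize == PAGE_SIZE.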
@@ -2044,9 +2006,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
2044 | char *userpage; | 2006 | char *userpage; |
2045 | struct extent_state *cached = NULL; | 2007 | struct extent_state *cached = NULL; |
2046 | 2008 | ||
2047 | iosize = PAGE_CACHE_SIZE - page_offset; | 2009 | iosize = PAGE_CACHE_SIZE - pg_offset; |
2048 | userpage = kmap_atomic(page, KM_USER0); | 2010 | userpage = kmap_atomic(page, KM_USER0); |
2049 | memset(userpage + page_offset, 0, iosize); | 2011 | memset(userpage + pg_offset, 0, iosize); |
2050 | flush_dcache_page(page); | 2012 | flush_dcache_page(page); |
2051 | kunmap_atomic(userpage, KM_USER0); | 2013 | kunmap_atomic(userpage, KM_USER0); |
2052 | set_extent_uptodate(tree, cur, cur + iosize - 1, | 2014 | set_extent_uptodate(tree, cur, cur + iosize - 1, |
@@ -2055,9 +2017,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
2055 | &cached, GFP_NOFS); | 2017 | &cached, GFP_NOFS); |
2056 | break; | 2018 | break; |
2057 | } | 2019 | } |
2058 | em = get_extent(inode, page, page_offset, cur, | 2020 | em = get_extent(inode, page, pg_offset, cur, |
2059 | end - cur + 1, 0); | 2021 | end - cur + 1, 0); |
2060 | if (IS_ERR(em) || !em) { | 2022 | if (IS_ERR_OR_NULL(em)) { |
2061 | SetPageError(page); | 2023 | SetPageError(page); |
2062 | unlock_extent(tree, cur, end, GFP_NOFS); | 2024 | unlock_extent(tree, cur, end, GFP_NOFS); |
2063 | break; | 2025 | break; |
@@ -2095,7 +2057,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
2095 | struct extent_state *cached = NULL; | 2057 | struct extent_state *cached = NULL; |
2096 | 2058 | ||
2097 | userpage = kmap_atomic(page, KM_USER0); | 2059 | userpage = kmap_atomic(page, KM_USER0); |
2098 | memset(userpage + page_offset, 0, iosize); | 2060 | memset(userpage + pg_offset, 0, iosize); |
2099 | flush_dcache_page(page); | 2061 | flush_dcache_page(page); |
2100 | kunmap_atomic(userpage, KM_USER0); | 2062 | kunmap_atomic(userpage, KM_USER0); |
2101 | 2063 | ||
@@ -2104,7 +2066,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
2104 | unlock_extent_cached(tree, cur, cur + iosize - 1, | 2066 | unlock_extent_cached(tree, cur, cur + iosize - 1, |
2105 | &cached, GFP_NOFS); | 2067 | &cached, GFP_NOFS); |
2106 | cur = cur + iosize; | 2068 | cur = cur + iosize; |
2107 | page_offset += iosize; | 2069 | pg_offset += iosize; |
2108 | continue; | 2070 | continue; |
2109 | } | 2071 | } |
2110 | /* the get_extent function already copied into the page */ | 2072 | /* the get_extent function already copied into the page */ |
@@ -2113,7 +2075,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
2113 | check_page_uptodate(tree, page); | 2075 | check_page_uptodate(tree, page); |
2114 | unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); | 2076 | unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); |
2115 | cur = cur + iosize; | 2077 | cur = cur + iosize; |
2116 | page_offset += iosize; | 2078 | pg_offset += iosize; |
2117 | continue; | 2079 | continue; |
2118 | } | 2080 | } |
2119 | /* we have an inline extent but it didn't get marked up | 2081 | /* we have an inline extent but it didn't get marked up |
@@ -2123,7 +2085,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
2123 | SetPageError(page); | 2085 | SetPageError(page); |
2124 | unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); | 2086 | unlock_extent(tree, cur, cur + iosize - 1, GFP_NOFS); |
2125 | cur = cur + iosize; | 2087 | cur = cur + iosize; |
2126 | page_offset += iosize; | 2088 | pg_offset += iosize; |
2127 | continue; | 2089 | continue; |
2128 | } | 2090 | } |
2129 | 2091 | ||
@@ -2136,7 +2098,7 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
2136 | unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; | 2098 | unsigned long pnr = (last_byte >> PAGE_CACHE_SHIFT) + 1; |
2137 | pnr -= page->index; | 2099 | pnr -= page->index; |
2138 | ret = submit_extent_page(READ, tree, page, | 2100 | ret = submit_extent_page(READ, tree, page, |
2139 | sector, disk_io_size, page_offset, | 2101 | sector, disk_io_size, pg_offset, |
2140 | bdev, bio, pnr, | 2102 | bdev, bio, pnr, |
2141 | end_bio_extent_readpage, mirror_num, | 2103 | end_bio_extent_readpage, mirror_num, |
2142 | *bio_flags, | 2104 | *bio_flags, |
@@ -2147,8 +2109,9 @@ static int __extent_read_full_page(struct extent_io_tree *tree, | |||
2147 | if (ret) | 2109 | if (ret) |
2148 | SetPageError(page); | 2110 | SetPageError(page); |
2149 | cur = cur + iosize; | 2111 | cur = cur + iosize; |
2150 | page_offset += iosize; | 2112 | pg_offset += iosize; |
2151 | } | 2113 | } |
2114 | out: | ||
2152 | if (!nr) { | 2115 | if (!nr) { |
2153 | if (!PageError(page)) | 2116 | if (!PageError(page)) |
2154 | SetPageUptodate(page); | 2117 | SetPageUptodate(page); |
@@ -2342,7 +2305,7 @@ static int __extent_writepage(struct page *page, struct writeback_control *wbc, | |||
2342 | } | 2305 | } |
2343 | em = epd->get_extent(inode, page, pg_offset, cur, | 2306 | em = epd->get_extent(inode, page, pg_offset, cur, |
2344 | end - cur + 1, 1); | 2307 | end - cur + 1, 1); |
2345 | if (IS_ERR(em) || !em) { | 2308 | if (IS_ERR_OR_NULL(em)) { |
2346 | SetPageError(page); | 2309 | SetPageError(page); |
2347 | break; | 2310 | break; |
2348 | } | 2311 | } |
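The repeated `!em || IS_ERR(em)` tests collapse into IS_ERR_OR_NULL(), which treats both a NULL pointer and an ERR_PTR()-encoded error as failure. Its definition in include/linux/err.h is essentially the following, paraphrased from the kernel header and not part of this patch:

static inline long IS_ERR_OR_NULL(const void *ptr)
{
        return !ptr || IS_ERR_VALUE((unsigned long)ptr);
}

The conversion only suits callers that care about success versus failure; a caller that needs the specific errno still has to distinguish NULL from IS_ERR() and extract it with PTR_ERR().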
@@ -2721,128 +2684,6 @@ int extent_invalidatepage(struct extent_io_tree *tree, | |||
2721 | } | 2684 | } |
2722 | 2685 | ||
2723 | /* | 2686 | /* |
2724 | * simple commit_write call, set_range_dirty is used to mark both | ||
2725 | * the pages and the extent records as dirty | ||
2726 | */ | ||
2727 | int extent_commit_write(struct extent_io_tree *tree, | ||
2728 | struct inode *inode, struct page *page, | ||
2729 | unsigned from, unsigned to) | ||
2730 | { | ||
2731 | loff_t pos = ((loff_t)page->index << PAGE_CACHE_SHIFT) + to; | ||
2732 | |||
2733 | set_page_extent_mapped(page); | ||
2734 | set_page_dirty(page); | ||
2735 | |||
2736 | if (pos > inode->i_size) { | ||
2737 | i_size_write(inode, pos); | ||
2738 | mark_inode_dirty(inode); | ||
2739 | } | ||
2740 | return 0; | ||
2741 | } | ||
2742 | |||
2743 | int extent_prepare_write(struct extent_io_tree *tree, | ||
2744 | struct inode *inode, struct page *page, | ||
2745 | unsigned from, unsigned to, get_extent_t *get_extent) | ||
2746 | { | ||
2747 | u64 page_start = (u64)page->index << PAGE_CACHE_SHIFT; | ||
2748 | u64 page_end = page_start + PAGE_CACHE_SIZE - 1; | ||
2749 | u64 block_start; | ||
2750 | u64 orig_block_start; | ||
2751 | u64 block_end; | ||
2752 | u64 cur_end; | ||
2753 | struct extent_map *em; | ||
2754 | unsigned blocksize = 1 << inode->i_blkbits; | ||
2755 | size_t page_offset = 0; | ||
2756 | size_t block_off_start; | ||
2757 | size_t block_off_end; | ||
2758 | int err = 0; | ||
2759 | int iocount = 0; | ||
2760 | int ret = 0; | ||
2761 | int isnew; | ||
2762 | |||
2763 | set_page_extent_mapped(page); | ||
2764 | |||
2765 | block_start = (page_start + from) & ~((u64)blocksize - 1); | ||
2766 | block_end = (page_start + to - 1) | (blocksize - 1); | ||
2767 | orig_block_start = block_start; | ||
2768 | |||
2769 | lock_extent(tree, page_start, page_end, GFP_NOFS); | ||
2770 | while (block_start <= block_end) { | ||
2771 | em = get_extent(inode, page, page_offset, block_start, | ||
2772 | block_end - block_start + 1, 1); | ||
2773 | if (IS_ERR(em) || !em) | ||
2774 | goto err; | ||
2775 | |||
2776 | cur_end = min(block_end, extent_map_end(em) - 1); | ||
2777 | block_off_start = block_start & (PAGE_CACHE_SIZE - 1); | ||
2778 | block_off_end = block_off_start + blocksize; | ||
2779 | isnew = clear_extent_new(tree, block_start, cur_end, GFP_NOFS); | ||
2780 | |||
2781 | if (!PageUptodate(page) && isnew && | ||
2782 | (block_off_end > to || block_off_start < from)) { | ||
2783 | void *kaddr; | ||
2784 | |||
2785 | kaddr = kmap_atomic(page, KM_USER0); | ||
2786 | if (block_off_end > to) | ||
2787 | memset(kaddr + to, 0, block_off_end - to); | ||
2788 | if (block_off_start < from) | ||
2789 | memset(kaddr + block_off_start, 0, | ||
2790 | from - block_off_start); | ||
2791 | flush_dcache_page(page); | ||
2792 | kunmap_atomic(kaddr, KM_USER0); | ||
2793 | } | ||
2794 | if ((em->block_start != EXTENT_MAP_HOLE && | ||
2795 | em->block_start != EXTENT_MAP_INLINE) && | ||
2796 | !isnew && !PageUptodate(page) && | ||
2797 | (block_off_end > to || block_off_start < from) && | ||
2798 | !test_range_bit(tree, block_start, cur_end, | ||
2799 | EXTENT_UPTODATE, 1, NULL)) { | ||
2800 | u64 sector; | ||
2801 | u64 extent_offset = block_start - em->start; | ||
2802 | size_t iosize; | ||
2803 | sector = (em->block_start + extent_offset) >> 9; | ||
2804 | iosize = (cur_end - block_start + blocksize) & | ||
2805 | ~((u64)blocksize - 1); | ||
2806 | /* | ||
2807 | * we've already got the extent locked, but we | ||
2808 | * need to split the state such that our end_bio | ||
2809 | * handler can clear the lock. | ||
2810 | */ | ||
2811 | set_extent_bit(tree, block_start, | ||
2812 | block_start + iosize - 1, | ||
2813 | EXTENT_LOCKED, 0, NULL, NULL, GFP_NOFS); | ||
2814 | ret = submit_extent_page(READ, tree, page, | ||
2815 | sector, iosize, page_offset, em->bdev, | ||
2816 | NULL, 1, | ||
2817 | end_bio_extent_preparewrite, 0, | ||
2818 | 0, 0); | ||
2819 | if (ret && !err) | ||
2820 | err = ret; | ||
2821 | iocount++; | ||
2822 | block_start = block_start + iosize; | ||
2823 | } else { | ||
2824 | struct extent_state *cached = NULL; | ||
2825 | |||
2826 | set_extent_uptodate(tree, block_start, cur_end, &cached, | ||
2827 | GFP_NOFS); | ||
2828 | unlock_extent_cached(tree, block_start, cur_end, | ||
2829 | &cached, GFP_NOFS); | ||
2830 | block_start = cur_end + 1; | ||
2831 | } | ||
2832 | page_offset = block_start & (PAGE_CACHE_SIZE - 1); | ||
2833 | free_extent_map(em); | ||
2834 | } | ||
2835 | if (iocount) { | ||
2836 | wait_extent_bit(tree, orig_block_start, | ||
2837 | block_end, EXTENT_LOCKED); | ||
2838 | } | ||
2839 | check_page_uptodate(tree, page); | ||
2840 | err: | ||
2841 | /* FIXME, zero out newly allocated blocks on error */ | ||
2842 | return err; | ||
2843 | } | ||
2844 | |||
2845 | /* | ||
2846 | * a helper for releasepage, this tests for areas of the page that | 2687 | * a helper for releasepage, this tests for areas of the page that |
2847 | * are locked or under IO and drops the related state bits if it is safe | 2688 | * are locked or under IO and drops the related state bits if it is safe |
2848 | * to drop the page. | 2689 | * to drop the page. |
@@ -2900,7 +2741,7 @@ int try_release_extent_mapping(struct extent_map_tree *map, | |||
2900 | len = end - start + 1; | 2741 | len = end - start + 1; |
2901 | write_lock(&map->lock); | 2742 | write_lock(&map->lock); |
2902 | em = lookup_extent_mapping(map, start, len); | 2743 | em = lookup_extent_mapping(map, start, len); |
2903 | if (!em || IS_ERR(em)) { | 2744 | if (IS_ERR_OR_NULL(em)) { |
2904 | write_unlock(&map->lock); | 2745 | write_unlock(&map->lock); |
2905 | break; | 2746 | break; |
2906 | } | 2747 | } |
@@ -2928,33 +2769,6 @@ int try_release_extent_mapping(struct extent_map_tree *map, | |||
2928 | return try_release_extent_state(map, tree, page, mask); | 2769 | return try_release_extent_state(map, tree, page, mask); |
2929 | } | 2770 | } |
2930 | 2771 | ||
2931 | sector_t extent_bmap(struct address_space *mapping, sector_t iblock, | ||
2932 | get_extent_t *get_extent) | ||
2933 | { | ||
2934 | struct inode *inode = mapping->host; | ||
2935 | struct extent_state *cached_state = NULL; | ||
2936 | u64 start = iblock << inode->i_blkbits; | ||
2937 | sector_t sector = 0; | ||
2938 | size_t blksize = (1 << inode->i_blkbits); | ||
2939 | struct extent_map *em; | ||
2940 | |||
2941 | lock_extent_bits(&BTRFS_I(inode)->io_tree, start, start + blksize - 1, | ||
2942 | 0, &cached_state, GFP_NOFS); | ||
2943 | em = get_extent(inode, NULL, 0, start, blksize, 0); | ||
2944 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, start, | ||
2945 | start + blksize - 1, &cached_state, GFP_NOFS); | ||
2946 | if (!em || IS_ERR(em)) | ||
2947 | return 0; | ||
2948 | |||
2949 | if (em->block_start > EXTENT_MAP_LAST_BYTE) | ||
2950 | goto out; | ||
2951 | |||
2952 | sector = (em->block_start + start - em->start) >> inode->i_blkbits; | ||
2953 | out: | ||
2954 | free_extent_map(em); | ||
2955 | return sector; | ||
2956 | } | ||
2957 | |||
2958 | /* | 2772 | /* |
2959 | * helper function for fiemap, which doesn't want to see any holes. | 2773 | * helper function for fiemap, which doesn't want to see any holes. |
2960 | * This maps until we find something past 'last' | 2774 | * This maps until we find something past 'last' |
@@ -2977,7 +2791,7 @@ static struct extent_map *get_extent_skip_holes(struct inode *inode, | |||
2977 | break; | 2791 | break; |
2978 | len = (len + sectorsize - 1) & ~(sectorsize - 1); | 2792 | len = (len + sectorsize - 1) & ~(sectorsize - 1); |
2979 | em = get_extent(inode, NULL, 0, offset, len, 0); | 2793 | em = get_extent(inode, NULL, 0, offset, len, 0); |
2980 | if (!em || IS_ERR(em)) | 2794 | if (IS_ERR_OR_NULL(em)) |
2981 | return em; | 2795 | return em; |
2982 | 2796 | ||
2983 | /* if this isn't a hole return it */ | 2797 | /* if this isn't a hole return it */ |
@@ -3031,7 +2845,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
3031 | * because there might be preallocation past i_size | 2845 | * because there might be preallocation past i_size |
3032 | */ | 2846 | */ |
3033 | ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, | 2847 | ret = btrfs_lookup_file_extent(NULL, BTRFS_I(inode)->root, |
3034 | path, inode->i_ino, -1, 0); | 2848 | path, btrfs_ino(inode), -1, 0); |
3035 | if (ret < 0) { | 2849 | if (ret < 0) { |
3036 | btrfs_free_path(path); | 2850 | btrfs_free_path(path); |
3037 | return ret; | 2851 | return ret; |
@@ -3044,7 +2858,7 @@ int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
3044 | found_type = btrfs_key_type(&found_key); | 2858 | found_type = btrfs_key_type(&found_key); |
3045 | 2859 | ||
3046 | /* No extents, but there might be delalloc bits */ | 2860 | /* No extents, but there might be delalloc bits */ |
3047 | if (found_key.objectid != inode->i_ino || | 2861 | if (found_key.objectid != btrfs_ino(inode) || |
3048 | found_type != BTRFS_EXTENT_DATA_KEY) { | 2862 | found_type != BTRFS_EXTENT_DATA_KEY) { |
3049 | /* have to trust i_size as the end */ | 2863 | /* have to trust i_size as the end */ |
3050 | last = (u64)-1; | 2864 | last = (u64)-1; |
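These fiemap hunks are part of the wider i_ino to btrfs_ino() conversion: on-disk items are keyed by the btrfs objectid stored in the inode's location key, and the helper reads that value instead of trusting the VFS inode number. The helper itself is defined elsewhere in the series and is not shown in these hunks; conceptually it is close to the following sketch, where everything beyond reading location.objectid is an assumption rather than a quote of the real code:

/* hypothetical sketch of a btrfs_ino()-style accessor, not the patch's code */
static inline u64 btrfs_ino(struct inode *inode)
{
        /* the key objectid btrfs files this inode's items under;
         * the real helper also special-cases a few reserved objectids */
        return BTRFS_I(inode)->location.objectid;
}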
@@ -3267,8 +3081,7 @@ static inline void btrfs_release_extent_buffer(struct extent_buffer *eb) | |||
3267 | 3081 | ||
3268 | struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, | 3082 | struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, |
3269 | u64 start, unsigned long len, | 3083 | u64 start, unsigned long len, |
3270 | struct page *page0, | 3084 | struct page *page0) |
3271 | gfp_t mask) | ||
3272 | { | 3085 | { |
3273 | unsigned long num_pages = num_extent_pages(start, len); | 3086 | unsigned long num_pages = num_extent_pages(start, len); |
3274 | unsigned long i; | 3087 | unsigned long i; |
@@ -3289,7 +3102,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, | |||
3289 | } | 3102 | } |
3290 | rcu_read_unlock(); | 3103 | rcu_read_unlock(); |
3291 | 3104 | ||
3292 | eb = __alloc_extent_buffer(tree, start, len, mask); | 3105 | eb = __alloc_extent_buffer(tree, start, len, GFP_NOFS); |
3293 | if (!eb) | 3106 | if (!eb) |
3294 | return NULL; | 3107 | return NULL; |
3295 | 3108 | ||
@@ -3306,7 +3119,7 @@ struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, | |||
3306 | i = 0; | 3119 | i = 0; |
3307 | } | 3120 | } |
3308 | for (; i < num_pages; i++, index++) { | 3121 | for (; i < num_pages; i++, index++) { |
3309 | p = find_or_create_page(mapping, index, mask | __GFP_HIGHMEM); | 3122 | p = find_or_create_page(mapping, index, GFP_NOFS | __GFP_HIGHMEM); |
3310 | if (!p) { | 3123 | if (!p) { |
3311 | WARN_ON(1); | 3124 | WARN_ON(1); |
3312 | goto free_eb; | 3125 | goto free_eb; |
@@ -3378,8 +3191,7 @@ free_eb: | |||
3378 | } | 3191 | } |
3379 | 3192 | ||
3380 | struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, | 3193 | struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, |
3381 | u64 start, unsigned long len, | 3194 | u64 start, unsigned long len) |
3382 | gfp_t mask) | ||
3383 | { | 3195 | { |
3384 | struct extent_buffer *eb; | 3196 | struct extent_buffer *eb; |
3385 | 3197 | ||
@@ -3440,13 +3252,6 @@ int clear_extent_buffer_dirty(struct extent_io_tree *tree, | |||
3440 | return 0; | 3252 | return 0; |
3441 | } | 3253 | } |
3442 | 3254 | ||
3443 | int wait_on_extent_buffer_writeback(struct extent_io_tree *tree, | ||
3444 | struct extent_buffer *eb) | ||
3445 | { | ||
3446 | return wait_on_extent_writeback(tree, eb->start, | ||
3447 | eb->start + eb->len - 1); | ||
3448 | } | ||
3449 | |||
3450 | int set_extent_buffer_dirty(struct extent_io_tree *tree, | 3255 | int set_extent_buffer_dirty(struct extent_io_tree *tree, |
3451 | struct extent_buffer *eb) | 3256 | struct extent_buffer *eb) |
3452 | { | 3257 | { |
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h index af2d7179c372..4e8445a4757c 100644 --- a/fs/btrfs/extent_io.h +++ b/fs/btrfs/extent_io.h | |||
@@ -153,23 +153,14 @@ static inline int extent_compress_type(unsigned long bio_flags) | |||
153 | 153 | ||
154 | struct extent_map_tree; | 154 | struct extent_map_tree; |
155 | 155 | ||
156 | static inline struct extent_state *extent_state_next(struct extent_state *state) | ||
157 | { | ||
158 | struct rb_node *node; | ||
159 | node = rb_next(&state->rb_node); | ||
160 | if (!node) | ||
161 | return NULL; | ||
162 | return rb_entry(node, struct extent_state, rb_node); | ||
163 | } | ||
164 | |||
165 | typedef struct extent_map *(get_extent_t)(struct inode *inode, | 156 | typedef struct extent_map *(get_extent_t)(struct inode *inode, |
166 | struct page *page, | 157 | struct page *page, |
167 | size_t page_offset, | 158 | size_t pg_offset, |
168 | u64 start, u64 len, | 159 | u64 start, u64 len, |
169 | int create); | 160 | int create); |
170 | 161 | ||
171 | void extent_io_tree_init(struct extent_io_tree *tree, | 162 | void extent_io_tree_init(struct extent_io_tree *tree, |
172 | struct address_space *mapping, gfp_t mask); | 163 | struct address_space *mapping); |
173 | int try_release_extent_mapping(struct extent_map_tree *map, | 164 | int try_release_extent_mapping(struct extent_map_tree *map, |
174 | struct extent_io_tree *tree, struct page *page, | 165 | struct extent_io_tree *tree, struct page *page, |
175 | gfp_t mask); | 166 | gfp_t mask); |
@@ -215,14 +206,8 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, | |||
215 | gfp_t mask); | 206 | gfp_t mask); |
216 | int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, | 207 | int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, |
217 | gfp_t mask); | 208 | gfp_t mask); |
218 | int clear_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, | ||
219 | gfp_t mask); | ||
220 | int clear_extent_ordered_metadata(struct extent_io_tree *tree, u64 start, | ||
221 | u64 end, gfp_t mask); | ||
222 | int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, | 209 | int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end, |
223 | struct extent_state **cached_state, gfp_t mask); | 210 | struct extent_state **cached_state, gfp_t mask); |
224 | int set_extent_ordered(struct extent_io_tree *tree, u64 start, u64 end, | ||
225 | gfp_t mask); | ||
226 | int find_first_extent_bit(struct extent_io_tree *tree, u64 start, | 211 | int find_first_extent_bit(struct extent_io_tree *tree, u64 start, |
227 | u64 *start_ret, u64 *end_ret, int bits); | 212 | u64 *start_ret, u64 *end_ret, int bits); |
228 | struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, | 213 | struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree, |
@@ -243,28 +228,17 @@ int extent_readpages(struct extent_io_tree *tree, | |||
243 | struct address_space *mapping, | 228 | struct address_space *mapping, |
244 | struct list_head *pages, unsigned nr_pages, | 229 | struct list_head *pages, unsigned nr_pages, |
245 | get_extent_t get_extent); | 230 | get_extent_t get_extent); |
246 | int extent_prepare_write(struct extent_io_tree *tree, | ||
247 | struct inode *inode, struct page *page, | ||
248 | unsigned from, unsigned to, get_extent_t *get_extent); | ||
249 | int extent_commit_write(struct extent_io_tree *tree, | ||
250 | struct inode *inode, struct page *page, | ||
251 | unsigned from, unsigned to); | ||
252 | sector_t extent_bmap(struct address_space *mapping, sector_t iblock, | ||
253 | get_extent_t *get_extent); | ||
254 | int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 231 | int extent_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
255 | __u64 start, __u64 len, get_extent_t *get_extent); | 232 | __u64 start, __u64 len, get_extent_t *get_extent); |
256 | int set_range_dirty(struct extent_io_tree *tree, u64 start, u64 end); | ||
257 | int set_state_private(struct extent_io_tree *tree, u64 start, u64 private); | 233 | int set_state_private(struct extent_io_tree *tree, u64 start, u64 private); |
258 | int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private); | 234 | int get_state_private(struct extent_io_tree *tree, u64 start, u64 *private); |
259 | void set_page_extent_mapped(struct page *page); | 235 | void set_page_extent_mapped(struct page *page); |
260 | 236 | ||
261 | struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, | 237 | struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, |
262 | u64 start, unsigned long len, | 238 | u64 start, unsigned long len, |
263 | struct page *page0, | 239 | struct page *page0); |
264 | gfp_t mask); | ||
265 | struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, | 240 | struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, |
266 | u64 start, unsigned long len, | 241 | u64 start, unsigned long len); |
267 | gfp_t mask); | ||
268 | void free_extent_buffer(struct extent_buffer *eb); | 242 | void free_extent_buffer(struct extent_buffer *eb); |
269 | int read_extent_buffer_pages(struct extent_io_tree *tree, | 243 | int read_extent_buffer_pages(struct extent_io_tree *tree, |
270 | struct extent_buffer *eb, u64 start, int wait, | 244 | struct extent_buffer *eb, u64 start, int wait, |
@@ -292,16 +266,11 @@ void memmove_extent_buffer(struct extent_buffer *dst, unsigned long dst_offset, | |||
292 | unsigned long src_offset, unsigned long len); | 266 | unsigned long src_offset, unsigned long len); |
293 | void memset_extent_buffer(struct extent_buffer *eb, char c, | 267 | void memset_extent_buffer(struct extent_buffer *eb, char c, |
294 | unsigned long start, unsigned long len); | 268 | unsigned long start, unsigned long len); |
295 | int wait_on_extent_buffer_writeback(struct extent_io_tree *tree, | ||
296 | struct extent_buffer *eb); | ||
297 | int wait_on_extent_writeback(struct extent_io_tree *tree, u64 start, u64 end); | ||
298 | int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); | 269 | int wait_extent_bit(struct extent_io_tree *tree, u64 start, u64 end, int bits); |
299 | int clear_extent_buffer_dirty(struct extent_io_tree *tree, | 270 | int clear_extent_buffer_dirty(struct extent_io_tree *tree, |
300 | struct extent_buffer *eb); | 271 | struct extent_buffer *eb); |
301 | int set_extent_buffer_dirty(struct extent_io_tree *tree, | 272 | int set_extent_buffer_dirty(struct extent_io_tree *tree, |
302 | struct extent_buffer *eb); | 273 | struct extent_buffer *eb); |
303 | int test_extent_buffer_dirty(struct extent_io_tree *tree, | ||
304 | struct extent_buffer *eb); | ||
305 | int set_extent_buffer_uptodate(struct extent_io_tree *tree, | 274 | int set_extent_buffer_uptodate(struct extent_io_tree *tree, |
306 | struct extent_buffer *eb); | 275 | struct extent_buffer *eb); |
307 | int clear_extent_buffer_uptodate(struct extent_io_tree *tree, | 276 | int clear_extent_buffer_uptodate(struct extent_io_tree *tree, |
@@ -319,7 +288,6 @@ int map_private_extent_buffer(struct extent_buffer *eb, unsigned long offset, | |||
319 | unsigned long *map_start, | 288 | unsigned long *map_start, |
320 | unsigned long *map_len, int km); | 289 | unsigned long *map_len, int km); |
321 | void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km); | 290 | void unmap_extent_buffer(struct extent_buffer *eb, char *token, int km); |
322 | int release_extent_buffer_tail_pages(struct extent_buffer *eb); | ||
323 | int extent_range_uptodate(struct extent_io_tree *tree, | 291 | int extent_range_uptodate(struct extent_io_tree *tree, |
324 | u64 start, u64 end); | 292 | u64 start, u64 end); |
325 | int extent_clear_unlock_delalloc(struct inode *inode, | 293 | int extent_clear_unlock_delalloc(struct inode *inode, |
diff --git a/fs/btrfs/extent_map.c b/fs/btrfs/extent_map.c index a24a3f2fa13e..2d0410344ea3 100644 --- a/fs/btrfs/extent_map.c +++ b/fs/btrfs/extent_map.c | |||
@@ -28,12 +28,11 @@ void extent_map_exit(void) | |||
28 | /** | 28 | /** |
29 | * extent_map_tree_init - initialize extent map tree | 29 | * extent_map_tree_init - initialize extent map tree |
30 | * @tree: tree to initialize | 30 | * @tree: tree to initialize |
31 | * @mask: flags for memory allocations during tree operations | ||
32 | * | 31 | * |
33 | * Initialize the extent tree @tree. Should be called for each new inode | 32 | * Initialize the extent tree @tree. Should be called for each new inode |
34 | * or other user of the extent_map interface. | 33 | * or other user of the extent_map interface. |
35 | */ | 34 | */ |
36 | void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask) | 35 | void extent_map_tree_init(struct extent_map_tree *tree) |
37 | { | 36 | { |
38 | tree->map = RB_ROOT; | 37 | tree->map = RB_ROOT; |
39 | rwlock_init(&tree->lock); | 38 | rwlock_init(&tree->lock); |
@@ -41,16 +40,15 @@ void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask) | |||
41 | 40 | ||
42 | /** | 41 | /** |
43 | * alloc_extent_map - allocate new extent map structure | 42 | * alloc_extent_map - allocate new extent map structure |
44 | * @mask: memory allocation flags | ||
45 | * | 43 | * |
46 | * Allocate a new extent_map structure. The new structure is | 44 | * Allocate a new extent_map structure. The new structure is |
47 | * returned with a reference count of one and needs to be | 45 | * returned with a reference count of one and needs to be |
48 | * freed using free_extent_map() | 46 | * freed using free_extent_map() |
49 | */ | 47 | */ |
50 | struct extent_map *alloc_extent_map(gfp_t mask) | 48 | struct extent_map *alloc_extent_map(void) |
51 | { | 49 | { |
52 | struct extent_map *em; | 50 | struct extent_map *em; |
53 | em = kmem_cache_alloc(extent_map_cache, mask); | 51 | em = kmem_cache_alloc(extent_map_cache, GFP_NOFS); |
54 | if (!em) | 52 | if (!em) |
55 | return NULL; | 53 | return NULL; |
56 | em->in_tree = 0; | 54 | em->in_tree = 0; |
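With the gfp_t parameter gone, allocation policy is decided inside the helper (GFP_NOFS, as shown above), so call sites shrink to the form used later in this merge. An illustrative caller, mirroring the file.c hunk further down:

        struct extent_map *em;

        em = alloc_extent_map();        /* always GFP_NOFS internally now */
        if (!em)
                return -ENOMEM;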
diff --git a/fs/btrfs/extent_map.h b/fs/btrfs/extent_map.h index 28b44dbd1e35..33a7890b1f40 100644 --- a/fs/btrfs/extent_map.h +++ b/fs/btrfs/extent_map.h | |||
@@ -49,14 +49,14 @@ static inline u64 extent_map_block_end(struct extent_map *em) | |||
49 | return em->block_start + em->block_len; | 49 | return em->block_start + em->block_len; |
50 | } | 50 | } |
51 | 51 | ||
52 | void extent_map_tree_init(struct extent_map_tree *tree, gfp_t mask); | 52 | void extent_map_tree_init(struct extent_map_tree *tree); |
53 | struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, | 53 | struct extent_map *lookup_extent_mapping(struct extent_map_tree *tree, |
54 | u64 start, u64 len); | 54 | u64 start, u64 len); |
55 | int add_extent_mapping(struct extent_map_tree *tree, | 55 | int add_extent_mapping(struct extent_map_tree *tree, |
56 | struct extent_map *em); | 56 | struct extent_map *em); |
57 | int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em); | 57 | int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em); |
58 | 58 | ||
59 | struct extent_map *alloc_extent_map(gfp_t mask); | 59 | struct extent_map *alloc_extent_map(void); |
60 | void free_extent_map(struct extent_map *em); | 60 | void free_extent_map(struct extent_map *em); |
61 | int __init extent_map_init(void); | 61 | int __init extent_map_init(void); |
62 | void extent_map_exit(void); | 62 | void extent_map_exit(void); |
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c index a6a9d4e8b491..90d4ee52cd45 100644 --- a/fs/btrfs/file-item.c +++ b/fs/btrfs/file-item.c | |||
@@ -193,7 +193,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, | |||
193 | u32 item_size; | 193 | u32 item_size; |
194 | 194 | ||
195 | if (item) | 195 | if (item) |
196 | btrfs_release_path(root, path); | 196 | btrfs_release_path(path); |
197 | item = btrfs_lookup_csum(NULL, root->fs_info->csum_root, | 197 | item = btrfs_lookup_csum(NULL, root->fs_info->csum_root, |
198 | path, disk_bytenr, 0); | 198 | path, disk_bytenr, 0); |
199 | if (IS_ERR(item)) { | 199 | if (IS_ERR(item)) { |
@@ -208,12 +208,13 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root, | |||
208 | EXTENT_NODATASUM, GFP_NOFS); | 208 | EXTENT_NODATASUM, GFP_NOFS); |
209 | } else { | 209 | } else { |
210 | printk(KERN_INFO "btrfs no csum found " | 210 | printk(KERN_INFO "btrfs no csum found " |
211 | "for inode %lu start %llu\n", | 211 | "for inode %llu start %llu\n", |
212 | inode->i_ino, | 212 | (unsigned long long) |
213 | btrfs_ino(inode), | ||
213 | (unsigned long long)offset); | 214 | (unsigned long long)offset); |
214 | } | 215 | } |
215 | item = NULL; | 216 | item = NULL; |
216 | btrfs_release_path(root, path); | 217 | btrfs_release_path(path); |
217 | goto found; | 218 | goto found; |
218 | } | 219 | } |
219 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, | 220 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, |
@@ -266,7 +267,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode, | |||
266 | } | 267 | } |
267 | 268 | ||
268 | int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, | 269 | int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, |
269 | struct list_head *list) | 270 | struct list_head *list, int search_commit) |
270 | { | 271 | { |
271 | struct btrfs_key key; | 272 | struct btrfs_key key; |
272 | struct btrfs_path *path; | 273 | struct btrfs_path *path; |
@@ -283,6 +284,12 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end, | |||
283 | path = btrfs_alloc_path(); | 284 | path = btrfs_alloc_path(); |
284 | BUG_ON(!path); | 285 | BUG_ON(!path); |
285 | 286 | ||
287 | if (search_commit) { | ||
288 | path->skip_locking = 1; | ||
289 | path->reada = 2; | ||
290 | path->search_commit_root = 1; | ||
291 | } | ||
292 | |||
286 | key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; | 293 | key.objectid = BTRFS_EXTENT_CSUM_OBJECTID; |
287 | key.offset = start; | 294 | key.offset = start; |
288 | key.type = BTRFS_EXTENT_CSUM_KEY; | 295 | key.type = BTRFS_EXTENT_CSUM_KEY; |
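The new search_commit flag configures the path for a read-only walk of the last committed csum tree: skip_locking avoids taking tree locks, search_commit_root makes btrfs_search_slot() descend from the commit root rather than the live root, and reada = 2 turns on readahead for the walk (the value that later became READA_FORWARD). That three-flag combination is the usual setup for background scans that must not disturb writers; roughly, with the key setup and search call abbreviated, so treat this as a shape rather than a drop-in function:

        struct btrfs_path *path;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        path->skip_locking = 1;         /* no tree locks for a read-only walk */
        path->search_commit_root = 1;   /* descend from the committed root */
        path->reada = 2;                /* read ahead along the scan */

        /* ... fill in the key, then btrfs_search_slot(NULL, root, &key, path, 0, 0) ... */

        btrfs_free_path(path);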
@@ -495,7 +502,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans, | |||
495 | u32 new_size = (bytenr - key->offset) >> blocksize_bits; | 502 | u32 new_size = (bytenr - key->offset) >> blocksize_bits; |
496 | new_size *= csum_size; | 503 | new_size *= csum_size; |
497 | ret = btrfs_truncate_item(trans, root, path, new_size, 1); | 504 | ret = btrfs_truncate_item(trans, root, path, new_size, 1); |
498 | BUG_ON(ret); | ||
499 | } else if (key->offset >= bytenr && csum_end > end_byte && | 505 | } else if (key->offset >= bytenr && csum_end > end_byte && |
500 | end_byte > key->offset) { | 506 | end_byte > key->offset) { |
501 | /* | 507 | /* |
@@ -508,7 +514,6 @@ static noinline int truncate_one_csum(struct btrfs_trans_handle *trans, | |||
508 | new_size *= csum_size; | 514 | new_size *= csum_size; |
509 | 515 | ||
510 | ret = btrfs_truncate_item(trans, root, path, new_size, 0); | 516 | ret = btrfs_truncate_item(trans, root, path, new_size, 0); |
511 | BUG_ON(ret); | ||
512 | 517 | ||
513 | key->offset = end_byte; | 518 | key->offset = end_byte; |
514 | ret = btrfs_set_item_key_safe(trans, root, path, key); | 519 | ret = btrfs_set_item_key_safe(trans, root, path, key); |
@@ -551,10 +556,10 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, | |||
551 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | 556 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); |
552 | if (ret > 0) { | 557 | if (ret > 0) { |
553 | if (path->slots[0] == 0) | 558 | if (path->slots[0] == 0) |
554 | goto out; | 559 | break; |
555 | path->slots[0]--; | 560 | path->slots[0]--; |
556 | } else if (ret < 0) { | 561 | } else if (ret < 0) { |
557 | goto out; | 562 | break; |
558 | } | 563 | } |
559 | 564 | ||
560 | leaf = path->nodes[0]; | 565 | leaf = path->nodes[0]; |
@@ -579,7 +584,8 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, | |||
579 | /* delete the entire item, it is inside our range */ | 584 | /* delete the entire item, it is inside our range */ |
580 | if (key.offset >= bytenr && csum_end <= end_byte) { | 585 | if (key.offset >= bytenr && csum_end <= end_byte) { |
581 | ret = btrfs_del_item(trans, root, path); | 586 | ret = btrfs_del_item(trans, root, path); |
582 | BUG_ON(ret); | 587 | if (ret) |
588 | goto out; | ||
583 | if (key.offset == bytenr) | 589 | if (key.offset == bytenr) |
584 | break; | 590 | break; |
585 | } else if (key.offset < bytenr && csum_end > end_byte) { | 591 | } else if (key.offset < bytenr && csum_end > end_byte) { |
@@ -631,11 +637,12 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans, | |||
631 | if (key.offset < bytenr) | 637 | if (key.offset < bytenr) |
632 | break; | 638 | break; |
633 | } | 639 | } |
634 | btrfs_release_path(root, path); | 640 | btrfs_release_path(path); |
635 | } | 641 | } |
642 | ret = 0; | ||
636 | out: | 643 | out: |
637 | btrfs_free_path(path); | 644 | btrfs_free_path(path); |
638 | return 0; | 645 | return ret; |
639 | } | 646 | } |
640 | 647 | ||
641 | int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, | 648 | int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans, |
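The btrfs_del_csums() hunks above replace BUG_ON(ret) and the unconditional `return 0` with the usual propagate-through-a-single-exit pattern, so a failed deletion reaches the caller instead of crashing the machine. In outline, with the remaining truncation cases elided:

        ret = btrfs_del_item(trans, root, path);
        if (ret)
                goto out;               /* was BUG_ON(ret) */
        /* ... remaining truncation / deletion cases ... */
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;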
@@ -722,7 +729,7 @@ again: | |||
722 | * at this point, we know the tree has an item, but it isn't big | 729 | * at this point, we know the tree has an item, but it isn't big |
723 | * enough yet to put our csum in. Grow it | 730 | * enough yet to put our csum in. Grow it |
724 | */ | 731 | */ |
725 | btrfs_release_path(root, path); | 732 | btrfs_release_path(path); |
726 | ret = btrfs_search_slot(trans, root, &file_key, path, | 733 | ret = btrfs_search_slot(trans, root, &file_key, path, |
727 | csum_size, 1); | 734 | csum_size, 1); |
728 | if (ret < 0) | 735 | if (ret < 0) |
@@ -761,12 +768,11 @@ again: | |||
761 | goto insert; | 768 | goto insert; |
762 | 769 | ||
763 | ret = btrfs_extend_item(trans, root, path, diff); | 770 | ret = btrfs_extend_item(trans, root, path, diff); |
764 | BUG_ON(ret); | ||
765 | goto csum; | 771 | goto csum; |
766 | } | 772 | } |
767 | 773 | ||
768 | insert: | 774 | insert: |
769 | btrfs_release_path(root, path); | 775 | btrfs_release_path(path); |
770 | csum_offset = 0; | 776 | csum_offset = 0; |
771 | if (found_next) { | 777 | if (found_next) { |
772 | u64 tmp = total_bytes + root->sectorsize; | 778 | u64 tmp = total_bytes + root->sectorsize; |
@@ -850,7 +856,7 @@ next_sector: | |||
850 | } | 856 | } |
851 | btrfs_mark_buffer_dirty(path->nodes[0]); | 857 | btrfs_mark_buffer_dirty(path->nodes[0]); |
852 | if (total_bytes < sums->len) { | 858 | if (total_bytes < sums->len) { |
853 | btrfs_release_path(root, path); | 859 | btrfs_release_path(path); |
854 | cond_resched(); | 860 | cond_resched(); |
855 | goto again; | 861 | goto again; |
856 | } | 862 | } |
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c index 75899a01dded..c6a22d783c35 100644 --- a/fs/btrfs/file.c +++ b/fs/btrfs/file.c | |||
@@ -40,6 +40,263 @@ | |||
40 | #include "locking.h" | 40 | #include "locking.h" |
41 | #include "compat.h" | 41 | #include "compat.h" |
42 | 42 | ||
43 | /* | ||
44 | * when auto defrag is enabled we | ||
45 | * queue up these defrag structs to remember which | ||
46 | * inodes need defragging passes | ||
47 | */ | ||
48 | struct inode_defrag { | ||
49 | struct rb_node rb_node; | ||
50 | /* objectid */ | ||
51 | u64 ino; | ||
52 | /* | ||
53 | * transid where the defrag was added, we search for | ||
54 | * extents newer than this | ||
55 | */ | ||
56 | u64 transid; | ||
57 | |||
58 | /* root objectid */ | ||
59 | u64 root; | ||
60 | |||
61 | /* last offset we were able to defrag */ | ||
62 | u64 last_offset; | ||
63 | |||
64 | /* if we've wrapped around back to zero once already */ | ||
65 | int cycled; | ||
66 | }; | ||
67 | |||
68 | /* insert a record for an inode into the defrag tree. The lock | ||
69 | * must be held already | ||
70 | * | ||
71 | * If you're inserting a record for an older transid than an | ||
72 | * existing record, the transid already in the tree is lowered | ||
73 | * | ||
74 | * If an existing record is found, the defrag item you | ||
75 | * pass in is freed | ||
76 | */ | ||
77 | static int __btrfs_add_inode_defrag(struct inode *inode, | ||
78 | struct inode_defrag *defrag) | ||
79 | { | ||
80 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
81 | struct inode_defrag *entry; | ||
82 | struct rb_node **p; | ||
83 | struct rb_node *parent = NULL; | ||
84 | |||
85 | p = &root->fs_info->defrag_inodes.rb_node; | ||
86 | while (*p) { | ||
87 | parent = *p; | ||
88 | entry = rb_entry(parent, struct inode_defrag, rb_node); | ||
89 | |||
90 | if (defrag->ino < entry->ino) | ||
91 | p = &parent->rb_left; | ||
92 | else if (defrag->ino > entry->ino) | ||
93 | p = &parent->rb_right; | ||
94 | else { | ||
95 | /* if we're reinserting an entry for | ||
96 | * an old defrag run, make sure to | ||
97 | * lower the transid of our existing record | ||
98 | */ | ||
99 | if (defrag->transid < entry->transid) | ||
100 | entry->transid = defrag->transid; | ||
101 | if (defrag->last_offset > entry->last_offset) | ||
102 | entry->last_offset = defrag->last_offset; | ||
103 | goto exists; | ||
104 | } | ||
105 | } | ||
106 | BTRFS_I(inode)->in_defrag = 1; | ||
107 | rb_link_node(&defrag->rb_node, parent, p); | ||
108 | rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes); | ||
109 | return 0; | ||
110 | |||
111 | exists: | ||
112 | kfree(defrag); | ||
113 | return 0; | ||
114 | |||
115 | } | ||
116 | |||
117 | /* | ||
118 | * insert a defrag record for this inode if auto defrag is | ||
119 | * enabled | ||
120 | */ | ||
121 | int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans, | ||
122 | struct inode *inode) | ||
123 | { | ||
124 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
125 | struct inode_defrag *defrag; | ||
126 | int ret = 0; | ||
127 | u64 transid; | ||
128 | |||
129 | if (!btrfs_test_opt(root, AUTO_DEFRAG)) | ||
130 | return 0; | ||
131 | |||
132 | if (root->fs_info->closing) | ||
133 | return 0; | ||
134 | |||
135 | if (BTRFS_I(inode)->in_defrag) | ||
136 | return 0; | ||
137 | |||
138 | if (trans) | ||
139 | transid = trans->transid; | ||
140 | else | ||
141 | transid = BTRFS_I(inode)->root->last_trans; | ||
142 | |||
143 | defrag = kzalloc(sizeof(*defrag), GFP_NOFS); | ||
144 | if (!defrag) | ||
145 | return -ENOMEM; | ||
146 | |||
147 | defrag->ino = inode->i_ino; | ||
148 | defrag->transid = transid; | ||
149 | defrag->root = root->root_key.objectid; | ||
150 | |||
151 | spin_lock(&root->fs_info->defrag_inodes_lock); | ||
152 | if (!BTRFS_I(inode)->in_defrag) | ||
153 | ret = __btrfs_add_inode_defrag(inode, defrag); | ||
154 | spin_unlock(&root->fs_info->defrag_inodes_lock); | ||
155 | return ret; | ||
156 | } | ||
157 | |||
158 | /* | ||
159 | * must be called with the defrag_inodes lock held | ||
160 | */ | ||
161 | struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino, | ||
162 | struct rb_node **next) | ||
163 | { | ||
164 | struct inode_defrag *entry = NULL; | ||
165 | struct rb_node *p; | ||
166 | struct rb_node *parent = NULL; | ||
167 | |||
168 | p = info->defrag_inodes.rb_node; | ||
169 | while (p) { | ||
170 | parent = p; | ||
171 | entry = rb_entry(parent, struct inode_defrag, rb_node); | ||
172 | |||
173 | if (ino < entry->ino) | ||
174 | p = parent->rb_left; | ||
175 | else if (ino > entry->ino) | ||
176 | p = parent->rb_right; | ||
177 | else | ||
178 | return entry; | ||
179 | } | ||
180 | |||
181 | if (next) { | ||
182 | while (parent && ino > entry->ino) { | ||
183 | parent = rb_next(parent); | ||
184 | entry = rb_entry(parent, struct inode_defrag, rb_node); | ||
185 | } | ||
186 | *next = parent; | ||
187 | } | ||
188 | return NULL; | ||
189 | } | ||
190 | |||
191 | /* | ||
192 | * run through the list of inodes in the FS that need | ||
193 | * defragging | ||
194 | */ | ||
195 | int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info) | ||
196 | { | ||
197 | struct inode_defrag *defrag; | ||
198 | struct btrfs_root *inode_root; | ||
199 | struct inode *inode; | ||
200 | struct rb_node *n; | ||
201 | struct btrfs_key key; | ||
202 | struct btrfs_ioctl_defrag_range_args range; | ||
203 | u64 first_ino = 0; | ||
204 | int num_defrag; | ||
205 | int defrag_batch = 1024; | ||
206 | |||
207 | memset(&range, 0, sizeof(range)); | ||
208 | range.len = (u64)-1; | ||
209 | |||
210 | atomic_inc(&fs_info->defrag_running); | ||
211 | spin_lock(&fs_info->defrag_inodes_lock); | ||
212 | while (1) { | ||
213 | n = NULL; | ||
214 | |||
215 | /* find an inode to defrag */ | ||
216 | defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n); | ||
217 | if (!defrag) { | ||
218 | if (n) | ||
219 | defrag = rb_entry(n, struct inode_defrag, rb_node); | ||
220 | else if (first_ino) { | ||
221 | first_ino = 0; | ||
222 | continue; | ||
223 | } else { | ||
224 | break; | ||
225 | } | ||
226 | } | ||
227 | |||
228 | /* remove it from the rbtree */ | ||
229 | first_ino = defrag->ino + 1; | ||
230 | rb_erase(&defrag->rb_node, &fs_info->defrag_inodes); | ||
231 | |||
232 | if (fs_info->closing) | ||
233 | goto next_free; | ||
234 | |||
235 | spin_unlock(&fs_info->defrag_inodes_lock); | ||
236 | |||
237 | /* get the inode */ | ||
238 | key.objectid = defrag->root; | ||
239 | btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY); | ||
240 | key.offset = (u64)-1; | ||
241 | inode_root = btrfs_read_fs_root_no_name(fs_info, &key); | ||
242 | if (IS_ERR(inode_root)) | ||
243 | goto next; | ||
244 | |||
245 | key.objectid = defrag->ino; | ||
246 | btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); | ||
247 | key.offset = 0; | ||
248 | |||
249 | inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL); | ||
250 | if (IS_ERR(inode)) | ||
251 | goto next; | ||
252 | |||
253 | /* do a chunk of defrag */ | ||
254 | BTRFS_I(inode)->in_defrag = 0; | ||
255 | range.start = defrag->last_offset; | ||
256 | num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid, | ||
257 | defrag_batch); | ||
258 | /* | ||
259 | * if we filled the whole defrag batch, there | ||
260 | * must be more work to do. Queue this defrag | ||
261 | * again | ||
262 | */ | ||
263 | if (num_defrag == defrag_batch) { | ||
264 | defrag->last_offset = range.start; | ||
265 | __btrfs_add_inode_defrag(inode, defrag); | ||
266 | /* | ||
267 | * we don't want to kfree defrag; we added it back to | ||
268 | * the rbtree | ||
269 | */ | ||
270 | defrag = NULL; | ||
271 | } else if (defrag->last_offset && !defrag->cycled) { | ||
272 | /* | ||
273 | * we didn't fill our defrag batch, but | ||
274 | * we didn't start at zero. Make sure we loop | ||
275 | * around to the start of the file. | ||
276 | */ | ||
277 | defrag->last_offset = 0; | ||
278 | defrag->cycled = 1; | ||
279 | __btrfs_add_inode_defrag(inode, defrag); | ||
280 | defrag = NULL; | ||
281 | } | ||
282 | |||
283 | iput(inode); | ||
284 | next: | ||
285 | spin_lock(&fs_info->defrag_inodes_lock); | ||
286 | next_free: | ||
287 | kfree(defrag); | ||
288 | } | ||
289 | spin_unlock(&fs_info->defrag_inodes_lock); | ||
290 | |||
291 | atomic_dec(&fs_info->defrag_running); | ||
292 | |||
293 | /* | ||
294 | * during unmount, we use the transaction_wait queue to | ||
295 | * wait for the defragger to stop | ||
296 | */ | ||
297 | wake_up(&fs_info->transaction_wait); | ||
298 | return 0; | ||
299 | } | ||
43 | 300 | ||
44 | /* simple helper to fault in pages and copy. This should go away | 301 | /* simple helper to fault in pages and copy. This should go away |
45 | * and be replaced with calls into generic code. | 302 | * and be replaced with calls into generic code. |
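btrfs_run_defrag_inodes() in the hunk above drains the defrag rbtree with a pattern worth calling out: it remembers only the next key (first_ino), erases the node under defrag_inodes_lock, drops the lock for the long-running defrag, and then re-searches from the remembered key instead of trusting a stale iterator; a full batch re-queues the entry, and a short run that did not start at offset zero wraps around once. The locking shape, reduced to a generic sketch where struct work_item, find_first_at_or_after() and do_work() are illustrative names rather than btrfs code:

        u64 next_key = 0;

        spin_lock(&lock);
        while (1) {
                struct work_item *item;

                /* re-search by key each pass; the tree may have changed */
                item = find_first_at_or_after(&tree, next_key);
                if (!item)
                        break;

                next_key = item->key + 1;       /* restart point for the next pass */
                rb_erase(&item->rb_node, &tree);
                spin_unlock(&lock);

                do_work(item);                  /* may sleep, may re-insert item */

                spin_lock(&lock);
        }
        spin_unlock(&lock);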
@@ -191,9 +448,9 @@ int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end, | |||
191 | } | 448 | } |
192 | while (1) { | 449 | while (1) { |
193 | if (!split) | 450 | if (!split) |
194 | split = alloc_extent_map(GFP_NOFS); | 451 | split = alloc_extent_map(); |
195 | if (!split2) | 452 | if (!split2) |
196 | split2 = alloc_extent_map(GFP_NOFS); | 453 | split2 = alloc_extent_map(); |
197 | BUG_ON(!split || !split2); | 454 | BUG_ON(!split || !split2); |
198 | 455 | ||
199 | write_lock(&em_tree->lock); | 456 | write_lock(&em_tree->lock); |
@@ -298,6 +555,7 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, | |||
298 | struct btrfs_path *path; | 555 | struct btrfs_path *path; |
299 | struct btrfs_key key; | 556 | struct btrfs_key key; |
300 | struct btrfs_key new_key; | 557 | struct btrfs_key new_key; |
558 | u64 ino = btrfs_ino(inode); | ||
301 | u64 search_start = start; | 559 | u64 search_start = start; |
302 | u64 disk_bytenr = 0; | 560 | u64 disk_bytenr = 0; |
303 | u64 num_bytes = 0; | 561 | u64 num_bytes = 0; |
@@ -318,14 +576,14 @@ int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode, | |||
318 | 576 | ||
319 | while (1) { | 577 | while (1) { |
320 | recow = 0; | 578 | recow = 0; |
321 | ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, | 579 | ret = btrfs_lookup_file_extent(trans, root, path, ino, |
322 | search_start, -1); | 580 | search_start, -1); |
323 | if (ret < 0) | 581 | if (ret < 0) |
324 | break; | 582 | break; |
325 | if (ret > 0 && path->slots[0] > 0 && search_start == start) { | 583 | if (ret > 0 && path->slots[0] > 0 && search_start == start) { |
326 | leaf = path->nodes[0]; | 584 | leaf = path->nodes[0]; |
327 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); | 585 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1); |
328 | if (key.objectid == inode->i_ino && | 586 | if (key.objectid == ino && |
329 | key.type == BTRFS_EXTENT_DATA_KEY) | 587 | key.type == BTRFS_EXTENT_DATA_KEY) |
330 | path->slots[0]--; | 588 | path->slots[0]--; |
331 | } | 589 | } |
@@ -346,7 +604,7 @@ next_slot: | |||
346 | } | 604 | } |
347 | 605 | ||
348 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | 606 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); |
349 | if (key.objectid > inode->i_ino || | 607 | if (key.objectid > ino || |
350 | key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end) | 608 | key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end) |
351 | break; | 609 | break; |
352 | 610 | ||
@@ -376,7 +634,7 @@ next_slot: | |||
376 | 634 | ||
377 | search_start = max(key.offset, start); | 635 | search_start = max(key.offset, start); |
378 | if (recow) { | 636 | if (recow) { |
379 | btrfs_release_path(root, path); | 637 | btrfs_release_path(path); |
380 | continue; | 638 | continue; |
381 | } | 639 | } |
382 | 640 | ||
@@ -393,7 +651,7 @@ next_slot: | |||
393 | ret = btrfs_duplicate_item(trans, root, path, | 651 | ret = btrfs_duplicate_item(trans, root, path, |
394 | &new_key); | 652 | &new_key); |
395 | if (ret == -EAGAIN) { | 653 | if (ret == -EAGAIN) { |
396 | btrfs_release_path(root, path); | 654 | btrfs_release_path(path); |
397 | continue; | 655 | continue; |
398 | } | 656 | } |
399 | if (ret < 0) | 657 | if (ret < 0) |
@@ -516,7 +774,7 @@ next_slot: | |||
516 | del_nr = 0; | 774 | del_nr = 0; |
517 | del_slot = 0; | 775 | del_slot = 0; |
518 | 776 | ||
519 | btrfs_release_path(root, path); | 777 | btrfs_release_path(path); |
520 | continue; | 778 | continue; |
521 | } | 779 | } |
522 | 780 | ||
@@ -592,6 +850,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, | |||
592 | int del_slot = 0; | 850 | int del_slot = 0; |
593 | int recow; | 851 | int recow; |
594 | int ret; | 852 | int ret; |
853 | u64 ino = btrfs_ino(inode); | ||
595 | 854 | ||
596 | btrfs_drop_extent_cache(inode, start, end - 1, 0); | 855 | btrfs_drop_extent_cache(inode, start, end - 1, 0); |
597 | 856 | ||
@@ -600,7 +859,7 @@ int btrfs_mark_extent_written(struct btrfs_trans_handle *trans, | |||
600 | again: | 859 | again: |
601 | recow = 0; | 860 | recow = 0; |
602 | split = start; | 861 | split = start; |
603 | key.objectid = inode->i_ino; | 862 | key.objectid = ino; |
604 | key.type = BTRFS_EXTENT_DATA_KEY; | 863 | key.type = BTRFS_EXTENT_DATA_KEY; |
605 | key.offset = split; | 864 | key.offset = split; |
606 | 865 | ||
@@ -612,8 +871,7 @@ again: | |||
612 | 871 | ||
613 | leaf = path->nodes[0]; | 872 | leaf = path->nodes[0]; |
614 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | 873 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); |
615 | BUG_ON(key.objectid != inode->i_ino || | 874 | BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY); |
616 | key.type != BTRFS_EXTENT_DATA_KEY); | ||
617 | fi = btrfs_item_ptr(leaf, path->slots[0], | 875 | fi = btrfs_item_ptr(leaf, path->slots[0], |
618 | struct btrfs_file_extent_item); | 876 | struct btrfs_file_extent_item); |
619 | BUG_ON(btrfs_file_extent_type(leaf, fi) != | 877 | BUG_ON(btrfs_file_extent_type(leaf, fi) != |
@@ -630,7 +888,7 @@ again: | |||
630 | other_start = 0; | 888 | other_start = 0; |
631 | other_end = start; | 889 | other_end = start; |
632 | if (extent_mergeable(leaf, path->slots[0] - 1, | 890 | if (extent_mergeable(leaf, path->slots[0] - 1, |
633 | inode->i_ino, bytenr, orig_offset, | 891 | ino, bytenr, orig_offset, |
634 | &other_start, &other_end)) { | 892 | &other_start, &other_end)) { |
635 | new_key.offset = end; | 893 | new_key.offset = end; |
636 | btrfs_set_item_key_safe(trans, root, path, &new_key); | 894 | btrfs_set_item_key_safe(trans, root, path, &new_key); |
@@ -653,7 +911,7 @@ again: | |||
653 | other_start = end; | 911 | other_start = end; |
654 | other_end = 0; | 912 | other_end = 0; |
655 | if (extent_mergeable(leaf, path->slots[0] + 1, | 913 | if (extent_mergeable(leaf, path->slots[0] + 1, |
656 | inode->i_ino, bytenr, orig_offset, | 914 | ino, bytenr, orig_offset, |
657 | &other_start, &other_end)) { | 915 | &other_start, &other_end)) { |
658 | fi = btrfs_item_ptr(leaf, path->slots[0], | 916 | fi = btrfs_item_ptr(leaf, path->slots[0], |
659 | struct btrfs_file_extent_item); | 917 | struct btrfs_file_extent_item); |
@@ -681,7 +939,7 @@ again: | |||
681 | new_key.offset = split; | 939 | new_key.offset = split; |
682 | ret = btrfs_duplicate_item(trans, root, path, &new_key); | 940 | ret = btrfs_duplicate_item(trans, root, path, &new_key); |
683 | if (ret == -EAGAIN) { | 941 | if (ret == -EAGAIN) { |
684 | btrfs_release_path(root, path); | 942 | btrfs_release_path(path); |
685 | goto again; | 943 | goto again; |
686 | } | 944 | } |
687 | BUG_ON(ret < 0); | 945 | BUG_ON(ret < 0); |
@@ -702,7 +960,7 @@ again: | |||
702 | 960 | ||
703 | ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, | 961 | ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0, |
704 | root->root_key.objectid, | 962 | root->root_key.objectid, |
705 | inode->i_ino, orig_offset); | 963 | ino, orig_offset); |
706 | BUG_ON(ret); | 964 | BUG_ON(ret); |
707 | 965 | ||
708 | if (split == start) { | 966 | if (split == start) { |
@@ -718,10 +976,10 @@ again: | |||
718 | other_start = end; | 976 | other_start = end; |
719 | other_end = 0; | 977 | other_end = 0; |
720 | if (extent_mergeable(leaf, path->slots[0] + 1, | 978 | if (extent_mergeable(leaf, path->slots[0] + 1, |
721 | inode->i_ino, bytenr, orig_offset, | 979 | ino, bytenr, orig_offset, |
722 | &other_start, &other_end)) { | 980 | &other_start, &other_end)) { |
723 | if (recow) { | 981 | if (recow) { |
724 | btrfs_release_path(root, path); | 982 | btrfs_release_path(path); |
725 | goto again; | 983 | goto again; |
726 | } | 984 | } |
727 | extent_end = other_end; | 985 | extent_end = other_end; |
@@ -729,16 +987,16 @@ again: | |||
729 | del_nr++; | 987 | del_nr++; |
730 | ret = btrfs_free_extent(trans, root, bytenr, num_bytes, | 988 | ret = btrfs_free_extent(trans, root, bytenr, num_bytes, |
731 | 0, root->root_key.objectid, | 989 | 0, root->root_key.objectid, |
732 | inode->i_ino, orig_offset); | 990 | ino, orig_offset); |
733 | BUG_ON(ret); | 991 | BUG_ON(ret); |
734 | } | 992 | } |
735 | other_start = 0; | 993 | other_start = 0; |
736 | other_end = start; | 994 | other_end = start; |
737 | if (extent_mergeable(leaf, path->slots[0] - 1, | 995 | if (extent_mergeable(leaf, path->slots[0] - 1, |
738 | inode->i_ino, bytenr, orig_offset, | 996 | ino, bytenr, orig_offset, |
739 | &other_start, &other_end)) { | 997 | &other_start, &other_end)) { |
740 | if (recow) { | 998 | if (recow) { |
741 | btrfs_release_path(root, path); | 999 | btrfs_release_path(path); |
742 | goto again; | 1000 | goto again; |
743 | } | 1001 | } |
744 | key.offset = other_start; | 1002 | key.offset = other_start; |
@@ -746,7 +1004,7 @@ again: | |||
746 | del_nr++; | 1004 | del_nr++; |
747 | ret = btrfs_free_extent(trans, root, bytenr, num_bytes, | 1005 | ret = btrfs_free_extent(trans, root, bytenr, num_bytes, |
748 | 0, root->root_key.objectid, | 1006 | 0, root->root_key.objectid, |
749 | inode->i_ino, orig_offset); | 1007 | ino, orig_offset); |
750 | BUG_ON(ret); | 1008 | BUG_ON(ret); |
751 | } | 1009 | } |
752 | if (del_nr == 0) { | 1010 | if (del_nr == 0) { |
@@ -1375,7 +1633,7 @@ static long btrfs_fallocate(struct file *file, int mode, | |||
1375 | while (1) { | 1633 | while (1) { |
1376 | em = btrfs_get_extent(inode, NULL, 0, cur_offset, | 1634 | em = btrfs_get_extent(inode, NULL, 0, cur_offset, |
1377 | alloc_end - cur_offset, 0); | 1635 | alloc_end - cur_offset, 0); |
1378 | BUG_ON(IS_ERR(em) || !em); | 1636 | BUG_ON(IS_ERR_OR_NULL(em)); |
1379 | last_byte = min(extent_map_end(em), alloc_end); | 1637 | last_byte = min(extent_map_end(em), alloc_end); |
1380 | last_byte = (last_byte + mask) & ~mask; | 1638 | last_byte = (last_byte + mask) & ~mask; |
1381 | if (em->block_start == EXTENT_MAP_HOLE || | 1639 | if (em->block_start == EXTENT_MAP_HOLE || |
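
The fallocate hunk just above swaps BUG_ON(IS_ERR(em) || !em) for BUG_ON(IS_ERR_OR_NULL(em)); IS_ERR_OR_NULL() from <linux/err.h> is true for both a NULL pointer and an ERR_PTR()-encoded errno, so the two checks fold into one. A rough userspace approximation of the idiom (MAX_ERRNO mirrors the kernel's 4095; this is a sketch, not the real header):

#include <stdio.h>
#include <stdint.h>

#define MAX_ERRNO 4095
#define IS_ERR_VALUE(x) ((x) >= (uintptr_t)-MAX_ERRNO)

static inline int IS_ERR(const void *ptr)
{
	return IS_ERR_VALUE((uintptr_t)ptr);
}

static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR_VALUE((uintptr_t)ptr);
}

int main(void)
{
	void *ok = &ok;
	void *null = NULL;
	void *err = (void *)(uintptr_t)-12;	/* -ENOMEM encoded as a pointer */

	printf("ok=%d null=%d err=%d, IS_ERR(err)=%d\n",
	       IS_ERR_OR_NULL(ok), IS_ERR_OR_NULL(null),
	       IS_ERR_OR_NULL(err), IS_ERR(err));
	return 0;
}
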
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c index 63731a1fb0a1..70d45795d758 100644 --- a/fs/btrfs/free-space-cache.c +++ b/fs/btrfs/free-space-cache.c | |||
@@ -25,18 +25,17 @@ | |||
25 | #include "transaction.h" | 25 | #include "transaction.h" |
26 | #include "disk-io.h" | 26 | #include "disk-io.h" |
27 | #include "extent_io.h" | 27 | #include "extent_io.h" |
28 | #include "inode-map.h" | ||
28 | 29 | ||
29 | #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) | 30 | #define BITS_PER_BITMAP (PAGE_CACHE_SIZE * 8) |
30 | #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) | 31 | #define MAX_CACHE_BYTES_PER_GIG (32 * 1024) |
31 | 32 | ||
32 | static void recalculate_thresholds(struct btrfs_block_group_cache | 33 | static int link_free_space(struct btrfs_free_space_ctl *ctl, |
33 | *block_group); | ||
34 | static int link_free_space(struct btrfs_block_group_cache *block_group, | ||
35 | struct btrfs_free_space *info); | 34 | struct btrfs_free_space *info); |
36 | 35 | ||
37 | struct inode *lookup_free_space_inode(struct btrfs_root *root, | 36 | static struct inode *__lookup_free_space_inode(struct btrfs_root *root, |
38 | struct btrfs_block_group_cache | 37 | struct btrfs_path *path, |
39 | *block_group, struct btrfs_path *path) | 38 | u64 offset) |
40 | { | 39 | { |
41 | struct btrfs_key key; | 40 | struct btrfs_key key; |
42 | struct btrfs_key location; | 41 | struct btrfs_key location; |
@@ -46,22 +45,15 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, | |||
46 | struct inode *inode = NULL; | 45 | struct inode *inode = NULL; |
47 | int ret; | 46 | int ret; |
48 | 47 | ||
49 | spin_lock(&block_group->lock); | ||
50 | if (block_group->inode) | ||
51 | inode = igrab(block_group->inode); | ||
52 | spin_unlock(&block_group->lock); | ||
53 | if (inode) | ||
54 | return inode; | ||
55 | |||
56 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | 48 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; |
57 | key.offset = block_group->key.objectid; | 49 | key.offset = offset; |
58 | key.type = 0; | 50 | key.type = 0; |
59 | 51 | ||
60 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 52 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
61 | if (ret < 0) | 53 | if (ret < 0) |
62 | return ERR_PTR(ret); | 54 | return ERR_PTR(ret); |
63 | if (ret > 0) { | 55 | if (ret > 0) { |
64 | btrfs_release_path(root, path); | 56 | btrfs_release_path(path); |
65 | return ERR_PTR(-ENOENT); | 57 | return ERR_PTR(-ENOENT); |
66 | } | 58 | } |
67 | 59 | ||
@@ -70,7 +62,7 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, | |||
70 | struct btrfs_free_space_header); | 62 | struct btrfs_free_space_header); |
71 | btrfs_free_space_key(leaf, header, &disk_key); | 63 | btrfs_free_space_key(leaf, header, &disk_key); |
72 | btrfs_disk_key_to_cpu(&location, &disk_key); | 64 | btrfs_disk_key_to_cpu(&location, &disk_key); |
73 | btrfs_release_path(root, path); | 65 | btrfs_release_path(path); |
74 | 66 | ||
75 | inode = btrfs_iget(root->fs_info->sb, &location, root, NULL); | 67 | inode = btrfs_iget(root->fs_info->sb, &location, root, NULL); |
76 | if (!inode) | 68 | if (!inode) |
@@ -84,6 +76,27 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, | |||
84 | 76 | ||
85 | inode->i_mapping->flags &= ~__GFP_FS; | 77 | inode->i_mapping->flags &= ~__GFP_FS; |
86 | 78 | ||
79 | return inode; | ||
80 | } | ||
81 | |||
82 | struct inode *lookup_free_space_inode(struct btrfs_root *root, | ||
83 | struct btrfs_block_group_cache | ||
84 | *block_group, struct btrfs_path *path) | ||
85 | { | ||
86 | struct inode *inode = NULL; | ||
87 | |||
88 | spin_lock(&block_group->lock); | ||
89 | if (block_group->inode) | ||
90 | inode = igrab(block_group->inode); | ||
91 | spin_unlock(&block_group->lock); | ||
92 | if (inode) | ||
93 | return inode; | ||
94 | |||
95 | inode = __lookup_free_space_inode(root, path, | ||
96 | block_group->key.objectid); | ||
97 | if (IS_ERR(inode)) | ||
98 | return inode; | ||
99 | |||
87 | spin_lock(&block_group->lock); | 100 | spin_lock(&block_group->lock); |
88 | if (!root->fs_info->closing) { | 101 | if (!root->fs_info->closing) { |
89 | block_group->inode = igrab(inode); | 102 | block_group->inode = igrab(inode); |
@@ -94,24 +107,18 @@ struct inode *lookup_free_space_inode(struct btrfs_root *root, | |||
94 | return inode; | 107 | return inode; |
95 | } | 108 | } |
96 | 109 | ||
97 | int create_free_space_inode(struct btrfs_root *root, | 110 | int __create_free_space_inode(struct btrfs_root *root, |
98 | struct btrfs_trans_handle *trans, | 111 | struct btrfs_trans_handle *trans, |
99 | struct btrfs_block_group_cache *block_group, | 112 | struct btrfs_path *path, u64 ino, u64 offset) |
100 | struct btrfs_path *path) | ||
101 | { | 113 | { |
102 | struct btrfs_key key; | 114 | struct btrfs_key key; |
103 | struct btrfs_disk_key disk_key; | 115 | struct btrfs_disk_key disk_key; |
104 | struct btrfs_free_space_header *header; | 116 | struct btrfs_free_space_header *header; |
105 | struct btrfs_inode_item *inode_item; | 117 | struct btrfs_inode_item *inode_item; |
106 | struct extent_buffer *leaf; | 118 | struct extent_buffer *leaf; |
107 | u64 objectid; | ||
108 | int ret; | 119 | int ret; |
109 | 120 | ||
110 | ret = btrfs_find_free_objectid(trans, root, 0, &objectid); | 121 | ret = btrfs_insert_empty_inode(trans, root, path, ino); |
111 | if (ret < 0) | ||
112 | return ret; | ||
113 | |||
114 | ret = btrfs_insert_empty_inode(trans, root, path, objectid); | ||
115 | if (ret) | 122 | if (ret) |
116 | return ret; | 123 | return ret; |
117 | 124 | ||
@@ -131,19 +138,18 @@ int create_free_space_inode(struct btrfs_root *root, | |||
131 | BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM); | 138 | BTRFS_INODE_PREALLOC | BTRFS_INODE_NODATASUM); |
132 | btrfs_set_inode_nlink(leaf, inode_item, 1); | 139 | btrfs_set_inode_nlink(leaf, inode_item, 1); |
133 | btrfs_set_inode_transid(leaf, inode_item, trans->transid); | 140 | btrfs_set_inode_transid(leaf, inode_item, trans->transid); |
134 | btrfs_set_inode_block_group(leaf, inode_item, | 141 | btrfs_set_inode_block_group(leaf, inode_item, offset); |
135 | block_group->key.objectid); | ||
136 | btrfs_mark_buffer_dirty(leaf); | 142 | btrfs_mark_buffer_dirty(leaf); |
137 | btrfs_release_path(root, path); | 143 | btrfs_release_path(path); |
138 | 144 | ||
139 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | 145 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; |
140 | key.offset = block_group->key.objectid; | 146 | key.offset = offset; |
141 | key.type = 0; | 147 | key.type = 0; |
142 | 148 | ||
143 | ret = btrfs_insert_empty_item(trans, root, path, &key, | 149 | ret = btrfs_insert_empty_item(trans, root, path, &key, |
144 | sizeof(struct btrfs_free_space_header)); | 150 | sizeof(struct btrfs_free_space_header)); |
145 | if (ret < 0) { | 151 | if (ret < 0) { |
146 | btrfs_release_path(root, path); | 152 | btrfs_release_path(path); |
147 | return ret; | 153 | return ret; |
148 | } | 154 | } |
149 | leaf = path->nodes[0]; | 155 | leaf = path->nodes[0]; |
@@ -152,11 +158,27 @@ int create_free_space_inode(struct btrfs_root *root, | |||
152 | memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header)); | 158 | memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header)); |
153 | btrfs_set_free_space_key(leaf, header, &disk_key); | 159 | btrfs_set_free_space_key(leaf, header, &disk_key); |
154 | btrfs_mark_buffer_dirty(leaf); | 160 | btrfs_mark_buffer_dirty(leaf); |
155 | btrfs_release_path(root, path); | 161 | btrfs_release_path(path); |
156 | 162 | ||
157 | return 0; | 163 | return 0; |
158 | } | 164 | } |
159 | 165 | ||
166 | int create_free_space_inode(struct btrfs_root *root, | ||
167 | struct btrfs_trans_handle *trans, | ||
168 | struct btrfs_block_group_cache *block_group, | ||
169 | struct btrfs_path *path) | ||
170 | { | ||
171 | int ret; | ||
172 | u64 ino; | ||
173 | |||
174 | ret = btrfs_find_free_objectid(root, &ino); | ||
175 | if (ret < 0) | ||
176 | return ret; | ||
177 | |||
178 | return __create_free_space_inode(root, trans, path, ino, | ||
179 | block_group->key.objectid); | ||
180 | } | ||
181 | |||
160 | int btrfs_truncate_free_space_cache(struct btrfs_root *root, | 182 | int btrfs_truncate_free_space_cache(struct btrfs_root *root, |
161 | struct btrfs_trans_handle *trans, | 183 | struct btrfs_trans_handle *trans, |
162 | struct btrfs_path *path, | 184 | struct btrfs_path *path, |
@@ -187,7 +209,8 @@ int btrfs_truncate_free_space_cache(struct btrfs_root *root, | |||
187 | return ret; | 209 | return ret; |
188 | } | 210 | } |
189 | 211 | ||
190 | return btrfs_update_inode(trans, root, inode); | 212 | ret = btrfs_update_inode(trans, root, inode); |
213 | return ret; | ||
191 | } | 214 | } |
192 | 215 | ||
193 | static int readahead_cache(struct inode *inode) | 216 | static int readahead_cache(struct inode *inode) |
@@ -209,15 +232,13 @@ static int readahead_cache(struct inode *inode) | |||
209 | return 0; | 232 | return 0; |
210 | } | 233 | } |
211 | 234 | ||
212 | int load_free_space_cache(struct btrfs_fs_info *fs_info, | 235 | int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, |
213 | struct btrfs_block_group_cache *block_group) | 236 | struct btrfs_free_space_ctl *ctl, |
237 | struct btrfs_path *path, u64 offset) | ||
214 | { | 238 | { |
215 | struct btrfs_root *root = fs_info->tree_root; | ||
216 | struct inode *inode; | ||
217 | struct btrfs_free_space_header *header; | 239 | struct btrfs_free_space_header *header; |
218 | struct extent_buffer *leaf; | 240 | struct extent_buffer *leaf; |
219 | struct page *page; | 241 | struct page *page; |
220 | struct btrfs_path *path; | ||
221 | u32 *checksums = NULL, *crc; | 242 | u32 *checksums = NULL, *crc; |
222 | char *disk_crcs = NULL; | 243 | char *disk_crcs = NULL; |
223 | struct btrfs_key key; | 244 | struct btrfs_key key; |
@@ -225,76 +246,47 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, | |||
225 | u64 num_entries; | 246 | u64 num_entries; |
226 | u64 num_bitmaps; | 247 | u64 num_bitmaps; |
227 | u64 generation; | 248 | u64 generation; |
228 | u64 used = btrfs_block_group_used(&block_group->item); | ||
229 | u32 cur_crc = ~(u32)0; | 249 | u32 cur_crc = ~(u32)0; |
230 | pgoff_t index = 0; | 250 | pgoff_t index = 0; |
231 | unsigned long first_page_offset; | 251 | unsigned long first_page_offset; |
232 | int num_checksums; | 252 | int num_checksums; |
233 | int ret = 0; | 253 | int ret = 0, ret2; |
234 | |||
235 | /* | ||
236 | * If we're unmounting then just return, since this does a search on the | ||
237 | * normal root and not the commit root and we could deadlock. | ||
238 | */ | ||
239 | smp_mb(); | ||
240 | if (fs_info->closing) | ||
241 | return 0; | ||
242 | |||
243 | /* | ||
244 | * If this block group has been marked to be cleared for one reason or | ||
245 | * another then we can't trust the on disk cache, so just return. | ||
246 | */ | ||
247 | spin_lock(&block_group->lock); | ||
248 | if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { | ||
249 | spin_unlock(&block_group->lock); | ||
250 | return 0; | ||
251 | } | ||
252 | spin_unlock(&block_group->lock); | ||
253 | 254 | ||
254 | INIT_LIST_HEAD(&bitmaps); | 255 | INIT_LIST_HEAD(&bitmaps); |
255 | 256 | ||
256 | path = btrfs_alloc_path(); | ||
257 | if (!path) | ||
258 | return 0; | ||
259 | |||
260 | inode = lookup_free_space_inode(root, block_group, path); | ||
261 | if (IS_ERR(inode)) { | ||
262 | btrfs_free_path(path); | ||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | /* Nothing in the space cache, goodbye */ | 257 | /* Nothing in the space cache, goodbye */ |
267 | if (!i_size_read(inode)) { | 258 | if (!i_size_read(inode)) |
268 | btrfs_free_path(path); | ||
269 | goto out; | 259 | goto out; |
270 | } | ||
271 | 260 | ||
272 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | 261 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; |
273 | key.offset = block_group->key.objectid; | 262 | key.offset = offset; |
274 | key.type = 0; | 263 | key.type = 0; |
275 | 264 | ||
276 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 265 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
277 | if (ret) { | 266 | if (ret < 0) |
278 | btrfs_free_path(path); | 267 | goto out; |
268 | else if (ret > 0) { | ||
269 | btrfs_release_path(path); | ||
270 | ret = 0; | ||
279 | goto out; | 271 | goto out; |
280 | } | 272 | } |
281 | 273 | ||
274 | ret = -1; | ||
275 | |||
282 | leaf = path->nodes[0]; | 276 | leaf = path->nodes[0]; |
283 | header = btrfs_item_ptr(leaf, path->slots[0], | 277 | header = btrfs_item_ptr(leaf, path->slots[0], |
284 | struct btrfs_free_space_header); | 278 | struct btrfs_free_space_header); |
285 | num_entries = btrfs_free_space_entries(leaf, header); | 279 | num_entries = btrfs_free_space_entries(leaf, header); |
286 | num_bitmaps = btrfs_free_space_bitmaps(leaf, header); | 280 | num_bitmaps = btrfs_free_space_bitmaps(leaf, header); |
287 | generation = btrfs_free_space_generation(leaf, header); | 281 | generation = btrfs_free_space_generation(leaf, header); |
288 | btrfs_free_path(path); | 282 | btrfs_release_path(path); |
289 | 283 | ||
290 | if (BTRFS_I(inode)->generation != generation) { | 284 | if (BTRFS_I(inode)->generation != generation) { |
291 | printk(KERN_ERR "btrfs: free space inode generation (%llu) did" | 285 | printk(KERN_ERR "btrfs: free space inode generation (%llu) did" |
292 | " not match free space cache generation (%llu) for " | 286 | " not match free space cache generation (%llu)\n", |
293 | "block group %llu\n", | ||
294 | (unsigned long long)BTRFS_I(inode)->generation, | 287 | (unsigned long long)BTRFS_I(inode)->generation, |
295 | (unsigned long long)generation, | 288 | (unsigned long long)generation); |
296 | (unsigned long long)block_group->key.objectid); | 289 | goto out; |
297 | goto free_cache; | ||
298 | } | 290 | } |
299 | 291 | ||
300 | if (!num_entries) | 292 | if (!num_entries) |
@@ -311,10 +303,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, | |||
311 | goto out; | 303 | goto out; |
312 | 304 | ||
313 | ret = readahead_cache(inode); | 305 | ret = readahead_cache(inode); |
314 | if (ret) { | 306 | if (ret) |
315 | ret = 0; | ||
316 | goto out; | 307 | goto out; |
317 | } | ||
318 | 308 | ||
319 | while (1) { | 309 | while (1) { |
320 | struct btrfs_free_space_entry *entry; | 310 | struct btrfs_free_space_entry *entry; |
@@ -333,10 +323,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, | |||
333 | } | 323 | } |
334 | 324 | ||
335 | page = grab_cache_page(inode->i_mapping, index); | 325 | page = grab_cache_page(inode->i_mapping, index); |
336 | if (!page) { | 326 | if (!page) |
337 | ret = 0; | ||
338 | goto free_cache; | 327 | goto free_cache; |
339 | } | ||
340 | 328 | ||
341 | if (!PageUptodate(page)) { | 329 | if (!PageUptodate(page)) { |
342 | btrfs_readpage(NULL, page); | 330 | btrfs_readpage(NULL, page); |
@@ -345,9 +333,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, | |||
345 | unlock_page(page); | 333 | unlock_page(page); |
346 | page_cache_release(page); | 334 | page_cache_release(page); |
347 | printk(KERN_ERR "btrfs: error reading free " | 335 | printk(KERN_ERR "btrfs: error reading free " |
348 | "space cache: %llu\n", | 336 | "space cache\n"); |
349 | (unsigned long long) | ||
350 | block_group->key.objectid); | ||
351 | goto free_cache; | 337 | goto free_cache; |
352 | } | 338 | } |
353 | } | 339 | } |
@@ -360,13 +346,10 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, | |||
360 | gen = addr + (sizeof(u32) * num_checksums); | 346 | gen = addr + (sizeof(u32) * num_checksums); |
361 | if (*gen != BTRFS_I(inode)->generation) { | 347 | if (*gen != BTRFS_I(inode)->generation) { |
362 | printk(KERN_ERR "btrfs: space cache generation" | 348 | printk(KERN_ERR "btrfs: space cache generation" |
363 | " (%llu) does not match inode (%llu) " | 349 | " (%llu) does not match inode (%llu)\n", |
364 | "for block group %llu\n", | ||
365 | (unsigned long long)*gen, | 350 | (unsigned long long)*gen, |
366 | (unsigned long long) | 351 | (unsigned long long) |
367 | BTRFS_I(inode)->generation, | 352 | BTRFS_I(inode)->generation); |
368 | (unsigned long long) | ||
369 | block_group->key.objectid); | ||
370 | kunmap(page); | 353 | kunmap(page); |
371 | unlock_page(page); | 354 | unlock_page(page); |
372 | page_cache_release(page); | 355 | page_cache_release(page); |
@@ -382,9 +365,8 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, | |||
382 | PAGE_CACHE_SIZE - start_offset); | 365 | PAGE_CACHE_SIZE - start_offset); |
383 | btrfs_csum_final(cur_crc, (char *)&cur_crc); | 366 | btrfs_csum_final(cur_crc, (char *)&cur_crc); |
384 | if (cur_crc != *crc) { | 367 | if (cur_crc != *crc) { |
385 | printk(KERN_ERR "btrfs: crc mismatch for page %lu in " | 368 | printk(KERN_ERR "btrfs: crc mismatch for page %lu\n", |
386 | "block group %llu\n", index, | 369 | index); |
387 | (unsigned long long)block_group->key.objectid); | ||
388 | kunmap(page); | 370 | kunmap(page); |
389 | unlock_page(page); | 371 | unlock_page(page); |
390 | page_cache_release(page); | 372 | page_cache_release(page); |
@@ -417,9 +399,9 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, | |||
417 | } | 399 | } |
418 | 400 | ||
419 | if (entry->type == BTRFS_FREE_SPACE_EXTENT) { | 401 | if (entry->type == BTRFS_FREE_SPACE_EXTENT) { |
420 | spin_lock(&block_group->tree_lock); | 402 | spin_lock(&ctl->tree_lock); |
421 | ret = link_free_space(block_group, e); | 403 | ret = link_free_space(ctl, e); |
422 | spin_unlock(&block_group->tree_lock); | 404 | spin_unlock(&ctl->tree_lock); |
423 | BUG_ON(ret); | 405 | BUG_ON(ret); |
424 | } else { | 406 | } else { |
425 | e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); | 407 | e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); |
@@ -431,11 +413,11 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info, | |||
431 | page_cache_release(page); | 413 | page_cache_release(page); |
432 | goto free_cache; | 414 | goto free_cache; |
433 | } | 415 | } |
434 | spin_lock(&block_group->tree_lock); | 416 | spin_lock(&ctl->tree_lock); |
435 | ret = link_free_space(block_group, e); | 417 | ret2 = link_free_space(ctl, e); |
436 | block_group->total_bitmaps++; | 418 | ctl->total_bitmaps++; |
437 | recalculate_thresholds(block_group); | 419 | ctl->op->recalc_thresholds(ctl); |
438 | spin_unlock(&block_group->tree_lock); | 420 | spin_unlock(&ctl->tree_lock); |
439 | list_add_tail(&e->list, &bitmaps); | 421 | list_add_tail(&e->list, &bitmaps); |
440 | } | 422 | } |
441 | 423 | ||
@@ -471,41 +453,97 @@ next: | |||
471 | index++; | 453 | index++; |
472 | } | 454 | } |
473 | 455 | ||
474 | spin_lock(&block_group->tree_lock); | ||
475 | if (block_group->free_space != (block_group->key.offset - used - | ||
476 | block_group->bytes_super)) { | ||
477 | spin_unlock(&block_group->tree_lock); | ||
478 | printk(KERN_ERR "block group %llu has an wrong amount of free " | ||
479 | "space\n", block_group->key.objectid); | ||
480 | ret = 0; | ||
481 | goto free_cache; | ||
482 | } | ||
483 | spin_unlock(&block_group->tree_lock); | ||
484 | |||
485 | ret = 1; | 456 | ret = 1; |
486 | out: | 457 | out: |
487 | kfree(checksums); | 458 | kfree(checksums); |
488 | kfree(disk_crcs); | 459 | kfree(disk_crcs); |
489 | iput(inode); | ||
490 | return ret; | 460 | return ret; |
491 | |||
492 | free_cache: | 461 | free_cache: |
493 | /* This cache is bogus, make sure it gets cleared */ | 462 | __btrfs_remove_free_space_cache(ctl); |
463 | goto out; | ||
464 | } | ||
465 | |||
466 | int load_free_space_cache(struct btrfs_fs_info *fs_info, | ||
467 | struct btrfs_block_group_cache *block_group) | ||
468 | { | ||
469 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
470 | struct btrfs_root *root = fs_info->tree_root; | ||
471 | struct inode *inode; | ||
472 | struct btrfs_path *path; | ||
473 | int ret; | ||
474 | bool matched; | ||
475 | u64 used = btrfs_block_group_used(&block_group->item); | ||
476 | |||
477 | /* | ||
478 | * If we're unmounting then just return, since this does a search on the | ||
479 | * normal root and not the commit root and we could deadlock. | ||
480 | */ | ||
481 | smp_mb(); | ||
482 | if (fs_info->closing) | ||
483 | return 0; | ||
484 | |||
485 | /* | ||
486 | * If this block group has been marked to be cleared for one reason or | ||
487 | * another then we can't trust the on disk cache, so just return. | ||
488 | */ | ||
494 | spin_lock(&block_group->lock); | 489 | spin_lock(&block_group->lock); |
495 | block_group->disk_cache_state = BTRFS_DC_CLEAR; | 490 | if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) { |
491 | spin_unlock(&block_group->lock); | ||
492 | return 0; | ||
493 | } | ||
496 | spin_unlock(&block_group->lock); | 494 | spin_unlock(&block_group->lock); |
497 | btrfs_remove_free_space_cache(block_group); | 495 | |
498 | goto out; | 496 | path = btrfs_alloc_path(); |
497 | if (!path) | ||
498 | return 0; | ||
499 | |||
500 | inode = lookup_free_space_inode(root, block_group, path); | ||
501 | if (IS_ERR(inode)) { | ||
502 | btrfs_free_path(path); | ||
503 | return 0; | ||
504 | } | ||
505 | |||
506 | ret = __load_free_space_cache(fs_info->tree_root, inode, ctl, | ||
507 | path, block_group->key.objectid); | ||
508 | btrfs_free_path(path); | ||
509 | if (ret <= 0) | ||
510 | goto out; | ||
511 | |||
512 | spin_lock(&ctl->tree_lock); | ||
513 | matched = (ctl->free_space == (block_group->key.offset - used - | ||
514 | block_group->bytes_super)); | ||
515 | spin_unlock(&ctl->tree_lock); | ||
516 | |||
517 | if (!matched) { | ||
518 | __btrfs_remove_free_space_cache(ctl); | ||
519 | printk(KERN_ERR "block group %llu has a wrong amount of free " | ||
520 | "space\n", block_group->key.objectid); | ||
521 | ret = -1; | ||
522 | } | ||
523 | out: | ||
524 | if (ret < 0) { | ||
525 | /* This cache is bogus, make sure it gets cleared */ | ||
526 | spin_lock(&block_group->lock); | ||
527 | block_group->disk_cache_state = BTRFS_DC_CLEAR; | ||
528 | spin_unlock(&block_group->lock); | ||
529 | ret = 0; | ||
530 | |||
531 | printk(KERN_ERR "btrfs: failed to load free space cache " | ||
532 | "for block group %llu\n", block_group->key.objectid); | ||
533 | } | ||
534 | |||
535 | iput(inode); | ||
536 | return ret; | ||
499 | } | 537 | } |
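
With the split above, the sanity check moves into the caller: after __load_free_space_cache() succeeds, load_free_space_cache() verifies that the rebuilt tree accounts for exactly key.offset - used - bytes_super bytes, and otherwise drops the cache and marks the block group BTRFS_DC_CLEAR so it gets rewritten. A small arithmetic sketch of that check, with made-up block group numbers:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Made-up block group numbers, only to illustrate the check. */
	uint64_t key_offset  = 1024ULL << 20;	/* block group length: 1 GiB        */
	uint64_t used        =  700ULL << 20;	/* bytes allocated to extents       */
	uint64_t bytes_super =    4ULL << 20;	/* reserved for superblock copies   */
	uint64_t cached_free =  320ULL << 20;	/* what the on-disk cache summed to */

	uint64_t expected = key_offset - used - bytes_super;	/* 320 MiB */

	if (cached_free != expected)
		printf("cache is stale, rebuild it (expected %llu, got %llu)\n",
		       (unsigned long long)expected,
		       (unsigned long long)cached_free);
	else
		printf("cache matches, trust it (%llu bytes free)\n",
		       (unsigned long long)cached_free);
	return 0;
}
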
500 | 538 | ||
501 | int btrfs_write_out_cache(struct btrfs_root *root, | 539 | int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode, |
502 | struct btrfs_trans_handle *trans, | 540 | struct btrfs_free_space_ctl *ctl, |
503 | struct btrfs_block_group_cache *block_group, | 541 | struct btrfs_block_group_cache *block_group, |
504 | struct btrfs_path *path) | 542 | struct btrfs_trans_handle *trans, |
543 | struct btrfs_path *path, u64 offset) | ||
505 | { | 544 | { |
506 | struct btrfs_free_space_header *header; | 545 | struct btrfs_free_space_header *header; |
507 | struct extent_buffer *leaf; | 546 | struct extent_buffer *leaf; |
508 | struct inode *inode; | ||
509 | struct rb_node *node; | 547 | struct rb_node *node; |
510 | struct list_head *pos, *n; | 548 | struct list_head *pos, *n; |
511 | struct page **pages; | 549 | struct page **pages; |
@@ -522,35 +560,18 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
522 | int index = 0, num_pages = 0; | 560 | int index = 0, num_pages = 0; |
523 | int entries = 0; | 561 | int entries = 0; |
524 | int bitmaps = 0; | 562 | int bitmaps = 0; |
525 | int ret = 0; | 563 | int ret = -1; |
526 | bool next_page = false; | 564 | bool next_page = false; |
527 | bool out_of_space = false; | 565 | bool out_of_space = false; |
528 | 566 | ||
529 | root = root->fs_info->tree_root; | ||
530 | |||
531 | INIT_LIST_HEAD(&bitmap_list); | 567 | INIT_LIST_HEAD(&bitmap_list); |
532 | 568 | ||
533 | spin_lock(&block_group->lock); | 569 | node = rb_first(&ctl->free_space_offset); |
534 | if (block_group->disk_cache_state < BTRFS_DC_SETUP) { | 570 | if (!node) |
535 | spin_unlock(&block_group->lock); | ||
536 | return 0; | ||
537 | } | ||
538 | spin_unlock(&block_group->lock); | ||
539 | |||
540 | inode = lookup_free_space_inode(root, block_group, path); | ||
541 | if (IS_ERR(inode)) | ||
542 | return 0; | ||
543 | |||
544 | if (!i_size_read(inode)) { | ||
545 | iput(inode); | ||
546 | return 0; | 571 | return 0; |
547 | } | ||
548 | 572 | ||
549 | node = rb_first(&block_group->free_space_offset); | 573 | if (!i_size_read(inode)) |
550 | if (!node) { | 574 | return -1; |
551 | iput(inode); | ||
552 | return 0; | ||
553 | } | ||
554 | 575 | ||
555 | num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> | 576 | num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >> |
556 | PAGE_CACHE_SHIFT; | 577 | PAGE_CACHE_SHIFT; |
@@ -560,16 +581,13 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
560 | 581 | ||
561 | /* We need a checksum per page. */ | 582 | /* We need a checksum per page. */ |
562 | crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); | 583 | crc = checksums = kzalloc(sizeof(u32) * num_pages, GFP_NOFS); |
563 | if (!crc) { | 584 | if (!crc) |
564 | iput(inode); | 585 | return -1; |
565 | return 0; | ||
566 | } | ||
567 | 586 | ||
568 | pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); | 587 | pages = kzalloc(sizeof(struct page *) * num_pages, GFP_NOFS); |
569 | if (!pages) { | 588 | if (!pages) { |
570 | kfree(crc); | 589 | kfree(crc); |
571 | iput(inode); | 590 | return -1; |
572 | return 0; | ||
573 | } | 591 | } |
574 | 592 | ||
575 | /* Since the first page has all of our checksums and our generation we | 593 | /* Since the first page has all of our checksums and our generation we |
@@ -579,7 +597,7 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
579 | first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); | 597 | first_page_offset = (sizeof(u32) * num_pages) + sizeof(u64); |
580 | 598 | ||
581 | /* Get the cluster for this block_group if it exists */ | 599 | /* Get the cluster for this block_group if it exists */ |
582 | if (!list_empty(&block_group->cluster_list)) | 600 | if (block_group && !list_empty(&block_group->cluster_list)) |
583 | cluster = list_entry(block_group->cluster_list.next, | 601 | cluster = list_entry(block_group->cluster_list.next, |
584 | struct btrfs_free_cluster, | 602 | struct btrfs_free_cluster, |
585 | block_group_list); | 603 | block_group_list); |
@@ -621,7 +639,8 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
621 | * When searching for pinned extents, we need to start at our start | 639 | * When searching for pinned extents, we need to start at our start |
622 | * offset. | 640 | * offset. |
623 | */ | 641 | */ |
624 | start = block_group->key.objectid; | 642 | if (block_group) |
643 | start = block_group->key.objectid; | ||
625 | 644 | ||
626 | /* Write out the extent entries */ | 645 | /* Write out the extent entries */ |
627 | do { | 646 | do { |
@@ -679,8 +698,9 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
679 | * We want to add any pinned extents to our free space cache | 698 | * We want to add any pinned extents to our free space cache |
680 | * so we don't leak the space | 699 | * so we don't leak the space |
681 | */ | 700 | */ |
682 | while (!next_page && (start < block_group->key.objectid + | 701 | while (block_group && !next_page && |
683 | block_group->key.offset)) { | 702 | (start < block_group->key.objectid + |
703 | block_group->key.offset)) { | ||
684 | ret = find_first_extent_bit(unpin, start, &start, &end, | 704 | ret = find_first_extent_bit(unpin, start, &start, &end, |
685 | EXTENT_DIRTY); | 705 | EXTENT_DIRTY); |
686 | if (ret) { | 706 | if (ret) { |
@@ -798,12 +818,12 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
798 | filemap_write_and_wait(inode->i_mapping); | 818 | filemap_write_and_wait(inode->i_mapping); |
799 | 819 | ||
800 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; | 820 | key.objectid = BTRFS_FREE_SPACE_OBJECTID; |
801 | key.offset = block_group->key.objectid; | 821 | key.offset = offset; |
802 | key.type = 0; | 822 | key.type = 0; |
803 | 823 | ||
804 | ret = btrfs_search_slot(trans, root, &key, path, 1, 1); | 824 | ret = btrfs_search_slot(trans, root, &key, path, 1, 1); |
805 | if (ret < 0) { | 825 | if (ret < 0) { |
806 | ret = 0; | 826 | ret = -1; |
807 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, | 827 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, |
808 | EXTENT_DIRTY | EXTENT_DELALLOC | | 828 | EXTENT_DIRTY | EXTENT_DELALLOC | |
809 | EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); | 829 | EXTENT_DO_ACCOUNTING, 0, 0, NULL, GFP_NOFS); |
@@ -816,13 +836,13 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
816 | path->slots[0]--; | 836 | path->slots[0]--; |
817 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | 837 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
818 | if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || | 838 | if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID || |
819 | found_key.offset != block_group->key.objectid) { | 839 | found_key.offset != offset) { |
820 | ret = 0; | 840 | ret = -1; |
821 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, | 841 | clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, bytes - 1, |
822 | EXTENT_DIRTY | EXTENT_DELALLOC | | 842 | EXTENT_DIRTY | EXTENT_DELALLOC | |
823 | EXTENT_DO_ACCOUNTING, 0, 0, NULL, | 843 | EXTENT_DO_ACCOUNTING, 0, 0, NULL, |
824 | GFP_NOFS); | 844 | GFP_NOFS); |
825 | btrfs_release_path(root, path); | 845 | btrfs_release_path(path); |
826 | goto out_free; | 846 | goto out_free; |
827 | } | 847 | } |
828 | } | 848 | } |
@@ -832,49 +852,83 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
832 | btrfs_set_free_space_bitmaps(leaf, header, bitmaps); | 852 | btrfs_set_free_space_bitmaps(leaf, header, bitmaps); |
833 | btrfs_set_free_space_generation(leaf, header, trans->transid); | 853 | btrfs_set_free_space_generation(leaf, header, trans->transid); |
834 | btrfs_mark_buffer_dirty(leaf); | 854 | btrfs_mark_buffer_dirty(leaf); |
835 | btrfs_release_path(root, path); | 855 | btrfs_release_path(path); |
836 | 856 | ||
837 | ret = 1; | 857 | ret = 1; |
838 | 858 | ||
839 | out_free: | 859 | out_free: |
840 | if (ret == 0) { | 860 | if (ret != 1) { |
841 | invalidate_inode_pages2_range(inode->i_mapping, 0, index); | 861 | invalidate_inode_pages2_range(inode->i_mapping, 0, index); |
842 | spin_lock(&block_group->lock); | ||
843 | block_group->disk_cache_state = BTRFS_DC_ERROR; | ||
844 | spin_unlock(&block_group->lock); | ||
845 | BTRFS_I(inode)->generation = 0; | 862 | BTRFS_I(inode)->generation = 0; |
846 | } | 863 | } |
847 | kfree(checksums); | 864 | kfree(checksums); |
848 | kfree(pages); | 865 | kfree(pages); |
849 | btrfs_update_inode(trans, root, inode); | 866 | btrfs_update_inode(trans, root, inode); |
867 | return ret; | ||
868 | } | ||
869 | |||
870 | int btrfs_write_out_cache(struct btrfs_root *root, | ||
871 | struct btrfs_trans_handle *trans, | ||
872 | struct btrfs_block_group_cache *block_group, | ||
873 | struct btrfs_path *path) | ||
874 | { | ||
875 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
876 | struct inode *inode; | ||
877 | int ret = 0; | ||
878 | |||
879 | root = root->fs_info->tree_root; | ||
880 | |||
881 | spin_lock(&block_group->lock); | ||
882 | if (block_group->disk_cache_state < BTRFS_DC_SETUP) { | ||
883 | spin_unlock(&block_group->lock); | ||
884 | return 0; | ||
885 | } | ||
886 | spin_unlock(&block_group->lock); | ||
887 | |||
888 | inode = lookup_free_space_inode(root, block_group, path); | ||
889 | if (IS_ERR(inode)) | ||
890 | return 0; | ||
891 | |||
892 | ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans, | ||
893 | path, block_group->key.objectid); | ||
894 | if (ret < 0) { | ||
895 | spin_lock(&block_group->lock); | ||
896 | block_group->disk_cache_state = BTRFS_DC_ERROR; | ||
897 | spin_unlock(&block_group->lock); | ||
898 | ret = 0; | ||
899 | |||
900 | printk(KERN_ERR "btrfs: failed to write free space cache " | ||
901 | "for block group %llu\n", block_group->key.objectid); | ||
902 | } | ||
903 | |||
850 | iput(inode); | 904 | iput(inode); |
851 | return ret; | 905 | return ret; |
852 | } | 906 | } |
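
btrfs_write_out_cache() is now only a policy wrapper: it checks disk_cache_state, looks up the cache inode, and delegates to __btrfs_write_out_cache(); on failure it flags the block group BTRFS_DC_ERROR but still returns 0 so a cache write can never fail the commit path. A minimal sketch of that contract, with illustrative types and helpers that are not btrfs API:

#include <stdio.h>

enum cache_state { DC_WRITTEN, DC_SETUP, DC_ERROR };

/* Illustrative worker: <0 on error, 1 when the cache was written. */
static int __write_out_cache(int simulate_error)
{
	return simulate_error ? -1 : 1;
}

/* Wrapper keeps the old "never fail the caller" contract. */
static int write_out_cache(enum cache_state *state, int simulate_error)
{
	int ret = __write_out_cache(simulate_error);

	if (ret < 0) {
		*state = DC_ERROR;	/* remember the cache is unusable */
		ret = 0;		/* but do not propagate the error */
	}
	return ret;
}

int main(void)
{
	enum cache_state s = DC_SETUP;
	int ret;

	ret = write_out_cache(&s, 0);
	printf("ok path:    ret=%d state=%d\n", ret, (int)s);
	ret = write_out_cache(&s, 1);
	printf("error path: ret=%d state=%d\n", ret, (int)s);
	return 0;
}
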
853 | 907 | ||
854 | static inline unsigned long offset_to_bit(u64 bitmap_start, u64 sectorsize, | 908 | static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit, |
855 | u64 offset) | 909 | u64 offset) |
856 | { | 910 | { |
857 | BUG_ON(offset < bitmap_start); | 911 | BUG_ON(offset < bitmap_start); |
858 | offset -= bitmap_start; | 912 | offset -= bitmap_start; |
859 | return (unsigned long)(div64_u64(offset, sectorsize)); | 913 | return (unsigned long)(div_u64(offset, unit)); |
860 | } | 914 | } |
861 | 915 | ||
862 | static inline unsigned long bytes_to_bits(u64 bytes, u64 sectorsize) | 916 | static inline unsigned long bytes_to_bits(u64 bytes, u32 unit) |
863 | { | 917 | { |
864 | return (unsigned long)(div64_u64(bytes, sectorsize)); | 918 | return (unsigned long)(div_u64(bytes, unit)); |
865 | } | 919 | } |
866 | 920 | ||
867 | static inline u64 offset_to_bitmap(struct btrfs_block_group_cache *block_group, | 921 | static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl, |
868 | u64 offset) | 922 | u64 offset) |
869 | { | 923 | { |
870 | u64 bitmap_start; | 924 | u64 bitmap_start; |
871 | u64 bytes_per_bitmap; | 925 | u64 bytes_per_bitmap; |
872 | 926 | ||
873 | bytes_per_bitmap = BITS_PER_BITMAP * block_group->sectorsize; | 927 | bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit; |
874 | bitmap_start = offset - block_group->key.objectid; | 928 | bitmap_start = offset - ctl->start; |
875 | bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); | 929 | bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap); |
876 | bitmap_start *= bytes_per_bitmap; | 930 | bitmap_start *= bytes_per_bitmap; |
877 | bitmap_start += block_group->key.objectid; | 931 | bitmap_start += ctl->start; |
878 | 932 | ||
879 | return bitmap_start; | 933 | return bitmap_start; |
880 | } | 934 | } |
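
The helpers above only change units: they now take the free-space ctl's unit and start instead of reaching into the block group, but the arithmetic is the same. Each bitmap covers BITS_PER_BITMAP * unit bytes, offset_to_bitmap() rounds an offset down to the start of the bitmap covering it, and offset_to_bit() gives the bit index inside that bitmap. A worked example, assuming a 4 KiB page and 4 KiB sector size (the concrete numbers are illustrative):

#include <stdio.h>
#include <stdint.h>

#define PAGE_CACHE_SIZE	4096ULL			/* assumed page size */
#define BITS_PER_BITMAP	(PAGE_CACHE_SIZE * 8)	/* 32768 bits per bitmap page */

int main(void)
{
	uint64_t unit  = 4096;				/* bytes per bit (sectorsize) */
	uint64_t start = 1024ULL << 20;			/* block group start: 1 GiB   */
	uint64_t bytes_per_bitmap = BITS_PER_BITMAP * unit;	/* 128 MiB per bitmap */
	uint64_t offset = start + (200ULL << 20);	/* some free byte in the group */

	/* offset_to_bitmap(): which bitmap entry covers 'offset' */
	uint64_t bitmap_start = (offset - start) / bytes_per_bitmap
				* bytes_per_bitmap + start;
	/* offset_to_bit(): which bit inside that bitmap */
	uint64_t bit = (offset - bitmap_start) / unit;

	printf("bytes per bitmap: %llu MiB\n",
	       (unsigned long long)(bytes_per_bitmap >> 20));
	printf("bitmap_start: %llu, bit: %llu\n",
	       (unsigned long long)bitmap_start, (unsigned long long)bit);
	return 0;
}
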
@@ -932,10 +986,10 @@ static int tree_insert_offset(struct rb_root *root, u64 offset, | |||
932 | * offset. | 986 | * offset. |
933 | */ | 987 | */ |
934 | static struct btrfs_free_space * | 988 | static struct btrfs_free_space * |
935 | tree_search_offset(struct btrfs_block_group_cache *block_group, | 989 | tree_search_offset(struct btrfs_free_space_ctl *ctl, |
936 | u64 offset, int bitmap_only, int fuzzy) | 990 | u64 offset, int bitmap_only, int fuzzy) |
937 | { | 991 | { |
938 | struct rb_node *n = block_group->free_space_offset.rb_node; | 992 | struct rb_node *n = ctl->free_space_offset.rb_node; |
939 | struct btrfs_free_space *entry, *prev = NULL; | 993 | struct btrfs_free_space *entry, *prev = NULL; |
940 | 994 | ||
941 | /* find entry that is closest to the 'offset' */ | 995 | /* find entry that is closest to the 'offset' */ |
@@ -1031,8 +1085,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, | |||
1031 | break; | 1085 | break; |
1032 | } | 1086 | } |
1033 | } | 1087 | } |
1034 | if (entry->offset + BITS_PER_BITMAP * | 1088 | if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset) |
1035 | block_group->sectorsize > offset) | ||
1036 | return entry; | 1089 | return entry; |
1037 | } else if (entry->offset + entry->bytes > offset) | 1090 | } else if (entry->offset + entry->bytes > offset) |
1038 | return entry; | 1091 | return entry; |
@@ -1043,7 +1096,7 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, | |||
1043 | while (1) { | 1096 | while (1) { |
1044 | if (entry->bitmap) { | 1097 | if (entry->bitmap) { |
1045 | if (entry->offset + BITS_PER_BITMAP * | 1098 | if (entry->offset + BITS_PER_BITMAP * |
1046 | block_group->sectorsize > offset) | 1099 | ctl->unit > offset) |
1047 | break; | 1100 | break; |
1048 | } else { | 1101 | } else { |
1049 | if (entry->offset + entry->bytes > offset) | 1102 | if (entry->offset + entry->bytes > offset) |
@@ -1059,42 +1112,47 @@ tree_search_offset(struct btrfs_block_group_cache *block_group, | |||
1059 | } | 1112 | } |
1060 | 1113 | ||
1061 | static inline void | 1114 | static inline void |
1062 | __unlink_free_space(struct btrfs_block_group_cache *block_group, | 1115 | __unlink_free_space(struct btrfs_free_space_ctl *ctl, |
1063 | struct btrfs_free_space *info) | 1116 | struct btrfs_free_space *info) |
1064 | { | 1117 | { |
1065 | rb_erase(&info->offset_index, &block_group->free_space_offset); | 1118 | rb_erase(&info->offset_index, &ctl->free_space_offset); |
1066 | block_group->free_extents--; | 1119 | ctl->free_extents--; |
1067 | } | 1120 | } |
1068 | 1121 | ||
1069 | static void unlink_free_space(struct btrfs_block_group_cache *block_group, | 1122 | static void unlink_free_space(struct btrfs_free_space_ctl *ctl, |
1070 | struct btrfs_free_space *info) | 1123 | struct btrfs_free_space *info) |
1071 | { | 1124 | { |
1072 | __unlink_free_space(block_group, info); | 1125 | __unlink_free_space(ctl, info); |
1073 | block_group->free_space -= info->bytes; | 1126 | ctl->free_space -= info->bytes; |
1074 | } | 1127 | } |
1075 | 1128 | ||
1076 | static int link_free_space(struct btrfs_block_group_cache *block_group, | 1129 | static int link_free_space(struct btrfs_free_space_ctl *ctl, |
1077 | struct btrfs_free_space *info) | 1130 | struct btrfs_free_space *info) |
1078 | { | 1131 | { |
1079 | int ret = 0; | 1132 | int ret = 0; |
1080 | 1133 | ||
1081 | BUG_ON(!info->bitmap && !info->bytes); | 1134 | BUG_ON(!info->bitmap && !info->bytes); |
1082 | ret = tree_insert_offset(&block_group->free_space_offset, info->offset, | 1135 | ret = tree_insert_offset(&ctl->free_space_offset, info->offset, |
1083 | &info->offset_index, (info->bitmap != NULL)); | 1136 | &info->offset_index, (info->bitmap != NULL)); |
1084 | if (ret) | 1137 | if (ret) |
1085 | return ret; | 1138 | return ret; |
1086 | 1139 | ||
1087 | block_group->free_space += info->bytes; | 1140 | ctl->free_space += info->bytes; |
1088 | block_group->free_extents++; | 1141 | ctl->free_extents++; |
1089 | return ret; | 1142 | return ret; |
1090 | } | 1143 | } |
1091 | 1144 | ||
1092 | static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) | 1145 | static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) |
1093 | { | 1146 | { |
1147 | struct btrfs_block_group_cache *block_group = ctl->private; | ||
1094 | u64 max_bytes; | 1148 | u64 max_bytes; |
1095 | u64 bitmap_bytes; | 1149 | u64 bitmap_bytes; |
1096 | u64 extent_bytes; | 1150 | u64 extent_bytes; |
1097 | u64 size = block_group->key.offset; | 1151 | u64 size = block_group->key.offset; |
1152 | u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; | ||
1153 | int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg); | ||
1154 | |||
1155 | BUG_ON(ctl->total_bitmaps > max_bitmaps); | ||
1098 | 1156 | ||
1099 | /* | 1157 | /* |
1100 | * The goal is to keep the total amount of memory used per 1gb of space | 1158 | * The goal is to keep the total amount of memory used per 1gb of space |
@@ -1112,10 +1170,10 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) | |||
1112 | * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as | 1170 | * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as |
1113 | * we add more bitmaps. | 1171 | * we add more bitmaps. |
1114 | */ | 1172 | */ |
1115 | bitmap_bytes = (block_group->total_bitmaps + 1) * PAGE_CACHE_SIZE; | 1173 | bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE; |
1116 | 1174 | ||
1117 | if (bitmap_bytes >= max_bytes) { | 1175 | if (bitmap_bytes >= max_bytes) { |
1118 | block_group->extents_thresh = 0; | 1176 | ctl->extents_thresh = 0; |
1119 | return; | 1177 | return; |
1120 | } | 1178 | } |
1121 | 1179 | ||
@@ -1126,47 +1184,43 @@ static void recalculate_thresholds(struct btrfs_block_group_cache *block_group) | |||
1126 | extent_bytes = max_bytes - bitmap_bytes; | 1184 | extent_bytes = max_bytes - bitmap_bytes; |
1127 | extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2)); | 1185 | extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2)); |
1128 | 1186 | ||
1129 | block_group->extents_thresh = | 1187 | ctl->extents_thresh = |
1130 | div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); | 1188 | div64_u64(extent_bytes, (sizeof(struct btrfs_free_space))); |
1131 | } | 1189 | } |
1132 | 1190 | ||
1133 | static void bitmap_clear_bits(struct btrfs_block_group_cache *block_group, | 1191 | static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl, |
1134 | struct btrfs_free_space *info, u64 offset, | 1192 | struct btrfs_free_space *info, u64 offset, |
1135 | u64 bytes) | 1193 | u64 bytes) |
1136 | { | 1194 | { |
1137 | unsigned long start, end; | 1195 | unsigned long start, count; |
1138 | unsigned long i; | ||
1139 | 1196 | ||
1140 | start = offset_to_bit(info->offset, block_group->sectorsize, offset); | 1197 | start = offset_to_bit(info->offset, ctl->unit, offset); |
1141 | end = start + bytes_to_bits(bytes, block_group->sectorsize); | 1198 | count = bytes_to_bits(bytes, ctl->unit); |
1142 | BUG_ON(end > BITS_PER_BITMAP); | 1199 | BUG_ON(start + count > BITS_PER_BITMAP); |
1143 | 1200 | ||
1144 | for (i = start; i < end; i++) | 1201 | bitmap_clear(info->bitmap, start, count); |
1145 | clear_bit(i, info->bitmap); | ||
1146 | 1202 | ||
1147 | info->bytes -= bytes; | 1203 | info->bytes -= bytes; |
1148 | block_group->free_space -= bytes; | 1204 | ctl->free_space -= bytes; |
1149 | } | 1205 | } |
1150 | 1206 | ||
1151 | static void bitmap_set_bits(struct btrfs_block_group_cache *block_group, | 1207 | static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl, |
1152 | struct btrfs_free_space *info, u64 offset, | 1208 | struct btrfs_free_space *info, u64 offset, |
1153 | u64 bytes) | 1209 | u64 bytes) |
1154 | { | 1210 | { |
1155 | unsigned long start, end; | 1211 | unsigned long start, count; |
1156 | unsigned long i; | ||
1157 | 1212 | ||
1158 | start = offset_to_bit(info->offset, block_group->sectorsize, offset); | 1213 | start = offset_to_bit(info->offset, ctl->unit, offset); |
1159 | end = start + bytes_to_bits(bytes, block_group->sectorsize); | 1214 | count = bytes_to_bits(bytes, ctl->unit); |
1160 | BUG_ON(end > BITS_PER_BITMAP); | 1215 | BUG_ON(start + count > BITS_PER_BITMAP); |
1161 | 1216 | ||
1162 | for (i = start; i < end; i++) | 1217 | bitmap_set(info->bitmap, start, count); |
1163 | set_bit(i, info->bitmap); | ||
1164 | 1218 | ||
1165 | info->bytes += bytes; | 1219 | info->bytes += bytes; |
1166 | block_group->free_space += bytes; | 1220 | ctl->free_space += bytes; |
1167 | } | 1221 | } |
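
The rewritten bitmap_clear_bits()/bitmap_set_bits() also drop the open-coded clear_bit()/set_bit() loops over a computed [start, end) range in favour of single bitmap_clear()/bitmap_set() calls from <linux/bitmap.h>, which take a (start, count) pair. A userspace stand-in showing the same effect (the helpers below are simple re-implementations for illustration, not the kernel's optimized versions):

#include <stdio.h>
#include <limits.h>

#define BITS 64

/* Userspace stand-ins for the kernel's bitmap_set()/bitmap_clear(). */
static void bitmap_set(unsigned long *map, unsigned int start, unsigned int count)
{
	for (unsigned int i = start; i < start + count; i++)
		map[i / (sizeof(long) * CHAR_BIT)] |=
			1UL << (i % (sizeof(long) * CHAR_BIT));
}

static void bitmap_clear(unsigned long *map, unsigned int start, unsigned int count)
{
	for (unsigned int i = start; i < start + count; i++)
		map[i / (sizeof(long) * CHAR_BIT)] &=
			~(1UL << (i % (sizeof(long) * CHAR_BIT)));
}

int main(void)
{
	unsigned long map[BITS / (sizeof(long) * CHAR_BIT)] = { 0 };

	bitmap_set(map, 3, 5);		/* mark five sectors free, bits 3..7 */
	bitmap_clear(map, 4, 2);	/* allocate two of them back out     */
	printf("%#lx\n", map[0]);	/* 0b11001000 -> 0xc8                */
	return 0;
}
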
1168 | 1222 | ||
1169 | static int search_bitmap(struct btrfs_block_group_cache *block_group, | 1223 | static int search_bitmap(struct btrfs_free_space_ctl *ctl, |
1170 | struct btrfs_free_space *bitmap_info, u64 *offset, | 1224 | struct btrfs_free_space *bitmap_info, u64 *offset, |
1171 | u64 *bytes) | 1225 | u64 *bytes) |
1172 | { | 1226 | { |
@@ -1174,9 +1228,9 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group, | |||
1174 | unsigned long bits, i; | 1228 | unsigned long bits, i; |
1175 | unsigned long next_zero; | 1229 | unsigned long next_zero; |
1176 | 1230 | ||
1177 | i = offset_to_bit(bitmap_info->offset, block_group->sectorsize, | 1231 | i = offset_to_bit(bitmap_info->offset, ctl->unit, |
1178 | max_t(u64, *offset, bitmap_info->offset)); | 1232 | max_t(u64, *offset, bitmap_info->offset)); |
1179 | bits = bytes_to_bits(*bytes, block_group->sectorsize); | 1233 | bits = bytes_to_bits(*bytes, ctl->unit); |
1180 | 1234 | ||
1181 | for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); | 1235 | for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i); |
1182 | i < BITS_PER_BITMAP; | 1236 | i < BITS_PER_BITMAP; |
@@ -1191,29 +1245,25 @@ static int search_bitmap(struct btrfs_block_group_cache *block_group, | |||
1191 | } | 1245 | } |
1192 | 1246 | ||
1193 | if (found_bits) { | 1247 | if (found_bits) { |
1194 | *offset = (u64)(i * block_group->sectorsize) + | 1248 | *offset = (u64)(i * ctl->unit) + bitmap_info->offset; |
1195 | bitmap_info->offset; | 1249 | *bytes = (u64)(found_bits) * ctl->unit; |
1196 | *bytes = (u64)(found_bits) * block_group->sectorsize; | ||
1197 | return 0; | 1250 | return 0; |
1198 | } | 1251 | } |
1199 | 1252 | ||
1200 | return -1; | 1253 | return -1; |
1201 | } | 1254 | } |
1202 | 1255 | ||
1203 | static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache | 1256 | static struct btrfs_free_space * |
1204 | *block_group, u64 *offset, | 1257 | find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes) |
1205 | u64 *bytes, int debug) | ||
1206 | { | 1258 | { |
1207 | struct btrfs_free_space *entry; | 1259 | struct btrfs_free_space *entry; |
1208 | struct rb_node *node; | 1260 | struct rb_node *node; |
1209 | int ret; | 1261 | int ret; |
1210 | 1262 | ||
1211 | if (!block_group->free_space_offset.rb_node) | 1263 | if (!ctl->free_space_offset.rb_node) |
1212 | return NULL; | 1264 | return NULL; |
1213 | 1265 | ||
1214 | entry = tree_search_offset(block_group, | 1266 | entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1); |
1215 | offset_to_bitmap(block_group, *offset), | ||
1216 | 0, 1); | ||
1217 | if (!entry) | 1267 | if (!entry) |
1218 | return NULL; | 1268 | return NULL; |
1219 | 1269 | ||
@@ -1223,7 +1273,7 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache | |||
1223 | continue; | 1273 | continue; |
1224 | 1274 | ||
1225 | if (entry->bitmap) { | 1275 | if (entry->bitmap) { |
1226 | ret = search_bitmap(block_group, entry, offset, bytes); | 1276 | ret = search_bitmap(ctl, entry, offset, bytes); |
1227 | if (!ret) | 1277 | if (!ret) |
1228 | return entry; | 1278 | return entry; |
1229 | continue; | 1279 | continue; |
@@ -1237,33 +1287,28 @@ static struct btrfs_free_space *find_free_space(struct btrfs_block_group_cache | |||
1237 | return NULL; | 1287 | return NULL; |
1238 | } | 1288 | } |
1239 | 1289 | ||
1240 | static void add_new_bitmap(struct btrfs_block_group_cache *block_group, | 1290 | static void add_new_bitmap(struct btrfs_free_space_ctl *ctl, |
1241 | struct btrfs_free_space *info, u64 offset) | 1291 | struct btrfs_free_space *info, u64 offset) |
1242 | { | 1292 | { |
1243 | u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize; | 1293 | info->offset = offset_to_bitmap(ctl, offset); |
1244 | int max_bitmaps = (int)div64_u64(block_group->key.offset + | ||
1245 | bytes_per_bg - 1, bytes_per_bg); | ||
1246 | BUG_ON(block_group->total_bitmaps >= max_bitmaps); | ||
1247 | |||
1248 | info->offset = offset_to_bitmap(block_group, offset); | ||
1249 | info->bytes = 0; | 1294 | info->bytes = 0; |
1250 | link_free_space(block_group, info); | 1295 | link_free_space(ctl, info); |
1251 | block_group->total_bitmaps++; | 1296 | ctl->total_bitmaps++; |
1252 | 1297 | ||
1253 | recalculate_thresholds(block_group); | 1298 | ctl->op->recalc_thresholds(ctl); |
1254 | } | 1299 | } |
1255 | 1300 | ||
1256 | static void free_bitmap(struct btrfs_block_group_cache *block_group, | 1301 | static void free_bitmap(struct btrfs_free_space_ctl *ctl, |
1257 | struct btrfs_free_space *bitmap_info) | 1302 | struct btrfs_free_space *bitmap_info) |
1258 | { | 1303 | { |
1259 | unlink_free_space(block_group, bitmap_info); | 1304 | unlink_free_space(ctl, bitmap_info); |
1260 | kfree(bitmap_info->bitmap); | 1305 | kfree(bitmap_info->bitmap); |
1261 | kmem_cache_free(btrfs_free_space_cachep, bitmap_info); | 1306 | kmem_cache_free(btrfs_free_space_cachep, bitmap_info); |
1262 | block_group->total_bitmaps--; | 1307 | ctl->total_bitmaps--; |
1263 | recalculate_thresholds(block_group); | 1308 | ctl->op->recalc_thresholds(ctl); |
1264 | } | 1309 | } |
1265 | 1310 | ||
1266 | static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_group, | 1311 | static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl, |
1267 | struct btrfs_free_space *bitmap_info, | 1312 | struct btrfs_free_space *bitmap_info, |
1268 | u64 *offset, u64 *bytes) | 1313 | u64 *offset, u64 *bytes) |
1269 | { | 1314 | { |
@@ -1272,8 +1317,7 @@ static noinline int remove_from_bitmap(struct btrfs_block_group_cache *block_gro | |||
1272 | int ret; | 1317 | int ret; |
1273 | 1318 | ||
1274 | again: | 1319 | again: |
1275 | end = bitmap_info->offset + | 1320 | end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1; |
1276 | (u64)(BITS_PER_BITMAP * block_group->sectorsize) - 1; | ||
1277 | 1321 | ||
1278 | /* | 1322 | /* |
1279 | * XXX - this can go away after a few releases. | 1323 | * XXX - this can go away after a few releases. |
@@ -1288,24 +1332,22 @@ again: | |||
1288 | search_start = *offset; | 1332 | search_start = *offset; |
1289 | search_bytes = *bytes; | 1333 | search_bytes = *bytes; |
1290 | search_bytes = min(search_bytes, end - search_start + 1); | 1334 | search_bytes = min(search_bytes, end - search_start + 1); |
1291 | ret = search_bitmap(block_group, bitmap_info, &search_start, | 1335 | ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes); |
1292 | &search_bytes); | ||
1293 | BUG_ON(ret < 0 || search_start != *offset); | 1336 | BUG_ON(ret < 0 || search_start != *offset); |
1294 | 1337 | ||
1295 | if (*offset > bitmap_info->offset && *offset + *bytes > end) { | 1338 | if (*offset > bitmap_info->offset && *offset + *bytes > end) { |
1296 | bitmap_clear_bits(block_group, bitmap_info, *offset, | 1339 | bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1); |
1297 | end - *offset + 1); | ||
1298 | *bytes -= end - *offset + 1; | 1340 | *bytes -= end - *offset + 1; |
1299 | *offset = end + 1; | 1341 | *offset = end + 1; |
1300 | } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { | 1342 | } else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) { |
1301 | bitmap_clear_bits(block_group, bitmap_info, *offset, *bytes); | 1343 | bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes); |
1302 | *bytes = 0; | 1344 | *bytes = 0; |
1303 | } | 1345 | } |
1304 | 1346 | ||
1305 | if (*bytes) { | 1347 | if (*bytes) { |
1306 | struct rb_node *next = rb_next(&bitmap_info->offset_index); | 1348 | struct rb_node *next = rb_next(&bitmap_info->offset_index); |
1307 | if (!bitmap_info->bytes) | 1349 | if (!bitmap_info->bytes) |
1308 | free_bitmap(block_group, bitmap_info); | 1350 | free_bitmap(ctl, bitmap_info); |
1309 | 1351 | ||
1310 | /* | 1352 | /* |
1311 | * no entry after this bitmap, but we still have bytes to | 1353 | * no entry after this bitmap, but we still have bytes to |
@@ -1332,31 +1374,28 @@ again: | |||
1332 | */ | 1374 | */ |
1333 | search_start = *offset; | 1375 | search_start = *offset; |
1334 | search_bytes = *bytes; | 1376 | search_bytes = *bytes; |
1335 | ret = search_bitmap(block_group, bitmap_info, &search_start, | 1377 | ret = search_bitmap(ctl, bitmap_info, &search_start, |
1336 | &search_bytes); | 1378 | &search_bytes); |
1337 | if (ret < 0 || search_start != *offset) | 1379 | if (ret < 0 || search_start != *offset) |
1338 | return -EAGAIN; | 1380 | return -EAGAIN; |
1339 | 1381 | ||
1340 | goto again; | 1382 | goto again; |
1341 | } else if (!bitmap_info->bytes) | 1383 | } else if (!bitmap_info->bytes) |
1342 | free_bitmap(block_group, bitmap_info); | 1384 | free_bitmap(ctl, bitmap_info); |
1343 | 1385 | ||
1344 | return 0; | 1386 | return 0; |
1345 | } | 1387 | } |
1346 | 1388 | ||
1347 | static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, | 1389 | static bool use_bitmap(struct btrfs_free_space_ctl *ctl, |
1348 | struct btrfs_free_space *info) | 1390 | struct btrfs_free_space *info) |
1349 | { | 1391 | { |
1350 | struct btrfs_free_space *bitmap_info; | 1392 | struct btrfs_block_group_cache *block_group = ctl->private; |
1351 | int added = 0; | ||
1352 | u64 bytes, offset, end; | ||
1353 | int ret; | ||
1354 | 1393 | ||
1355 | /* | 1394 | /* |
1356 | * If we are below the extents threshold then we can add this as an | 1395 | * If we are below the extents threshold then we can add this as an |
1357 | * extent, and don't have to deal with the bitmap | 1396 | * extent, and don't have to deal with the bitmap |
1358 | */ | 1397 | */ |
1359 | if (block_group->free_extents < block_group->extents_thresh) { | 1398 | if (ctl->free_extents < ctl->extents_thresh) { |
1360 | /* | 1399 | /* |
1361 | * If this block group has some small extents we don't want to | 1400 | * If this block group has some small extents we don't want to |
1362 | * use up all of our free slots in the cache with them, we want | 1401 | * use up all of our free slots in the cache with them, we want |
@@ -1365,11 +1404,10 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, | |||
1365 | * the overhead of a bitmap if we don't have to. | 1404 | * the overhead of a bitmap if we don't have to. |
1366 | */ | 1405 | */ |
1367 | if (info->bytes <= block_group->sectorsize * 4) { | 1406 | if (info->bytes <= block_group->sectorsize * 4) { |
1368 | if (block_group->free_extents * 2 <= | 1407 | if (ctl->free_extents * 2 <= ctl->extents_thresh) |
1369 | block_group->extents_thresh) | 1408 | return false; |
1370 | return 0; | ||
1371 | } else { | 1409 | } else { |
1372 | return 0; | 1410 | return false; |
1373 | } | 1411 | } |
1374 | } | 1412 | } |
1375 | 1413 | ||
@@ -1379,31 +1417,42 @@ static int insert_into_bitmap(struct btrfs_block_group_cache *block_group, | |||
1379 | */ | 1417 | */ |
1380 | if (BITS_PER_BITMAP * block_group->sectorsize > | 1418 | if (BITS_PER_BITMAP * block_group->sectorsize > |
1381 | block_group->key.offset) | 1419 | block_group->key.offset) |
1382 | return 0; | 1420 | return false; |
1421 | |||
1422 | return true; | ||
1423 | } | ||
1424 | |||
1425 | static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl, | ||
1426 | struct btrfs_free_space *info) | ||
1427 | { | ||
1428 | struct btrfs_free_space *bitmap_info; | ||
1429 | int added = 0; | ||
1430 | u64 bytes, offset, end; | ||
1431 | int ret; | ||
1383 | 1432 | ||
1384 | bytes = info->bytes; | 1433 | bytes = info->bytes; |
1385 | offset = info->offset; | 1434 | offset = info->offset; |
1386 | 1435 | ||
1436 | if (!ctl->op->use_bitmap(ctl, info)) | ||
1437 | return 0; | ||
1438 | |||
1387 | again: | 1439 | again: |
1388 | bitmap_info = tree_search_offset(block_group, | 1440 | bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), |
1389 | offset_to_bitmap(block_group, offset), | ||
1390 | 1, 0); | 1441 | 1, 0); |
1391 | if (!bitmap_info) { | 1442 | if (!bitmap_info) { |
1392 | BUG_ON(added); | 1443 | BUG_ON(added); |
1393 | goto new_bitmap; | 1444 | goto new_bitmap; |
1394 | } | 1445 | } |
1395 | 1446 | ||
1396 | end = bitmap_info->offset + | 1447 | end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit); |
1397 | (u64)(BITS_PER_BITMAP * block_group->sectorsize); | ||
1398 | 1448 | ||
1399 | if (offset >= bitmap_info->offset && offset + bytes > end) { | 1449 | if (offset >= bitmap_info->offset && offset + bytes > end) { |
1400 | bitmap_set_bits(block_group, bitmap_info, offset, | 1450 | bitmap_set_bits(ctl, bitmap_info, offset, end - offset); |
1401 | end - offset); | ||
1402 | bytes -= end - offset; | 1451 | bytes -= end - offset; |
1403 | offset = end; | 1452 | offset = end; |
1404 | added = 0; | 1453 | added = 0; |
1405 | } else if (offset >= bitmap_info->offset && offset + bytes <= end) { | 1454 | } else if (offset >= bitmap_info->offset && offset + bytes <= end) { |
1406 | bitmap_set_bits(block_group, bitmap_info, offset, bytes); | 1455 | bitmap_set_bits(ctl, bitmap_info, offset, bytes); |
1407 | bytes = 0; | 1456 | bytes = 0; |
1408 | } else { | 1457 | } else { |
1409 | BUG(); | 1458 | BUG(); |
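The (u64)(BITS_PER_BITMAP * ctl->unit) expression above is the number of bytes a single bitmap entry covers, and offset_to_bitmap() rounds an offset down to a multiple of it. A quick sanity check of that span, assuming one 4K page of bitmap bits and a 4K sector unit (both assumptions, not values taken from this hunk):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t page_size = 4096;
	const uint64_t bits_per_bitmap = page_size * 8;	/* 32768 bits        */
	const uint64_t unit = 4096;			/* ctl->unit stand-in */
	const uint64_t span = bits_per_bitmap * unit;	/* bytes per bitmap   */

	printf("one bitmap covers %llu MiB\n",
	       (unsigned long long)(span >> 20));	/* 128 MiB */
	/* offset_to_bitmap() would round an offset down to a multiple of span */
	printf("offset 0x%llx maps to the bitmap at 0x%llx\n",
	       0x9000000ULL, (unsigned long long)((0x9000000ULL / span) * span));
	return 0;
}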
@@ -1417,19 +1466,19 @@ again: | |||
1417 | 1466 | ||
1418 | new_bitmap: | 1467 | new_bitmap: |
1419 | if (info && info->bitmap) { | 1468 | if (info && info->bitmap) { |
1420 | add_new_bitmap(block_group, info, offset); | 1469 | add_new_bitmap(ctl, info, offset); |
1421 | added = 1; | 1470 | added = 1; |
1422 | info = NULL; | 1471 | info = NULL; |
1423 | goto again; | 1472 | goto again; |
1424 | } else { | 1473 | } else { |
1425 | spin_unlock(&block_group->tree_lock); | 1474 | spin_unlock(&ctl->tree_lock); |
1426 | 1475 | ||
1427 | /* no pre-allocated info, allocate a new one */ | 1476 | /* no pre-allocated info, allocate a new one */ |
1428 | if (!info) { | 1477 | if (!info) { |
1429 | info = kmem_cache_zalloc(btrfs_free_space_cachep, | 1478 | info = kmem_cache_zalloc(btrfs_free_space_cachep, |
1430 | GFP_NOFS); | 1479 | GFP_NOFS); |
1431 | if (!info) { | 1480 | if (!info) { |
1432 | spin_lock(&block_group->tree_lock); | 1481 | spin_lock(&ctl->tree_lock); |
1433 | ret = -ENOMEM; | 1482 | ret = -ENOMEM; |
1434 | goto out; | 1483 | goto out; |
1435 | } | 1484 | } |
@@ -1437,7 +1486,7 @@ new_bitmap: | |||
1437 | 1486 | ||
1438 | /* allocate the bitmap */ | 1487 | /* allocate the bitmap */ |
1439 | info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); | 1488 | info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS); |
1440 | spin_lock(&block_group->tree_lock); | 1489 | spin_lock(&ctl->tree_lock); |
1441 | if (!info->bitmap) { | 1490 | if (!info->bitmap) { |
1442 | ret = -ENOMEM; | 1491 | ret = -ENOMEM; |
1443 | goto out; | 1492 | goto out; |
@@ -1455,7 +1504,7 @@ out: | |||
1455 | return ret; | 1504 | return ret; |
1456 | } | 1505 | } |
1457 | 1506 | ||
1458 | bool try_merge_free_space(struct btrfs_block_group_cache *block_group, | 1507 | static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl, |
1459 | struct btrfs_free_space *info, bool update_stat) | 1508 | struct btrfs_free_space *info, bool update_stat) |
1460 | { | 1509 | { |
1461 | struct btrfs_free_space *left_info; | 1510 | struct btrfs_free_space *left_info; |
@@ -1469,18 +1518,18 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, | |||
1469 | * are adding, if there is remove that struct and add a new one to | 1518 | * are adding, if there is remove that struct and add a new one to |
1470 | * cover the entire range | 1519 | * cover the entire range |
1471 | */ | 1520 | */ |
1472 | right_info = tree_search_offset(block_group, offset + bytes, 0, 0); | 1521 | right_info = tree_search_offset(ctl, offset + bytes, 0, 0); |
1473 | if (right_info && rb_prev(&right_info->offset_index)) | 1522 | if (right_info && rb_prev(&right_info->offset_index)) |
1474 | left_info = rb_entry(rb_prev(&right_info->offset_index), | 1523 | left_info = rb_entry(rb_prev(&right_info->offset_index), |
1475 | struct btrfs_free_space, offset_index); | 1524 | struct btrfs_free_space, offset_index); |
1476 | else | 1525 | else |
1477 | left_info = tree_search_offset(block_group, offset - 1, 0, 0); | 1526 | left_info = tree_search_offset(ctl, offset - 1, 0, 0); |
1478 | 1527 | ||
1479 | if (right_info && !right_info->bitmap) { | 1528 | if (right_info && !right_info->bitmap) { |
1480 | if (update_stat) | 1529 | if (update_stat) |
1481 | unlink_free_space(block_group, right_info); | 1530 | unlink_free_space(ctl, right_info); |
1482 | else | 1531 | else |
1483 | __unlink_free_space(block_group, right_info); | 1532 | __unlink_free_space(ctl, right_info); |
1484 | info->bytes += right_info->bytes; | 1533 | info->bytes += right_info->bytes; |
1485 | kmem_cache_free(btrfs_free_space_cachep, right_info); | 1534 | kmem_cache_free(btrfs_free_space_cachep, right_info); |
1486 | merged = true; | 1535 | merged = true; |
@@ -1489,9 +1538,9 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, | |||
1489 | if (left_info && !left_info->bitmap && | 1538 | if (left_info && !left_info->bitmap && |
1490 | left_info->offset + left_info->bytes == offset) { | 1539 | left_info->offset + left_info->bytes == offset) { |
1491 | if (update_stat) | 1540 | if (update_stat) |
1492 | unlink_free_space(block_group, left_info); | 1541 | unlink_free_space(ctl, left_info); |
1493 | else | 1542 | else |
1494 | __unlink_free_space(block_group, left_info); | 1543 | __unlink_free_space(ctl, left_info); |
1495 | info->offset = left_info->offset; | 1544 | info->offset = left_info->offset; |
1496 | info->bytes += left_info->bytes; | 1545 | info->bytes += left_info->bytes; |
1497 | kmem_cache_free(btrfs_free_space_cachep, left_info); | 1546 | kmem_cache_free(btrfs_free_space_cachep, left_info); |
@@ -1501,8 +1550,8 @@ bool try_merge_free_space(struct btrfs_block_group_cache *block_group, | |||
1501 | return merged; | 1550 | return merged; |
1502 | } | 1551 | } |
1503 | 1552 | ||
1504 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | 1553 | int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, |
1505 | u64 offset, u64 bytes) | 1554 | u64 offset, u64 bytes) |
1506 | { | 1555 | { |
1507 | struct btrfs_free_space *info; | 1556 | struct btrfs_free_space *info; |
1508 | int ret = 0; | 1557 | int ret = 0; |
@@ -1514,9 +1563,9 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | |||
1514 | info->offset = offset; | 1563 | info->offset = offset; |
1515 | info->bytes = bytes; | 1564 | info->bytes = bytes; |
1516 | 1565 | ||
1517 | spin_lock(&block_group->tree_lock); | 1566 | spin_lock(&ctl->tree_lock); |
1518 | 1567 | ||
1519 | if (try_merge_free_space(block_group, info, true)) | 1568 | if (try_merge_free_space(ctl, info, true)) |
1520 | goto link; | 1569 | goto link; |
1521 | 1570 | ||
1522 | /* | 1571 | /* |
@@ -1524,7 +1573,7 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | |||
1524 | * extent then we know we're going to have to allocate a new extent, so | 1573 | * extent then we know we're going to have to allocate a new extent, so |
1525 | * before we do that see if we need to drop this into a bitmap | 1574 | * before we do that see if we need to drop this into a bitmap |
1526 | */ | 1575 | */ |
1527 | ret = insert_into_bitmap(block_group, info); | 1576 | ret = insert_into_bitmap(ctl, info); |
1528 | if (ret < 0) { | 1577 | if (ret < 0) { |
1529 | goto out; | 1578 | goto out; |
1530 | } else if (ret) { | 1579 | } else if (ret) { |
@@ -1532,11 +1581,11 @@ int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | |||
1532 | goto out; | 1581 | goto out; |
1533 | } | 1582 | } |
1534 | link: | 1583 | link: |
1535 | ret = link_free_space(block_group, info); | 1584 | ret = link_free_space(ctl, info); |
1536 | if (ret) | 1585 | if (ret) |
1537 | kmem_cache_free(btrfs_free_space_cachep, info); | 1586 | kmem_cache_free(btrfs_free_space_cachep, info); |
1538 | out: | 1587 | out: |
1539 | spin_unlock(&block_group->tree_lock); | 1588 | spin_unlock(&ctl->tree_lock); |
1540 | 1589 | ||
1541 | if (ret) { | 1590 | if (ret) { |
1542 | printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); | 1591 | printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret); |
@@ -1549,21 +1598,21 @@ out: | |||
1549 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | 1598 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, |
1550 | u64 offset, u64 bytes) | 1599 | u64 offset, u64 bytes) |
1551 | { | 1600 | { |
1601 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1552 | struct btrfs_free_space *info; | 1602 | struct btrfs_free_space *info; |
1553 | struct btrfs_free_space *next_info = NULL; | 1603 | struct btrfs_free_space *next_info = NULL; |
1554 | int ret = 0; | 1604 | int ret = 0; |
1555 | 1605 | ||
1556 | spin_lock(&block_group->tree_lock); | 1606 | spin_lock(&ctl->tree_lock); |
1557 | 1607 | ||
1558 | again: | 1608 | again: |
1559 | info = tree_search_offset(block_group, offset, 0, 0); | 1609 | info = tree_search_offset(ctl, offset, 0, 0); |
1560 | if (!info) { | 1610 | if (!info) { |
1561 | /* | 1611 | /* |
1562 | * oops didn't find an extent that matched the space we wanted | 1612 | * oops didn't find an extent that matched the space we wanted |
1563 | * to remove, look for a bitmap instead | 1613 | * to remove, look for a bitmap instead |
1564 | */ | 1614 | */ |
1565 | info = tree_search_offset(block_group, | 1615 | info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), |
1566 | offset_to_bitmap(block_group, offset), | ||
1567 | 1, 0); | 1616 | 1, 0); |
1568 | if (!info) { | 1617 | if (!info) { |
1569 | WARN_ON(1); | 1618 | WARN_ON(1); |
@@ -1578,8 +1627,8 @@ again: | |||
1578 | offset_index); | 1627 | offset_index); |
1579 | 1628 | ||
1580 | if (next_info->bitmap) | 1629 | if (next_info->bitmap) |
1581 | end = next_info->offset + BITS_PER_BITMAP * | 1630 | end = next_info->offset + |
1582 | block_group->sectorsize - 1; | 1631 | BITS_PER_BITMAP * ctl->unit - 1; |
1583 | else | 1632 | else |
1584 | end = next_info->offset + next_info->bytes; | 1633 | end = next_info->offset + next_info->bytes; |
1585 | 1634 | ||
@@ -1599,20 +1648,20 @@ again: | |||
1599 | } | 1648 | } |
1600 | 1649 | ||
1601 | if (info->bytes == bytes) { | 1650 | if (info->bytes == bytes) { |
1602 | unlink_free_space(block_group, info); | 1651 | unlink_free_space(ctl, info); |
1603 | if (info->bitmap) { | 1652 | if (info->bitmap) { |
1604 | kfree(info->bitmap); | 1653 | kfree(info->bitmap); |
1605 | block_group->total_bitmaps--; | 1654 | ctl->total_bitmaps--; |
1606 | } | 1655 | } |
1607 | kmem_cache_free(btrfs_free_space_cachep, info); | 1656 | kmem_cache_free(btrfs_free_space_cachep, info); |
1608 | goto out_lock; | 1657 | goto out_lock; |
1609 | } | 1658 | } |
1610 | 1659 | ||
1611 | if (!info->bitmap && info->offset == offset) { | 1660 | if (!info->bitmap && info->offset == offset) { |
1612 | unlink_free_space(block_group, info); | 1661 | unlink_free_space(ctl, info); |
1613 | info->offset += bytes; | 1662 | info->offset += bytes; |
1614 | info->bytes -= bytes; | 1663 | info->bytes -= bytes; |
1615 | link_free_space(block_group, info); | 1664 | link_free_space(ctl, info); |
1616 | goto out_lock; | 1665 | goto out_lock; |
1617 | } | 1666 | } |
1618 | 1667 | ||
@@ -1626,13 +1675,13 @@ again: | |||
1626 | * first unlink the old info and then | 1675 | * first unlink the old info and then |
1627 | * insert it again after the hole we're creating | 1676 | * insert it again after the hole we're creating |
1628 | */ | 1677 | */ |
1629 | unlink_free_space(block_group, info); | 1678 | unlink_free_space(ctl, info); |
1630 | if (offset + bytes < info->offset + info->bytes) { | 1679 | if (offset + bytes < info->offset + info->bytes) { |
1631 | u64 old_end = info->offset + info->bytes; | 1680 | u64 old_end = info->offset + info->bytes; |
1632 | 1681 | ||
1633 | info->offset = offset + bytes; | 1682 | info->offset = offset + bytes; |
1634 | info->bytes = old_end - info->offset; | 1683 | info->bytes = old_end - info->offset; |
1635 | ret = link_free_space(block_group, info); | 1684 | ret = link_free_space(ctl, info); |
1636 | WARN_ON(ret); | 1685 | WARN_ON(ret); |
1637 | if (ret) | 1686 | if (ret) |
1638 | goto out_lock; | 1687 | goto out_lock; |
@@ -1642,7 +1691,7 @@ again: | |||
1642 | */ | 1691 | */ |
1643 | kmem_cache_free(btrfs_free_space_cachep, info); | 1692 | kmem_cache_free(btrfs_free_space_cachep, info); |
1644 | } | 1693 | } |
1645 | spin_unlock(&block_group->tree_lock); | 1694 | spin_unlock(&ctl->tree_lock); |
1646 | 1695 | ||
1647 | /* step two, insert a new info struct to cover | 1696 | /* step two, insert a new info struct to cover |
1648 | * anything before the hole | 1697 | * anything before the hole |
@@ -1653,12 +1702,12 @@ again: | |||
1653 | goto out; | 1702 | goto out; |
1654 | } | 1703 | } |
1655 | 1704 | ||
1656 | ret = remove_from_bitmap(block_group, info, &offset, &bytes); | 1705 | ret = remove_from_bitmap(ctl, info, &offset, &bytes); |
1657 | if (ret == -EAGAIN) | 1706 | if (ret == -EAGAIN) |
1658 | goto again; | 1707 | goto again; |
1659 | BUG_ON(ret); | 1708 | BUG_ON(ret); |
1660 | out_lock: | 1709 | out_lock: |
1661 | spin_unlock(&block_group->tree_lock); | 1710 | spin_unlock(&ctl->tree_lock); |
1662 | out: | 1711 | out: |
1663 | return ret; | 1712 | return ret; |
1664 | } | 1713 | } |
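When the range being removed falls strictly inside one plain extent entry, the code above punches a hole: the original entry is re-linked to cover whatever lies after the removed range, and a second step re-adds whatever precedes it. The arithmetic in isolation, with made-up numbers:

#include <stdint.h>
#include <stdio.h>

struct piece { uint64_t offset, bytes; };

/* Punch [rm_off, rm_off + rm_bytes) out of one free extent entry. */
static void punch_hole(struct piece e, uint64_t rm_off, uint64_t rm_bytes,
		       struct piece *before, struct piece *after)
{
	uint64_t old_end = e.offset + e.bytes;

	/* what is left in front of the hole (re-added as a new entry) */
	before->offset = e.offset;
	before->bytes  = rm_off - e.offset;

	/* what is left behind the hole (the re-linked original entry) */
	after->offset = rm_off + rm_bytes;
	after->bytes  = old_end - after->offset;
}

int main(void)
{
	struct piece e = { 100, 50 }, before, after;

	punch_hole(e, 120, 10, &before, &after);
	printf("before: [%llu,+%llu)  after: [%llu,+%llu)\n",
	       (unsigned long long)before.offset, (unsigned long long)before.bytes,
	       (unsigned long long)after.offset, (unsigned long long)after.bytes);
	/* prints: before: [100,+20)  after: [130,+20) */
	return 0;
}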
@@ -1666,11 +1715,12 @@ out: | |||
1666 | void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, | 1715 | void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, |
1667 | u64 bytes) | 1716 | u64 bytes) |
1668 | { | 1717 | { |
1718 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1669 | struct btrfs_free_space *info; | 1719 | struct btrfs_free_space *info; |
1670 | struct rb_node *n; | 1720 | struct rb_node *n; |
1671 | int count = 0; | 1721 | int count = 0; |
1672 | 1722 | ||
1673 | for (n = rb_first(&block_group->free_space_offset); n; n = rb_next(n)) { | 1723 | for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) { |
1674 | info = rb_entry(n, struct btrfs_free_space, offset_index); | 1724 | info = rb_entry(n, struct btrfs_free_space, offset_index); |
1675 | if (info->bytes >= bytes) | 1725 | if (info->bytes >= bytes) |
1676 | count++; | 1726 | count++; |
@@ -1685,19 +1735,28 @@ void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, | |||
1685 | "\n", count); | 1735 | "\n", count); |
1686 | } | 1736 | } |
1687 | 1737 | ||
1688 | u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group) | 1738 | static struct btrfs_free_space_op free_space_op = { |
1739 | .recalc_thresholds = recalculate_thresholds, | ||
1740 | .use_bitmap = use_bitmap, | ||
1741 | }; | ||
1742 | |||
1743 | void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group) | ||
1689 | { | 1744 | { |
1690 | struct btrfs_free_space *info; | 1745 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; |
1691 | struct rb_node *n; | ||
1692 | u64 ret = 0; | ||
1693 | 1746 | ||
1694 | for (n = rb_first(&block_group->free_space_offset); n; | 1747 | spin_lock_init(&ctl->tree_lock); |
1695 | n = rb_next(n)) { | 1748 | ctl->unit = block_group->sectorsize; |
1696 | info = rb_entry(n, struct btrfs_free_space, offset_index); | 1749 | ctl->start = block_group->key.objectid; |
1697 | ret += info->bytes; | 1750 | ctl->private = block_group; |
1698 | } | 1751 | ctl->op = &free_space_op; |
1699 | 1752 | ||
1700 | return ret; | 1753 | /* |
1754 | * we only want to have 32k of ram per block group for keeping | ||
1755 | * track of free space, and if we pass 1/2 of that we want to | ||
1756 | * start converting things over to using bitmaps | ||
1757 | */ | ||
1758 | ctl->extents_thresh = ((1024 * 32) / 2) / | ||
1759 | sizeof(struct btrfs_free_space); | ||
1701 | } | 1760 | } |
1702 | 1761 | ||
1703 | /* | 1762 | /* |
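The comment above fixes the in-memory budget at 32K per block group and starts converting to bitmaps once half of it is consumed by extent entries. Assuming sizeof(struct btrfs_free_space) is about 64 bytes on a 64-bit build (an estimate, not stated in the patch), the initial threshold works out to roughly 256 entries:

#include <stddef.h>
#include <stdio.h>

int main(void)
{
	/* 32K budget per block group, threshold set at half of it. */
	const size_t budget = 1024 * 32;
	const size_t assumed_entry_size = 64;	/* guess for sizeof(struct btrfs_free_space) */
	const size_t extents_thresh = (budget / 2) / assumed_entry_size;

	printf("extents_thresh = %zu extent entries\n", extents_thresh);	/* 256 */
	return 0;
}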
@@ -1711,6 +1770,7 @@ __btrfs_return_cluster_to_free_space( | |||
1711 | struct btrfs_block_group_cache *block_group, | 1770 | struct btrfs_block_group_cache *block_group, |
1712 | struct btrfs_free_cluster *cluster) | 1771 | struct btrfs_free_cluster *cluster) |
1713 | { | 1772 | { |
1773 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1714 | struct btrfs_free_space *entry; | 1774 | struct btrfs_free_space *entry; |
1715 | struct rb_node *node; | 1775 | struct rb_node *node; |
1716 | 1776 | ||
@@ -1732,8 +1792,8 @@ __btrfs_return_cluster_to_free_space( | |||
1732 | 1792 | ||
1733 | bitmap = (entry->bitmap != NULL); | 1793 | bitmap = (entry->bitmap != NULL); |
1734 | if (!bitmap) | 1794 | if (!bitmap) |
1735 | try_merge_free_space(block_group, entry, false); | 1795 | try_merge_free_space(ctl, entry, false); |
1736 | tree_insert_offset(&block_group->free_space_offset, | 1796 | tree_insert_offset(&ctl->free_space_offset, |
1737 | entry->offset, &entry->offset_index, bitmap); | 1797 | entry->offset, &entry->offset_index, bitmap); |
1738 | } | 1798 | } |
1739 | cluster->root = RB_ROOT; | 1799 | cluster->root = RB_ROOT; |
@@ -1744,14 +1804,38 @@ out: | |||
1744 | return 0; | 1804 | return 0; |
1745 | } | 1805 | } |
1746 | 1806 | ||
1747 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | 1807 | void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl) |
1748 | { | 1808 | { |
1749 | struct btrfs_free_space *info; | 1809 | struct btrfs_free_space *info; |
1750 | struct rb_node *node; | 1810 | struct rb_node *node; |
1811 | |||
1812 | while ((node = rb_last(&ctl->free_space_offset)) != NULL) { | ||
1813 | info = rb_entry(node, struct btrfs_free_space, offset_index); | ||
1814 | unlink_free_space(ctl, info); | ||
1815 | kfree(info->bitmap); | ||
1816 | kmem_cache_free(btrfs_free_space_cachep, info); | ||
1817 | if (need_resched()) { | ||
1818 | spin_unlock(&ctl->tree_lock); | ||
1819 | cond_resched(); | ||
1820 | spin_lock(&ctl->tree_lock); | ||
1821 | } | ||
1822 | } | ||
1823 | } | ||
1824 | |||
1825 | void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl) | ||
1826 | { | ||
1827 | spin_lock(&ctl->tree_lock); | ||
1828 | __btrfs_remove_free_space_cache_locked(ctl); | ||
1829 | spin_unlock(&ctl->tree_lock); | ||
1830 | } | ||
1831 | |||
1832 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | ||
1833 | { | ||
1834 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1751 | struct btrfs_free_cluster *cluster; | 1835 | struct btrfs_free_cluster *cluster; |
1752 | struct list_head *head; | 1836 | struct list_head *head; |
1753 | 1837 | ||
1754 | spin_lock(&block_group->tree_lock); | 1838 | spin_lock(&ctl->tree_lock); |
1755 | while ((head = block_group->cluster_list.next) != | 1839 | while ((head = block_group->cluster_list.next) != |
1756 | &block_group->cluster_list) { | 1840 | &block_group->cluster_list) { |
1757 | cluster = list_entry(head, struct btrfs_free_cluster, | 1841 | cluster = list_entry(head, struct btrfs_free_cluster, |
@@ -1760,60 +1844,46 @@ void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group) | |||
1760 | WARN_ON(cluster->block_group != block_group); | 1844 | WARN_ON(cluster->block_group != block_group); |
1761 | __btrfs_return_cluster_to_free_space(block_group, cluster); | 1845 | __btrfs_return_cluster_to_free_space(block_group, cluster); |
1762 | if (need_resched()) { | 1846 | if (need_resched()) { |
1763 | spin_unlock(&block_group->tree_lock); | 1847 | spin_unlock(&ctl->tree_lock); |
1764 | cond_resched(); | 1848 | cond_resched(); |
1765 | spin_lock(&block_group->tree_lock); | 1849 | spin_lock(&ctl->tree_lock); |
1766 | } | 1850 | } |
1767 | } | 1851 | } |
1852 | __btrfs_remove_free_space_cache_locked(ctl); | ||
1853 | spin_unlock(&ctl->tree_lock); | ||
1768 | 1854 | ||
1769 | while ((node = rb_last(&block_group->free_space_offset)) != NULL) { | ||
1770 | info = rb_entry(node, struct btrfs_free_space, offset_index); | ||
1771 | if (!info->bitmap) { | ||
1772 | unlink_free_space(block_group, info); | ||
1773 | kmem_cache_free(btrfs_free_space_cachep, info); | ||
1774 | } else { | ||
1775 | free_bitmap(block_group, info); | ||
1776 | } | ||
1777 | |||
1778 | if (need_resched()) { | ||
1779 | spin_unlock(&block_group->tree_lock); | ||
1780 | cond_resched(); | ||
1781 | spin_lock(&block_group->tree_lock); | ||
1782 | } | ||
1783 | } | ||
1784 | |||
1785 | spin_unlock(&block_group->tree_lock); | ||
1786 | } | 1855 | } |
1787 | 1856 | ||
1788 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, | 1857 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, |
1789 | u64 offset, u64 bytes, u64 empty_size) | 1858 | u64 offset, u64 bytes, u64 empty_size) |
1790 | { | 1859 | { |
1860 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1791 | struct btrfs_free_space *entry = NULL; | 1861 | struct btrfs_free_space *entry = NULL; |
1792 | u64 bytes_search = bytes + empty_size; | 1862 | u64 bytes_search = bytes + empty_size; |
1793 | u64 ret = 0; | 1863 | u64 ret = 0; |
1794 | 1864 | ||
1795 | spin_lock(&block_group->tree_lock); | 1865 | spin_lock(&ctl->tree_lock); |
1796 | entry = find_free_space(block_group, &offset, &bytes_search, 0); | 1866 | entry = find_free_space(ctl, &offset, &bytes_search); |
1797 | if (!entry) | 1867 | if (!entry) |
1798 | goto out; | 1868 | goto out; |
1799 | 1869 | ||
1800 | ret = offset; | 1870 | ret = offset; |
1801 | if (entry->bitmap) { | 1871 | if (entry->bitmap) { |
1802 | bitmap_clear_bits(block_group, entry, offset, bytes); | 1872 | bitmap_clear_bits(ctl, entry, offset, bytes); |
1803 | if (!entry->bytes) | 1873 | if (!entry->bytes) |
1804 | free_bitmap(block_group, entry); | 1874 | free_bitmap(ctl, entry); |
1805 | } else { | 1875 | } else { |
1806 | unlink_free_space(block_group, entry); | 1876 | unlink_free_space(ctl, entry); |
1807 | entry->offset += bytes; | 1877 | entry->offset += bytes; |
1808 | entry->bytes -= bytes; | 1878 | entry->bytes -= bytes; |
1809 | if (!entry->bytes) | 1879 | if (!entry->bytes) |
1810 | kmem_cache_free(btrfs_free_space_cachep, entry); | 1880 | kmem_cache_free(btrfs_free_space_cachep, entry); |
1811 | else | 1881 | else |
1812 | link_free_space(block_group, entry); | 1882 | link_free_space(ctl, entry); |
1813 | } | 1883 | } |
1814 | 1884 | ||
1815 | out: | 1885 | out: |
1816 | spin_unlock(&block_group->tree_lock); | 1886 | spin_unlock(&ctl->tree_lock); |
1817 | 1887 | ||
1818 | return ret; | 1888 | return ret; |
1819 | } | 1889 | } |
@@ -1830,6 +1900,7 @@ int btrfs_return_cluster_to_free_space( | |||
1830 | struct btrfs_block_group_cache *block_group, | 1900 | struct btrfs_block_group_cache *block_group, |
1831 | struct btrfs_free_cluster *cluster) | 1901 | struct btrfs_free_cluster *cluster) |
1832 | { | 1902 | { |
1903 | struct btrfs_free_space_ctl *ctl; | ||
1833 | int ret; | 1904 | int ret; |
1834 | 1905 | ||
1835 | /* first, get a safe pointer to the block group */ | 1906 | /* first, get a safe pointer to the block group */ |
@@ -1848,10 +1919,12 @@ int btrfs_return_cluster_to_free_space( | |||
1848 | atomic_inc(&block_group->count); | 1919 | atomic_inc(&block_group->count); |
1849 | spin_unlock(&cluster->lock); | 1920 | spin_unlock(&cluster->lock); |
1850 | 1921 | ||
1922 | ctl = block_group->free_space_ctl; | ||
1923 | |||
1851 | /* now return any extents the cluster had on it */ | 1924 | /* now return any extents the cluster had on it */ |
1852 | spin_lock(&block_group->tree_lock); | 1925 | spin_lock(&ctl->tree_lock); |
1853 | ret = __btrfs_return_cluster_to_free_space(block_group, cluster); | 1926 | ret = __btrfs_return_cluster_to_free_space(block_group, cluster); |
1854 | spin_unlock(&block_group->tree_lock); | 1927 | spin_unlock(&ctl->tree_lock); |
1855 | 1928 | ||
1856 | /* finally drop our ref */ | 1929 | /* finally drop our ref */ |
1857 | btrfs_put_block_group(block_group); | 1930 | btrfs_put_block_group(block_group); |
@@ -1863,6 +1936,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, | |||
1863 | struct btrfs_free_space *entry, | 1936 | struct btrfs_free_space *entry, |
1864 | u64 bytes, u64 min_start) | 1937 | u64 bytes, u64 min_start) |
1865 | { | 1938 | { |
1939 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1866 | int err; | 1940 | int err; |
1867 | u64 search_start = cluster->window_start; | 1941 | u64 search_start = cluster->window_start; |
1868 | u64 search_bytes = bytes; | 1942 | u64 search_bytes = bytes; |
@@ -1871,13 +1945,12 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group, | |||
1871 | search_start = min_start; | 1945 | search_start = min_start; |
1872 | search_bytes = bytes; | 1946 | search_bytes = bytes; |
1873 | 1947 | ||
1874 | err = search_bitmap(block_group, entry, &search_start, | 1948 | err = search_bitmap(ctl, entry, &search_start, &search_bytes); |
1875 | &search_bytes); | ||
1876 | if (err) | 1949 | if (err) |
1877 | return 0; | 1950 | return 0; |
1878 | 1951 | ||
1879 | ret = search_start; | 1952 | ret = search_start; |
1880 | bitmap_clear_bits(block_group, entry, ret, bytes); | 1953 | bitmap_clear_bits(ctl, entry, ret, bytes); |
1881 | 1954 | ||
1882 | return ret; | 1955 | return ret; |
1883 | } | 1956 | } |
@@ -1891,6 +1964,7 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
1891 | struct btrfs_free_cluster *cluster, u64 bytes, | 1964 | struct btrfs_free_cluster *cluster, u64 bytes, |
1892 | u64 min_start) | 1965 | u64 min_start) |
1893 | { | 1966 | { |
1967 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1894 | struct btrfs_free_space *entry = NULL; | 1968 | struct btrfs_free_space *entry = NULL; |
1895 | struct rb_node *node; | 1969 | struct rb_node *node; |
1896 | u64 ret = 0; | 1970 | u64 ret = 0; |
@@ -1910,8 +1984,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
1910 | while(1) { | 1984 | while(1) { |
1911 | if (entry->bytes < bytes || | 1985 | if (entry->bytes < bytes || |
1912 | (!entry->bitmap && entry->offset < min_start)) { | 1986 | (!entry->bitmap && entry->offset < min_start)) { |
1913 | struct rb_node *node; | ||
1914 | |||
1915 | node = rb_next(&entry->offset_index); | 1987 | node = rb_next(&entry->offset_index); |
1916 | if (!node) | 1988 | if (!node) |
1917 | break; | 1989 | break; |
@@ -1925,7 +1997,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group, | |||
1925 | cluster, entry, bytes, | 1997 | cluster, entry, bytes, |
1926 | min_start); | 1998 | min_start); |
1927 | if (ret == 0) { | 1999 | if (ret == 0) { |
1928 | struct rb_node *node; | ||
1929 | node = rb_next(&entry->offset_index); | 2000 | node = rb_next(&entry->offset_index); |
1930 | if (!node) | 2001 | if (!node) |
1931 | break; | 2002 | break; |
@@ -1951,20 +2022,20 @@ out: | |||
1951 | if (!ret) | 2022 | if (!ret) |
1952 | return 0; | 2023 | return 0; |
1953 | 2024 | ||
1954 | spin_lock(&block_group->tree_lock); | 2025 | spin_lock(&ctl->tree_lock); |
1955 | 2026 | ||
1956 | block_group->free_space -= bytes; | 2027 | ctl->free_space -= bytes; |
1957 | if (entry->bytes == 0) { | 2028 | if (entry->bytes == 0) { |
1958 | block_group->free_extents--; | 2029 | ctl->free_extents--; |
1959 | if (entry->bitmap) { | 2030 | if (entry->bitmap) { |
1960 | kfree(entry->bitmap); | 2031 | kfree(entry->bitmap); |
1961 | block_group->total_bitmaps--; | 2032 | ctl->total_bitmaps--; |
1962 | recalculate_thresholds(block_group); | 2033 | ctl->op->recalc_thresholds(ctl); |
1963 | } | 2034 | } |
1964 | kmem_cache_free(btrfs_free_space_cachep, entry); | 2035 | kmem_cache_free(btrfs_free_space_cachep, entry); |
1965 | } | 2036 | } |
1966 | 2037 | ||
1967 | spin_unlock(&block_group->tree_lock); | 2038 | spin_unlock(&ctl->tree_lock); |
1968 | 2039 | ||
1969 | return ret; | 2040 | return ret; |
1970 | } | 2041 | } |
@@ -1974,6 +2045,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group, | |||
1974 | struct btrfs_free_cluster *cluster, | 2045 | struct btrfs_free_cluster *cluster, |
1975 | u64 offset, u64 bytes, u64 min_bytes) | 2046 | u64 offset, u64 bytes, u64 min_bytes) |
1976 | { | 2047 | { |
2048 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
1977 | unsigned long next_zero; | 2049 | unsigned long next_zero; |
1978 | unsigned long i; | 2050 | unsigned long i; |
1979 | unsigned long search_bits; | 2051 | unsigned long search_bits; |
@@ -2028,7 +2100,7 @@ again: | |||
2028 | 2100 | ||
2029 | cluster->window_start = start * block_group->sectorsize + | 2101 | cluster->window_start = start * block_group->sectorsize + |
2030 | entry->offset; | 2102 | entry->offset; |
2031 | rb_erase(&entry->offset_index, &block_group->free_space_offset); | 2103 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
2032 | ret = tree_insert_offset(&cluster->root, entry->offset, | 2104 | ret = tree_insert_offset(&cluster->root, entry->offset, |
2033 | &entry->offset_index, 1); | 2105 | &entry->offset_index, 1); |
2034 | BUG_ON(ret); | 2106 | BUG_ON(ret); |
@@ -2043,6 +2115,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, | |||
2043 | struct btrfs_free_cluster *cluster, | 2115 | struct btrfs_free_cluster *cluster, |
2044 | u64 offset, u64 bytes, u64 min_bytes) | 2116 | u64 offset, u64 bytes, u64 min_bytes) |
2045 | { | 2117 | { |
2118 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2046 | struct btrfs_free_space *first = NULL; | 2119 | struct btrfs_free_space *first = NULL; |
2047 | struct btrfs_free_space *entry = NULL; | 2120 | struct btrfs_free_space *entry = NULL; |
2048 | struct btrfs_free_space *prev = NULL; | 2121 | struct btrfs_free_space *prev = NULL; |
@@ -2053,7 +2126,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, | |||
2053 | u64 max_extent; | 2126 | u64 max_extent; |
2054 | u64 max_gap = 128 * 1024; | 2127 | u64 max_gap = 128 * 1024; |
2055 | 2128 | ||
2056 | entry = tree_search_offset(block_group, offset, 0, 1); | 2129 | entry = tree_search_offset(ctl, offset, 0, 1); |
2057 | if (!entry) | 2130 | if (!entry) |
2058 | return -ENOSPC; | 2131 | return -ENOSPC; |
2059 | 2132 | ||
@@ -2119,7 +2192,7 @@ static int setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group, | |||
2119 | if (entry->bitmap) | 2192 | if (entry->bitmap) |
2120 | continue; | 2193 | continue; |
2121 | 2194 | ||
2122 | rb_erase(&entry->offset_index, &block_group->free_space_offset); | 2195 | rb_erase(&entry->offset_index, &ctl->free_space_offset); |
2123 | ret = tree_insert_offset(&cluster->root, entry->offset, | 2196 | ret = tree_insert_offset(&cluster->root, entry->offset, |
2124 | &entry->offset_index, 0); | 2197 | &entry->offset_index, 0); |
2125 | BUG_ON(ret); | 2198 | BUG_ON(ret); |
@@ -2138,16 +2211,15 @@ static int setup_cluster_bitmap(struct btrfs_block_group_cache *block_group, | |||
2138 | struct btrfs_free_cluster *cluster, | 2211 | struct btrfs_free_cluster *cluster, |
2139 | u64 offset, u64 bytes, u64 min_bytes) | 2212 | u64 offset, u64 bytes, u64 min_bytes) |
2140 | { | 2213 | { |
2214 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2141 | struct btrfs_free_space *entry; | 2215 | struct btrfs_free_space *entry; |
2142 | struct rb_node *node; | 2216 | struct rb_node *node; |
2143 | int ret = -ENOSPC; | 2217 | int ret = -ENOSPC; |
2144 | 2218 | ||
2145 | if (block_group->total_bitmaps == 0) | 2219 | if (ctl->total_bitmaps == 0) |
2146 | return -ENOSPC; | 2220 | return -ENOSPC; |
2147 | 2221 | ||
2148 | entry = tree_search_offset(block_group, | 2222 | entry = tree_search_offset(ctl, offset_to_bitmap(ctl, offset), 0, 1); |
2149 | offset_to_bitmap(block_group, offset), | ||
2150 | 0, 1); | ||
2151 | if (!entry) | 2223 | if (!entry) |
2152 | return -ENOSPC; | 2224 | return -ENOSPC; |
2153 | 2225 | ||
@@ -2180,6 +2252,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
2180 | struct btrfs_free_cluster *cluster, | 2252 | struct btrfs_free_cluster *cluster, |
2181 | u64 offset, u64 bytes, u64 empty_size) | 2253 | u64 offset, u64 bytes, u64 empty_size) |
2182 | { | 2254 | { |
2255 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2183 | u64 min_bytes; | 2256 | u64 min_bytes; |
2184 | int ret; | 2257 | int ret; |
2185 | 2258 | ||
@@ -2199,14 +2272,14 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
2199 | } else | 2272 | } else |
2200 | min_bytes = max(bytes, (bytes + empty_size) >> 2); | 2273 | min_bytes = max(bytes, (bytes + empty_size) >> 2); |
2201 | 2274 | ||
2202 | spin_lock(&block_group->tree_lock); | 2275 | spin_lock(&ctl->tree_lock); |
2203 | 2276 | ||
2204 | /* | 2277 | /* |
2205 | * If we know we don't have enough space to make a cluster don't even | 2278 | * If we know we don't have enough space to make a cluster don't even |
2206 | * bother doing all the work to try and find one. | 2279 | * bother doing all the work to try and find one. |
2207 | */ | 2280 | */ |
2208 | if (block_group->free_space < min_bytes) { | 2281 | if (ctl->free_space < min_bytes) { |
2209 | spin_unlock(&block_group->tree_lock); | 2282 | spin_unlock(&ctl->tree_lock); |
2210 | return -ENOSPC; | 2283 | return -ENOSPC; |
2211 | } | 2284 | } |
2212 | 2285 | ||
@@ -2232,7 +2305,7 @@ int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | |||
2232 | } | 2305 | } |
2233 | out: | 2306 | out: |
2234 | spin_unlock(&cluster->lock); | 2307 | spin_unlock(&cluster->lock); |
2235 | spin_unlock(&block_group->tree_lock); | 2308 | spin_unlock(&ctl->tree_lock); |
2236 | 2309 | ||
2237 | return ret; | 2310 | return ret; |
2238 | } | 2311 | } |
@@ -2253,6 +2326,7 @@ void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster) | |||
2253 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | 2326 | int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, |
2254 | u64 *trimmed, u64 start, u64 end, u64 minlen) | 2327 | u64 *trimmed, u64 start, u64 end, u64 minlen) |
2255 | { | 2328 | { |
2329 | struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl; | ||
2256 | struct btrfs_free_space *entry = NULL; | 2330 | struct btrfs_free_space *entry = NULL; |
2257 | struct btrfs_fs_info *fs_info = block_group->fs_info; | 2331 | struct btrfs_fs_info *fs_info = block_group->fs_info; |
2258 | u64 bytes = 0; | 2332 | u64 bytes = 0; |
@@ -2262,52 +2336,50 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | |||
2262 | *trimmed = 0; | 2336 | *trimmed = 0; |
2263 | 2337 | ||
2264 | while (start < end) { | 2338 | while (start < end) { |
2265 | spin_lock(&block_group->tree_lock); | 2339 | spin_lock(&ctl->tree_lock); |
2266 | 2340 | ||
2267 | if (block_group->free_space < minlen) { | 2341 | if (ctl->free_space < minlen) { |
2268 | spin_unlock(&block_group->tree_lock); | 2342 | spin_unlock(&ctl->tree_lock); |
2269 | break; | 2343 | break; |
2270 | } | 2344 | } |
2271 | 2345 | ||
2272 | entry = tree_search_offset(block_group, start, 0, 1); | 2346 | entry = tree_search_offset(ctl, start, 0, 1); |
2273 | if (!entry) | 2347 | if (!entry) |
2274 | entry = tree_search_offset(block_group, | 2348 | entry = tree_search_offset(ctl, |
2275 | offset_to_bitmap(block_group, | 2349 | offset_to_bitmap(ctl, start), |
2276 | start), | ||
2277 | 1, 1); | 2350 | 1, 1); |
2278 | 2351 | ||
2279 | if (!entry || entry->offset >= end) { | 2352 | if (!entry || entry->offset >= end) { |
2280 | spin_unlock(&block_group->tree_lock); | 2353 | spin_unlock(&ctl->tree_lock); |
2281 | break; | 2354 | break; |
2282 | } | 2355 | } |
2283 | 2356 | ||
2284 | if (entry->bitmap) { | 2357 | if (entry->bitmap) { |
2285 | ret = search_bitmap(block_group, entry, &start, &bytes); | 2358 | ret = search_bitmap(ctl, entry, &start, &bytes); |
2286 | if (!ret) { | 2359 | if (!ret) { |
2287 | if (start >= end) { | 2360 | if (start >= end) { |
2288 | spin_unlock(&block_group->tree_lock); | 2361 | spin_unlock(&ctl->tree_lock); |
2289 | break; | 2362 | break; |
2290 | } | 2363 | } |
2291 | bytes = min(bytes, end - start); | 2364 | bytes = min(bytes, end - start); |
2292 | bitmap_clear_bits(block_group, entry, | 2365 | bitmap_clear_bits(ctl, entry, start, bytes); |
2293 | start, bytes); | ||
2294 | if (entry->bytes == 0) | 2366 | if (entry->bytes == 0) |
2295 | free_bitmap(block_group, entry); | 2367 | free_bitmap(ctl, entry); |
2296 | } else { | 2368 | } else { |
2297 | start = entry->offset + BITS_PER_BITMAP * | 2369 | start = entry->offset + BITS_PER_BITMAP * |
2298 | block_group->sectorsize; | 2370 | block_group->sectorsize; |
2299 | spin_unlock(&block_group->tree_lock); | 2371 | spin_unlock(&ctl->tree_lock); |
2300 | ret = 0; | 2372 | ret = 0; |
2301 | continue; | 2373 | continue; |
2302 | } | 2374 | } |
2303 | } else { | 2375 | } else { |
2304 | start = entry->offset; | 2376 | start = entry->offset; |
2305 | bytes = min(entry->bytes, end - start); | 2377 | bytes = min(entry->bytes, end - start); |
2306 | unlink_free_space(block_group, entry); | 2378 | unlink_free_space(ctl, entry); |
2307 | kmem_cache_free(btrfs_free_space_cachep, entry); | 2379 | kmem_cache_free(btrfs_free_space_cachep, entry); |
2308 | } | 2380 | } |
2309 | 2381 | ||
2310 | spin_unlock(&block_group->tree_lock); | 2382 | spin_unlock(&ctl->tree_lock); |
2311 | 2383 | ||
2312 | if (bytes >= minlen) { | 2384 | if (bytes >= minlen) { |
2313 | int update_ret; | 2385 | int update_ret; |
@@ -2319,8 +2391,7 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | |||
2319 | bytes, | 2391 | bytes, |
2320 | &actually_trimmed); | 2392 | &actually_trimmed); |
2321 | 2393 | ||
2322 | btrfs_add_free_space(block_group, | 2394 | btrfs_add_free_space(block_group, start, bytes); |
2323 | start, bytes); | ||
2324 | if (!update_ret) | 2395 | if (!update_ret) |
2325 | btrfs_update_reserved_bytes(block_group, | 2396 | btrfs_update_reserved_bytes(block_group, |
2326 | bytes, 0, 1); | 2397 | bytes, 0, 1); |
@@ -2342,3 +2413,145 @@ int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group, | |||
2342 | 2413 | ||
2343 | return ret; | 2414 | return ret; |
2344 | } | 2415 | } |
2416 | |||
2417 | /* | ||
2418 | * Find the left-most item in the cache tree, and then return the | ||
2419 | * smallest inode number in the item. | ||
2420 | * | ||
2421 | * Note: the returned inode number may not be the smallest one in | ||
2422 | * the tree, if the left-most item is a bitmap. | ||
2423 | */ | ||
2424 | u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root) | ||
2425 | { | ||
2426 | struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl; | ||
2427 | struct btrfs_free_space *entry = NULL; | ||
2428 | u64 ino = 0; | ||
2429 | |||
2430 | spin_lock(&ctl->tree_lock); | ||
2431 | |||
2432 | if (RB_EMPTY_ROOT(&ctl->free_space_offset)) | ||
2433 | goto out; | ||
2434 | |||
2435 | entry = rb_entry(rb_first(&ctl->free_space_offset), | ||
2436 | struct btrfs_free_space, offset_index); | ||
2437 | |||
2438 | if (!entry->bitmap) { | ||
2439 | ino = entry->offset; | ||
2440 | |||
2441 | unlink_free_space(ctl, entry); | ||
2442 | entry->offset++; | ||
2443 | entry->bytes--; | ||
2444 | if (!entry->bytes) | ||
2445 | kmem_cache_free(btrfs_free_space_cachep, entry); | ||
2446 | else | ||
2447 | link_free_space(ctl, entry); | ||
2448 | } else { | ||
2449 | u64 offset = 0; | ||
2450 | u64 count = 1; | ||
2451 | int ret; | ||
2452 | |||
2453 | ret = search_bitmap(ctl, entry, &offset, &count); | ||
2454 | BUG_ON(ret); | ||
2455 | |||
2456 | ino = offset; | ||
2457 | bitmap_clear_bits(ctl, entry, offset, 1); | ||
2458 | if (entry->bytes == 0) | ||
2459 | free_bitmap(ctl, entry); | ||
2460 | } | ||
2461 | out: | ||
2462 | spin_unlock(&ctl->tree_lock); | ||
2463 | |||
2464 | return ino; | ||
2465 | } | ||
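btrfs_find_ino_for_alloc() above hands out the lowest number held by the left-most entry and shrinks that entry in place; only when the left-most entry is a bitmap does it have to search for the first set bit, which is why the returned number is not guaranteed to be the global minimum. A minimal user-space model of the plain-extent path (illustrative types, the bitmap branch omitted):

#include <stdint.h>
#include <stdio.h>

/* One contiguous run of free inode numbers, like an extent entry. */
struct free_run {
	uint64_t start;
	uint64_t count;
};

/* Pop the smallest free number from the left-most run; 0 means "empty". */
static uint64_t pop_smallest(struct free_run *runs, int *nr_runs)
{
	uint64_t ino;

	if (*nr_runs == 0)
		return 0;

	ino = runs[0].start;
	runs[0].start++;
	runs[0].count--;
	if (runs[0].count == 0) {
		/* run exhausted: drop it, the next run becomes left-most */
		for (int i = 1; i < *nr_runs; i++)
			runs[i - 1] = runs[i];
		(*nr_runs)--;
	}
	return ino;
}

int main(void)
{
	struct free_run runs[] = { { 257, 2 }, { 300, 5 } };
	int nr = 2;

	for (int i = 0; i < 4; i++)
		printf("allocated ino %llu\n",
		       (unsigned long long)pop_smallest(runs, &nr));	/* 257 258 300 301 */
	return 0;
}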
2466 | |||
2467 | struct inode *lookup_free_ino_inode(struct btrfs_root *root, | ||
2468 | struct btrfs_path *path) | ||
2469 | { | ||
2470 | struct inode *inode = NULL; | ||
2471 | |||
2472 | spin_lock(&root->cache_lock); | ||
2473 | if (root->cache_inode) | ||
2474 | inode = igrab(root->cache_inode); | ||
2475 | spin_unlock(&root->cache_lock); | ||
2476 | if (inode) | ||
2477 | return inode; | ||
2478 | |||
2479 | inode = __lookup_free_space_inode(root, path, 0); | ||
2480 | if (IS_ERR(inode)) | ||
2481 | return inode; | ||
2482 | |||
2483 | spin_lock(&root->cache_lock); | ||
2484 | if (!root->fs_info->closing) | ||
2485 | root->cache_inode = igrab(inode); | ||
2486 | spin_unlock(&root->cache_lock); | ||
2487 | |||
2488 | return inode; | ||
2489 | } | ||
2490 | |||
2491 | int create_free_ino_inode(struct btrfs_root *root, | ||
2492 | struct btrfs_trans_handle *trans, | ||
2493 | struct btrfs_path *path) | ||
2494 | { | ||
2495 | return __create_free_space_inode(root, trans, path, | ||
2496 | BTRFS_FREE_INO_OBJECTID, 0); | ||
2497 | } | ||
2498 | |||
2499 | int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root) | ||
2500 | { | ||
2501 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
2502 | struct btrfs_path *path; | ||
2503 | struct inode *inode; | ||
2504 | int ret = 0; | ||
2505 | u64 root_gen = btrfs_root_generation(&root->root_item); | ||
2506 | |||
2507 | /* | ||
2508 | * If we're unmounting then just return, since this does a search on the | ||
2509 | * normal root and not the commit root and we could deadlock. | ||
2510 | */ | ||
2511 | smp_mb(); | ||
2512 | if (fs_info->closing) | ||
2513 | return 0; | ||
2514 | |||
2515 | path = btrfs_alloc_path(); | ||
2516 | if (!path) | ||
2517 | return 0; | ||
2518 | |||
2519 | inode = lookup_free_ino_inode(root, path); | ||
2520 | if (IS_ERR(inode)) | ||
2521 | goto out; | ||
2522 | |||
2523 | if (root_gen != BTRFS_I(inode)->generation) | ||
2524 | goto out_put; | ||
2525 | |||
2526 | ret = __load_free_space_cache(root, inode, ctl, path, 0); | ||
2527 | |||
2528 | if (ret < 0) | ||
2529 | printk(KERN_ERR "btrfs: failed to load free ino cache for " | ||
2530 | "root %llu\n", root->root_key.objectid); | ||
2531 | out_put: | ||
2532 | iput(inode); | ||
2533 | out: | ||
2534 | btrfs_free_path(path); | ||
2535 | return ret; | ||
2536 | } | ||
2537 | |||
2538 | int btrfs_write_out_ino_cache(struct btrfs_root *root, | ||
2539 | struct btrfs_trans_handle *trans, | ||
2540 | struct btrfs_path *path) | ||
2541 | { | ||
2542 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
2543 | struct inode *inode; | ||
2544 | int ret; | ||
2545 | |||
2546 | inode = lookup_free_ino_inode(root, path); | ||
2547 | if (IS_ERR(inode)) | ||
2548 | return 0; | ||
2549 | |||
2550 | ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0); | ||
2551 | if (ret < 0) | ||
2552 | printk(KERN_ERR "btrfs: failed to write free ino cache " | ||
2553 | "for root %llu\n", root->root_key.objectid); | ||
2554 | |||
2555 | iput(inode); | ||
2556 | return ret; | ||
2557 | } | ||
diff --git a/fs/btrfs/free-space-cache.h b/fs/btrfs/free-space-cache.h index 65c3b935289f..8f2613f779ed 100644 --- a/fs/btrfs/free-space-cache.h +++ b/fs/btrfs/free-space-cache.h | |||
@@ -27,6 +27,25 @@ struct btrfs_free_space { | |||
27 | struct list_head list; | 27 | struct list_head list; |
28 | }; | 28 | }; |
29 | 29 | ||
30 | struct btrfs_free_space_ctl { | ||
31 | spinlock_t tree_lock; | ||
32 | struct rb_root free_space_offset; | ||
33 | u64 free_space; | ||
34 | int extents_thresh; | ||
35 | int free_extents; | ||
36 | int total_bitmaps; | ||
37 | int unit; | ||
38 | u64 start; | ||
39 | struct btrfs_free_space_op *op; | ||
40 | void *private; | ||
41 | }; | ||
42 | |||
43 | struct btrfs_free_space_op { | ||
44 | void (*recalc_thresholds)(struct btrfs_free_space_ctl *ctl); | ||
45 | bool (*use_bitmap)(struct btrfs_free_space_ctl *ctl, | ||
46 | struct btrfs_free_space *info); | ||
47 | }; | ||
48 | |||
30 | struct inode *lookup_free_space_inode(struct btrfs_root *root, | 49 | struct inode *lookup_free_space_inode(struct btrfs_root *root, |
31 | struct btrfs_block_group_cache | 50 | struct btrfs_block_group_cache |
32 | *block_group, struct btrfs_path *path); | 51 | *block_group, struct btrfs_path *path); |
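The btrfs_free_space_ctl/btrfs_free_space_op pair added above is a small policy/mechanism split: the rb-tree and bitmap code stays generic, while the two decisions that differ between a block-group cache and the inode-number cache are reached through the op table, with private left opaque to the generic side. A compact user-space sketch of the same pattern (names are illustrative, not the kernel API):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct demo_ctl;

/* Per-user policy hooks, mirroring recalc_thresholds/use_bitmap above. */
struct demo_ops {
	void (*recalc_thresholds)(struct demo_ctl *ctl);
	bool (*use_bitmap)(struct demo_ctl *ctl, uint64_t bytes);
};

struct demo_ctl {
	int total_bitmaps;
	int extents_thresh;
	const struct demo_ops *op;
	void *private;		/* owner-specific data, opaque to generic code */
};

/* Block-group flavour of the two policies (toy formulas only). */
static void bg_recalc(struct demo_ctl *ctl)
{
	ctl->extents_thresh = 256 - ctl->total_bitmaps;
}

static bool bg_use_bitmap(struct demo_ctl *ctl, uint64_t bytes)
{
	(void)ctl;
	return bytes < 4096 * 4;	/* only small pieces go to bitmaps */
}

static const struct demo_ops bg_ops = {
	.recalc_thresholds = bg_recalc,
	.use_bitmap        = bg_use_bitmap,
};

/* Generic code never peeks at ->private; it only calls through ->op. */
static void generic_add(struct demo_ctl *ctl, uint64_t bytes)
{
	if (ctl->op->use_bitmap(ctl, bytes))
		ctl->total_bitmaps++;
	ctl->op->recalc_thresholds(ctl);
}

int main(void)
{
	struct demo_ctl ctl = { .extents_thresh = 256, .op = &bg_ops };

	generic_add(&ctl, 4096);	/* small: counted as a bitmap here */
	generic_add(&ctl, 1 << 20);	/* large: stays a plain extent     */
	printf("bitmaps=%d thresh=%d\n", ctl.total_bitmaps, ctl.extents_thresh);
	return 0;
}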
@@ -45,17 +64,38 @@ int btrfs_write_out_cache(struct btrfs_root *root, | |||
45 | struct btrfs_trans_handle *trans, | 64 | struct btrfs_trans_handle *trans, |
46 | struct btrfs_block_group_cache *block_group, | 65 | struct btrfs_block_group_cache *block_group, |
47 | struct btrfs_path *path); | 66 | struct btrfs_path *path); |
48 | int btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | 67 | |
49 | u64 bytenr, u64 size); | 68 | struct inode *lookup_free_ino_inode(struct btrfs_root *root, |
69 | struct btrfs_path *path); | ||
70 | int create_free_ino_inode(struct btrfs_root *root, | ||
71 | struct btrfs_trans_handle *trans, | ||
72 | struct btrfs_path *path); | ||
73 | int load_free_ino_cache(struct btrfs_fs_info *fs_info, | ||
74 | struct btrfs_root *root); | ||
75 | int btrfs_write_out_ino_cache(struct btrfs_root *root, | ||
76 | struct btrfs_trans_handle *trans, | ||
77 | struct btrfs_path *path); | ||
78 | |||
79 | void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group); | ||
80 | int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl, | ||
81 | u64 bytenr, u64 size); | ||
82 | static inline int | ||
83 | btrfs_add_free_space(struct btrfs_block_group_cache *block_group, | ||
84 | u64 bytenr, u64 size) | ||
85 | { | ||
86 | return __btrfs_add_free_space(block_group->free_space_ctl, | ||
87 | bytenr, size); | ||
88 | } | ||
50 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, | 89 | int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group, |
51 | u64 bytenr, u64 size); | 90 | u64 bytenr, u64 size); |
91 | void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl); | ||
52 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache | 92 | void btrfs_remove_free_space_cache(struct btrfs_block_group_cache |
53 | *block_group); | 93 | *block_group); |
54 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, | 94 | u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group, |
55 | u64 offset, u64 bytes, u64 empty_size); | 95 | u64 offset, u64 bytes, u64 empty_size); |
96 | u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root); | ||
56 | void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, | 97 | void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group, |
57 | u64 bytes); | 98 | u64 bytes); |
58 | u64 btrfs_block_group_free_space(struct btrfs_block_group_cache *block_group); | ||
59 | int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, | 99 | int btrfs_find_space_cluster(struct btrfs_trans_handle *trans, |
60 | struct btrfs_root *root, | 100 | struct btrfs_root *root, |
61 | struct btrfs_block_group_cache *block_group, | 101 | struct btrfs_block_group_cache *block_group, |
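Also visible in this hunk: the old btrfs_add_free_space() entry point survives as a one-line static inline that forwards to the ctl-based __btrfs_add_free_space(), so existing block-group callers compile unchanged. The same shim technique in miniature, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* New, more general core that works on the cache object directly. */
struct cache { uint64_t free_bytes; };

static int cache_add(struct cache *c, uint64_t bytes)
{
	c->free_bytes += bytes;
	return 0;
}

/* Old call-site shape, kept alive as a thin forwarding wrapper. */
struct group { struct cache *cache; };

static inline int group_add(struct group *g, uint64_t bytes)
{
	return cache_add(g->cache, bytes);
}

int main(void)
{
	struct cache c = { 0 };
	struct group g = { .cache = &c };

	group_add(&g, 4096);	/* legacy-style call, new machinery underneath */
	printf("%llu\n", (unsigned long long)c.free_bytes);
	return 0;
}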
diff --git a/fs/btrfs/inode-item.c b/fs/btrfs/inode-item.c index 64f1150bb48d..baa74f3db691 100644 --- a/fs/btrfs/inode-item.c +++ b/fs/btrfs/inode-item.c | |||
@@ -130,7 +130,6 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans, | |||
130 | item_size - (ptr + sub_item_len - item_start)); | 130 | item_size - (ptr + sub_item_len - item_start)); |
131 | ret = btrfs_truncate_item(trans, root, path, | 131 | ret = btrfs_truncate_item(trans, root, path, |
132 | item_size - sub_item_len, 1); | 132 | item_size - sub_item_len, 1); |
133 | BUG_ON(ret); | ||
134 | out: | 133 | out: |
135 | btrfs_free_path(path); | 134 | btrfs_free_path(path); |
136 | return ret; | 135 | return ret; |
@@ -167,7 +166,6 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans, | |||
167 | 166 | ||
168 | old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); | 167 | old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]); |
169 | ret = btrfs_extend_item(trans, root, path, ins_len); | 168 | ret = btrfs_extend_item(trans, root, path, ins_len); |
170 | BUG_ON(ret); | ||
171 | ref = btrfs_item_ptr(path->nodes[0], path->slots[0], | 169 | ref = btrfs_item_ptr(path->nodes[0], path->slots[0], |
172 | struct btrfs_inode_ref); | 170 | struct btrfs_inode_ref); |
173 | ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size); | 171 | ref = (struct btrfs_inode_ref *)((unsigned long)ref + old_size); |
diff --git a/fs/btrfs/inode-map.c b/fs/btrfs/inode-map.c index c05a08f4c411..3262cd17a12f 100644 --- a/fs/btrfs/inode-map.c +++ b/fs/btrfs/inode-map.c | |||
@@ -16,11 +16,446 @@ | |||
16 | * Boston, MA 021110-1307, USA. | 16 | * Boston, MA 021110-1307, USA. |
17 | */ | 17 | */ |
18 | 18 | ||
19 | #include <linux/delay.h> | ||
20 | #include <linux/kthread.h> | ||
21 | #include <linux/pagemap.h> | ||
22 | |||
19 | #include "ctree.h" | 23 | #include "ctree.h" |
20 | #include "disk-io.h" | 24 | #include "disk-io.h" |
25 | #include "free-space-cache.h" | ||
26 | #include "inode-map.h" | ||
21 | #include "transaction.h" | 27 | #include "transaction.h" |
22 | 28 | ||
23 | int btrfs_find_highest_inode(struct btrfs_root *root, u64 *objectid) | 29 | static int caching_kthread(void *data) |
30 | { | ||
31 | struct btrfs_root *root = data; | ||
32 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
33 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
34 | struct btrfs_key key; | ||
35 | struct btrfs_path *path; | ||
36 | struct extent_buffer *leaf; | ||
37 | u64 last = (u64)-1; | ||
38 | int slot; | ||
39 | int ret; | ||
40 | |||
41 | path = btrfs_alloc_path(); | ||
42 | if (!path) | ||
43 | return -ENOMEM; | ||
44 | |||
45 | /* Since the commit root is read-only, we can safely skip locking. */ | ||
46 | path->skip_locking = 1; | ||
47 | path->search_commit_root = 1; | ||
48 | path->reada = 2; | ||
49 | |||
50 | key.objectid = BTRFS_FIRST_FREE_OBJECTID; | ||
51 | key.offset = 0; | ||
52 | key.type = BTRFS_INODE_ITEM_KEY; | ||
53 | again: | ||
54 | /* need to make sure the commit_root doesn't disappear */ | ||
55 | mutex_lock(&root->fs_commit_mutex); | ||
56 | |||
57 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
58 | if (ret < 0) | ||
59 | goto out; | ||
60 | |||
61 | while (1) { | ||
62 | smp_mb(); | ||
63 | if (fs_info->closing) | ||
64 | goto out; | ||
65 | |||
66 | leaf = path->nodes[0]; | ||
67 | slot = path->slots[0]; | ||
68 | if (slot >= btrfs_header_nritems(leaf)) { | ||
69 | ret = btrfs_next_leaf(root, path); | ||
70 | if (ret < 0) | ||
71 | goto out; | ||
72 | else if (ret > 0) | ||
73 | break; | ||
74 | |||
75 | if (need_resched() || | ||
76 | btrfs_transaction_in_commit(fs_info)) { | ||
77 | leaf = path->nodes[0]; | ||
78 | |||
79 | if (btrfs_header_nritems(leaf) == 0) { | ||
80 | WARN_ON(1); | ||
81 | break; | ||
82 | } | ||
83 | |||
84 | /* | ||
85 | * Save the key so we can advance forward | ||
86 | * in the next search. | ||
87 | */ | ||
88 | btrfs_item_key_to_cpu(leaf, &key, 0); | ||
89 | btrfs_release_path(path); | ||
90 | root->cache_progress = last; | ||
91 | mutex_unlock(&root->fs_commit_mutex); | ||
92 | schedule_timeout(1); | ||
93 | goto again; | ||
94 | } else | ||
95 | continue; | ||
96 | } | ||
97 | |||
98 | btrfs_item_key_to_cpu(leaf, &key, slot); | ||
99 | |||
100 | if (key.type != BTRFS_INODE_ITEM_KEY) | ||
101 | goto next; | ||
102 | |||
103 | if (key.objectid >= root->highest_objectid) | ||
104 | break; | ||
105 | |||
106 | if (last != (u64)-1 && last + 1 != key.objectid) { | ||
107 | __btrfs_add_free_space(ctl, last + 1, | ||
108 | key.objectid - last - 1); | ||
109 | wake_up(&root->cache_wait); | ||
110 | } | ||
111 | |||
112 | last = key.objectid; | ||
113 | next: | ||
114 | path->slots[0]++; | ||
115 | } | ||
116 | |||
117 | if (last < root->highest_objectid - 1) { | ||
118 | __btrfs_add_free_space(ctl, last + 1, | ||
119 | root->highest_objectid - last - 1); | ||
120 | } | ||
121 | |||
122 | spin_lock(&root->cache_lock); | ||
123 | root->cached = BTRFS_CACHE_FINISHED; | ||
124 | spin_unlock(&root->cache_lock); | ||
125 | |||
126 | root->cache_progress = (u64)-1; | ||
127 | btrfs_unpin_free_ino(root); | ||
128 | out: | ||
129 | wake_up(&root->cache_wait); | ||
130 | mutex_unlock(&root->fs_commit_mutex); | ||
131 | |||
132 | btrfs_free_path(path); | ||
133 | |||
134 | return ret; | ||
135 | } | ||
136 | |||
137 | static void start_caching(struct btrfs_root *root) | ||
138 | { | ||
139 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
140 | struct task_struct *tsk; | ||
141 | int ret; | ||
142 | u64 objectid; | ||
143 | |||
144 | spin_lock(&root->cache_lock); | ||
145 | if (root->cached != BTRFS_CACHE_NO) { | ||
146 | spin_unlock(&root->cache_lock); | ||
147 | return; | ||
148 | } | ||
149 | |||
150 | root->cached = BTRFS_CACHE_STARTED; | ||
151 | spin_unlock(&root->cache_lock); | ||
152 | |||
153 | ret = load_free_ino_cache(root->fs_info, root); | ||
154 | if (ret == 1) { | ||
155 | spin_lock(&root->cache_lock); | ||
156 | root->cached = BTRFS_CACHE_FINISHED; | ||
157 | spin_unlock(&root->cache_lock); | ||
158 | return; | ||
159 | } | ||
160 | |||
161 | /* | ||
162 | * It can be quite time-consuming to fill the cache by searching | ||
163 | * through the extent tree, and this can keep the ino allocation path | ||
164 | * waiting. Therefore at the start we quickly find out the highest | ||
165 | * inode number and we know we can use inode numbers which fall in | ||
166 | * [highest_ino + 1, BTRFS_LAST_FREE_OBJECTID]. | ||
167 | */ | ||
168 | ret = btrfs_find_free_objectid(root, &objectid); | ||
169 | if (!ret && objectid <= BTRFS_LAST_FREE_OBJECTID) { | ||
170 | __btrfs_add_free_space(ctl, objectid, | ||
171 | BTRFS_LAST_FREE_OBJECTID - objectid + 1); | ||
172 | } | ||
173 | |||
174 | tsk = kthread_run(caching_kthread, root, "btrfs-ino-cache-%llu\n", | ||
175 | root->root_key.objectid); | ||
176 | BUG_ON(IS_ERR(tsk)); | ||
177 | } | ||
178 | |||
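Editorial aside on the hunk above: the reason start_caching() calls btrfs_find_free_objectid() before the caching thread runs is that the whole tail of the objectid space becomes usable immediately, without waiting for the scan. The sketch below is plain user-space C, only an illustration of that arithmetic; BTRFS_LAST_FREE_OBJECTID is taken to be -256ULL (matching the kernel definition at the time) and the sample next_objectid is invented.

    /* tail_range.c -- model of the range start_caching() hands out up front.
     * Build: gcc -o tail_range tail_range.c */
    #include <stdio.h>
    #include <stdint.h>

    #define BTRFS_LAST_FREE_OBJECTID ((uint64_t)-256)   /* assumed, mirrors ctree.h */

    int main(void)
    {
            uint64_t next_objectid = 1000;  /* pretend btrfs_find_free_objectid() set this */
            uint64_t count;

            if (next_objectid <= BTRFS_LAST_FREE_OBJECTID) {
                    count = BTRFS_LAST_FREE_OBJECTID - next_objectid + 1;
                    printf("usable before the scan finishes: [%llu, %llu], %llu numbers\n",
                           (unsigned long long)next_objectid,
                           (unsigned long long)BTRFS_LAST_FREE_OBJECTID,
                           (unsigned long long)count);
            }
            return 0;
    }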
179 | int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid) | ||
180 | { | ||
181 | again: | ||
182 | *objectid = btrfs_find_ino_for_alloc(root); | ||
183 | |||
184 | if (*objectid != 0) | ||
185 | return 0; | ||
186 | |||
187 | start_caching(root); | ||
188 | |||
189 | wait_event(root->cache_wait, | ||
190 | root->cached == BTRFS_CACHE_FINISHED || | ||
191 | root->free_ino_ctl->free_space > 0); | ||
192 | |||
193 | if (root->cached == BTRFS_CACHE_FINISHED && | ||
194 | root->free_ino_ctl->free_space == 0) | ||
195 | return -ENOSPC; | ||
196 | else | ||
197 | goto again; | ||
198 | } | ||
199 | |||
200 | void btrfs_return_ino(struct btrfs_root *root, u64 objectid) | ||
201 | { | ||
202 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
203 | struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; | ||
204 | again: | ||
205 | if (root->cached == BTRFS_CACHE_FINISHED) { | ||
206 | __btrfs_add_free_space(ctl, objectid, 1); | ||
207 | } else { | ||
208 | /* | ||
209 | * If we are in the process of caching free ino chunks, | ||
210 | * to avoid adding the same inode number to the free_ino | ||
211 | * tree twice across transactions, we'll leave it | ||
212 | * in the pinned tree until a transaction is committed | ||
213 | * or the caching work is done. | ||
214 | */ | ||
215 | |||
216 | mutex_lock(&root->fs_commit_mutex); | ||
217 | spin_lock(&root->cache_lock); | ||
218 | if (root->cached == BTRFS_CACHE_FINISHED) { | ||
219 | spin_unlock(&root->cache_lock); | ||
220 | mutex_unlock(&root->fs_commit_mutex); | ||
221 | goto again; | ||
222 | } | ||
223 | spin_unlock(&root->cache_lock); | ||
224 | |||
225 | start_caching(root); | ||
226 | |||
227 | if (objectid <= root->cache_progress || | ||
228 | objectid > root->highest_objectid) | ||
229 | __btrfs_add_free_space(ctl, objectid, 1); | ||
230 | else | ||
231 | __btrfs_add_free_space(pinned, objectid, 1); | ||
232 | |||
233 | mutex_unlock(&root->fs_commit_mutex); | ||
234 | } | ||
235 | } | ||
236 | |||
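A note on the branch just above: whether a returned number goes straight back to the free_ino tree or waits in the pinned tree comes down to one comparison against cache_progress and highest_objectid. A stand-alone user-space sketch of that predicate, with all sample values invented, just to make the decision visible:

    /* return_ino.c -- models the decision in btrfs_return_ino(): numbers the
     * caching thread has already passed (<= cache_progress) or will never
     * reach (> highest_objectid) can be reused at once; the rest stay pinned. */
    #include <stdio.h>
    #include <stdint.h>

    static const char *where_to_return(uint64_t objectid, uint64_t cache_progress,
                                       uint64_t highest_objectid)
    {
            if (objectid <= cache_progress || objectid > highest_objectid)
                    return "free_ino tree";
            return "pinned tree";
    }

    int main(void)
    {
            uint64_t cache_progress = 5000, highest_objectid = 9000;
            uint64_t samples[] = { 300, 5000, 7000, 9500 };

            for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
                    printf("ino %llu -> %s\n", (unsigned long long)samples[i],
                           where_to_return(samples[i], cache_progress, highest_objectid));
            return 0;
    }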
237 | /* | ||
238 | * When a transaction is committed, we'll move those inode numbers which | ||
239 | * are smaller than root->cache_progress from the pinned tree to the free_ino | ||
240 | * tree, and others will just be dropped, because the commit root we were | ||
241 | * searching has changed. | ||
242 | * | ||
243 | * Must be called with root->fs_commit_mutex held | ||
244 | */ | ||
245 | void btrfs_unpin_free_ino(struct btrfs_root *root) | ||
246 | { | ||
247 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
248 | struct rb_root *rbroot = &root->free_ino_pinned->free_space_offset; | ||
249 | struct btrfs_free_space *info; | ||
250 | struct rb_node *n; | ||
251 | u64 count; | ||
252 | |||
253 | while (1) { | ||
254 | n = rb_first(rbroot); | ||
255 | if (!n) | ||
256 | break; | ||
257 | |||
258 | info = rb_entry(n, struct btrfs_free_space, offset_index); | ||
259 | BUG_ON(info->bitmap); | ||
260 | |||
261 | if (info->offset > root->cache_progress) | ||
262 | goto free; | ||
263 | else if (info->offset + info->bytes > root->cache_progress) | ||
264 | count = root->cache_progress - info->offset + 1; | ||
265 | else | ||
266 | count = info->bytes; | ||
267 | |||
268 | __btrfs_add_free_space(ctl, info->offset, count); | ||
269 | free: | ||
270 | rb_erase(&info->offset_index, rbroot); | ||
271 | kfree(info); | ||
272 | } | ||
273 | } | ||
274 | |||
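The count computed in btrfs_unpin_free_ino() splits each pinned extent against cache_progress: extents the scan has fully covered move over whole, extents straddling the boundary are trimmed, and extents beyond it are dropped. A small user-space model of that three-way split, with invented offsets and sizes:

    /* unpin.c -- mirrors the arithmetic in btrfs_unpin_free_ino(). */
    #include <stdio.h>
    #include <stdint.h>

    static void unpin_one(uint64_t offset, uint64_t bytes, uint64_t cache_progress)
    {
            if (offset > cache_progress) {
                    printf("[%llu, +%llu): dropped, not scanned yet\n",
                           (unsigned long long)offset, (unsigned long long)bytes);
            } else if (offset + bytes > cache_progress) {
                    uint64_t count = cache_progress - offset + 1;
                    printf("[%llu, +%llu): %llu numbers returned, rest dropped\n",
                           (unsigned long long)offset, (unsigned long long)bytes,
                           (unsigned long long)count);
            } else {
                    printf("[%llu, +%llu): whole extent returned\n",
                           (unsigned long long)offset, (unsigned long long)bytes);
            }
    }

    int main(void)
    {
            uint64_t cache_progress = 1000;

            unpin_one(200, 100, cache_progress);   /* fully scanned */
            unpin_one(950, 200, cache_progress);   /* straddles cache_progress */
            unpin_one(2000, 50, cache_progress);   /* beyond cache_progress */
            return 0;
    }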
275 | #define INIT_THRESHOLD (((1024 * 32) / 2) / sizeof(struct btrfs_free_space)) | ||
276 | #define INODES_PER_BITMAP (PAGE_CACHE_SIZE * 8) | ||
277 | |||
278 | /* | ||
279 | * The goal is to keep the memory used by the free_ino tree from | ||
280 | * exceeding the memory we would use if we used bitmaps only. | ||
281 | */ | ||
282 | static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl) | ||
283 | { | ||
284 | struct btrfs_free_space *info; | ||
285 | struct rb_node *n; | ||
286 | int max_ino; | ||
287 | int max_bitmaps; | ||
288 | |||
289 | n = rb_last(&ctl->free_space_offset); | ||
290 | if (!n) { | ||
291 | ctl->extents_thresh = INIT_THRESHOLD; | ||
292 | return; | ||
293 | } | ||
294 | info = rb_entry(n, struct btrfs_free_space, offset_index); | ||
295 | |||
296 | /* | ||
297 | * Find the maximum inode number in the filesystem. Note we | ||
298 | * ignore the fact that this can be a bitmap, because we are | ||
299 | * not doing a precise calculation. | ||
300 | */ | ||
301 | max_ino = info->bytes - 1; | ||
302 | |||
303 | max_bitmaps = ALIGN(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP; | ||
304 | if (max_bitmaps <= ctl->total_bitmaps) { | ||
305 | ctl->extents_thresh = 0; | ||
306 | return; | ||
307 | } | ||
308 | |||
309 | ctl->extents_thresh = (max_bitmaps - ctl->total_bitmaps) * | ||
310 | PAGE_CACHE_SIZE / sizeof(*info); | ||
311 | } | ||
312 | |||
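For a feel of the numbers in recalculate_thresholds(): with a 4K page, INODES_PER_BITMAP is 32768 and INIT_THRESHOLD is 16K divided by the entry size. The sketch below is illustrative only; it assumes sizeof(struct btrfs_free_space) is 48 bytes (the real size depends on the kernel build) and uses an invented max_ino and bitmap count.

    /* thresh.c -- worked example of the extents_thresh calculation. */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_CACHE_SIZE       4096ULL
    #define FREE_SPACE_ENTRY_SIZE 48ULL   /* assumption for sizeof(*info) */
    #define INIT_THRESHOLD        (((1024 * 32) / 2) / FREE_SPACE_ENTRY_SIZE)
    #define INODES_PER_BITMAP     (PAGE_CACHE_SIZE * 8)

    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
            uint64_t max_ino = 200000;   /* hypothetical highest cached ino */
            uint64_t total_bitmaps = 2;  /* bitmaps already in use */
            uint64_t max_bitmaps, thresh;

            max_bitmaps = ALIGN_UP(max_ino, INODES_PER_BITMAP) / INODES_PER_BITMAP;
            if (max_bitmaps <= total_bitmaps)
                    thresh = 0;
            else
                    thresh = (max_bitmaps - total_bitmaps) *
                             PAGE_CACHE_SIZE / FREE_SPACE_ENTRY_SIZE;

            printf("INIT_THRESHOLD     = %llu entries\n", (unsigned long long)INIT_THRESHOLD);
            printf("max_bitmaps        = %llu\n", (unsigned long long)max_bitmaps);
            printf("new extents_thresh = %llu entries\n", (unsigned long long)thresh);
            return 0;
    }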
313 | /* | ||
314 | * We don't fall back to a bitmap if we are below the extents threshold | ||
315 | * or this chunk of inode numbers is a big one. | ||
316 | */ | ||
317 | static bool use_bitmap(struct btrfs_free_space_ctl *ctl, | ||
318 | struct btrfs_free_space *info) | ||
319 | { | ||
320 | if (ctl->free_extents < ctl->extents_thresh || | ||
321 | info->bytes > INODES_PER_BITMAP / 10) | ||
322 | return false; | ||
323 | |||
324 | return true; | ||
325 | } | ||
326 | |||
327 | static struct btrfs_free_space_op free_ino_op = { | ||
328 | .recalc_thresholds = recalculate_thresholds, | ||
329 | .use_bitmap = use_bitmap, | ||
330 | }; | ||
331 | |||
332 | static void pinned_recalc_thresholds(struct btrfs_free_space_ctl *ctl) | ||
333 | { | ||
334 | } | ||
335 | |||
336 | static bool pinned_use_bitmap(struct btrfs_free_space_ctl *ctl, | ||
337 | struct btrfs_free_space *info) | ||
338 | { | ||
339 | /* | ||
340 | * We always use extents for two reasons: | ||
341 | * | ||
342 | * - The pinned tree is only used while the caching work is | ||
343 | *   in progress. | ||
344 | * - It keeps the code simpler. See btrfs_unpin_free_ino(). | ||
345 | */ | ||
346 | return false; | ||
347 | } | ||
348 | |||
349 | static struct btrfs_free_space_op pinned_free_ino_op = { | ||
350 | .recalc_thresholds = pinned_recalc_thresholds, | ||
351 | .use_bitmap = pinned_use_bitmap, | ||
352 | }; | ||
353 | |||
354 | void btrfs_init_free_ino_ctl(struct btrfs_root *root) | ||
355 | { | ||
356 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
357 | struct btrfs_free_space_ctl *pinned = root->free_ino_pinned; | ||
358 | |||
359 | spin_lock_init(&ctl->tree_lock); | ||
360 | ctl->unit = 1; | ||
361 | ctl->start = 0; | ||
362 | ctl->private = NULL; | ||
363 | ctl->op = &free_ino_op; | ||
364 | |||
365 | /* | ||
366 | * Initially we allow 16K of RAM to cache chunks of | ||
367 | * inode numbers before we resort to bitmaps. This is somewhat | ||
368 | * arbitrary, but it will be adjusted at runtime. | ||
369 | */ | ||
370 | ctl->extents_thresh = INIT_THRESHOLD; | ||
371 | |||
372 | spin_lock_init(&pinned->tree_lock); | ||
373 | pinned->unit = 1; | ||
374 | pinned->start = 0; | ||
375 | pinned->private = NULL; | ||
376 | pinned->extents_thresh = 0; | ||
377 | pinned->op = &pinned_free_ino_op; | ||
378 | } | ||
379 | |||
380 | int btrfs_save_ino_cache(struct btrfs_root *root, | ||
381 | struct btrfs_trans_handle *trans) | ||
382 | { | ||
383 | struct btrfs_free_space_ctl *ctl = root->free_ino_ctl; | ||
384 | struct btrfs_path *path; | ||
385 | struct inode *inode; | ||
386 | u64 alloc_hint = 0; | ||
387 | int ret; | ||
388 | int prealloc; | ||
389 | bool retry = false; | ||
390 | |||
391 | path = btrfs_alloc_path(); | ||
392 | if (!path) | ||
393 | return -ENOMEM; | ||
394 | again: | ||
395 | inode = lookup_free_ino_inode(root, path); | ||
396 | if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) { | ||
397 | ret = PTR_ERR(inode); | ||
398 | goto out; | ||
399 | } | ||
400 | |||
401 | if (IS_ERR(inode)) { | ||
402 | BUG_ON(retry); | ||
403 | retry = true; | ||
404 | |||
405 | ret = create_free_ino_inode(root, trans, path); | ||
406 | if (ret) | ||
407 | goto out; | ||
408 | goto again; | ||
409 | } | ||
410 | |||
411 | BTRFS_I(inode)->generation = 0; | ||
412 | ret = btrfs_update_inode(trans, root, inode); | ||
413 | WARN_ON(ret); | ||
414 | |||
415 | if (i_size_read(inode) > 0) { | ||
416 | ret = btrfs_truncate_free_space_cache(root, trans, path, inode); | ||
417 | if (ret) | ||
418 | goto out_put; | ||
419 | } | ||
420 | |||
421 | spin_lock(&root->cache_lock); | ||
422 | if (root->cached != BTRFS_CACHE_FINISHED) { | ||
423 | ret = -1; | ||
424 | spin_unlock(&root->cache_lock); | ||
425 | goto out_put; | ||
426 | } | ||
427 | spin_unlock(&root->cache_lock); | ||
428 | |||
429 | spin_lock(&ctl->tree_lock); | ||
430 | prealloc = sizeof(struct btrfs_free_space) * ctl->free_extents; | ||
431 | prealloc = ALIGN(prealloc, PAGE_CACHE_SIZE); | ||
432 | prealloc += ctl->total_bitmaps * PAGE_CACHE_SIZE; | ||
433 | spin_unlock(&ctl->tree_lock); | ||
434 | |||
435 | /* Just to make sure we have enough space */ | ||
436 | prealloc += 8 * PAGE_CACHE_SIZE; | ||
437 | |||
438 | ret = btrfs_check_data_free_space(inode, prealloc); | ||
439 | if (ret) | ||
440 | goto out_put; | ||
441 | |||
442 | ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, prealloc, | ||
443 | prealloc, prealloc, &alloc_hint); | ||
444 | if (ret) | ||
445 | goto out_put; | ||
446 | btrfs_free_reserved_data_space(inode, prealloc); | ||
447 | |||
448 | out_put: | ||
449 | iput(inode); | ||
450 | out: | ||
451 | if (ret == 0) | ||
452 | ret = btrfs_write_out_ino_cache(root, trans, path); | ||
453 | |||
454 | btrfs_free_path(path); | ||
455 | return ret; | ||
456 | } | ||
457 | |||
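The prealloc figure in btrfs_save_ino_cache() is just the worst-case on-disk footprint of the cache, one entry per free extent plus one page per bitmap, rounded up and padded with eight pages of slack. A user-space model with invented counts and the same 48-byte entry-size assumption as above:

    /* prealloc.c -- models the prealloc sizing in btrfs_save_ino_cache(). */
    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_CACHE_SIZE       4096ULL
    #define FREE_SPACE_ENTRY_SIZE 48ULL   /* assumption for sizeof(struct btrfs_free_space) */

    #define ALIGN_UP(x, a) (((x) + (a) - 1) / (a) * (a))

    int main(void)
    {
            uint64_t free_extents = 1000;  /* invented */
            uint64_t total_bitmaps = 3;    /* invented */
            uint64_t prealloc;

            prealloc = FREE_SPACE_ENTRY_SIZE * free_extents;
            prealloc = ALIGN_UP(prealloc, PAGE_CACHE_SIZE);
            prealloc += total_bitmaps * PAGE_CACHE_SIZE;
            prealloc += 8 * PAGE_CACHE_SIZE;   /* "just to make sure" slack */

            printf("prealloc = %llu bytes (%llu pages)\n",
                   (unsigned long long)prealloc,
                   (unsigned long long)(prealloc / PAGE_CACHE_SIZE));
            return 0;
    }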
458 | static int btrfs_find_highest_objectid(struct btrfs_root *root, u64 *objectid) | ||
24 | { | 459 | { |
25 | struct btrfs_path *path; | 460 | struct btrfs_path *path; |
26 | int ret; | 461 | int ret; |
@@ -55,15 +490,14 @@ error: | |||
55 | return ret; | 490 | return ret; |
56 | } | 491 | } |
57 | 492 | ||
58 | int btrfs_find_free_objectid(struct btrfs_trans_handle *trans, | 493 | int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid) |
59 | struct btrfs_root *root, | ||
60 | u64 dirid, u64 *objectid) | ||
61 | { | 494 | { |
62 | int ret; | 495 | int ret; |
63 | mutex_lock(&root->objectid_mutex); | 496 | mutex_lock(&root->objectid_mutex); |
64 | 497 | ||
65 | if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) { | 498 | if (unlikely(root->highest_objectid < BTRFS_FIRST_FREE_OBJECTID)) { |
66 | ret = btrfs_find_highest_inode(root, &root->highest_objectid); | 499 | ret = btrfs_find_highest_objectid(root, |
500 | &root->highest_objectid); | ||
67 | if (ret) | 501 | if (ret) |
68 | goto out; | 502 | goto out; |
69 | } | 503 | } |
diff --git a/fs/btrfs/inode-map.h b/fs/btrfs/inode-map.h new file mode 100644 index 000000000000..ddb347bfee23 --- /dev/null +++ b/fs/btrfs/inode-map.h | |||
@@ -0,0 +1,13 @@ | |||
1 | #ifndef __BTRFS_INODE_MAP | ||
2 | #define __BTRFS_INODE_MAP | ||
3 | |||
4 | void btrfs_init_free_ino_ctl(struct btrfs_root *root); | ||
5 | void btrfs_unpin_free_ino(struct btrfs_root *root); | ||
6 | void btrfs_return_ino(struct btrfs_root *root, u64 objectid); | ||
7 | int btrfs_find_free_ino(struct btrfs_root *root, u64 *objectid); | ||
8 | int btrfs_save_ino_cache(struct btrfs_root *root, | ||
9 | struct btrfs_trans_handle *trans); | ||
10 | |||
11 | int btrfs_find_free_objectid(struct btrfs_root *root, u64 *objectid); | ||
12 | |||
13 | #endif | ||
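To show how the pieces declared in this new header fit together, here is a deliberately tiny user-space model of the btrfs_find_free_ino()/btrfs_return_ino() contract. It is not kernel code: the free-space-cache trees, the caching thread, pinning and locking are all dropped, and the cache is just a small array. The return_ino() helper plays the role btrfs_return_ino() has in the patch, handing an unused number back to the cache.

    /* ino_model.c -- minimal model of the allocate/return contract. */
    #include <stdio.h>
    #include <stdint.h>
    #include <errno.h>

    #define CACHE_SLOTS 8

    static uint64_t free_ino[CACHE_SLOTS];
    static int nr_free;

    static int find_free_ino(uint64_t *objectid)
    {
            uint64_t best = 0;
            int best_idx = -1;

            for (int i = 0; i < nr_free; i++) {
                    if (best_idx < 0 || free_ino[i] < best) {
                            best = free_ino[i];
                            best_idx = i;
                    }
            }
            if (best_idx < 0)
                    return -ENOSPC;

            *objectid = best;
            free_ino[best_idx] = free_ino[--nr_free];  /* remove from the cache */
            return 0;
    }

    static void return_ino(uint64_t objectid)
    {
            if (nr_free < CACHE_SLOTS)
                    free_ino[nr_free++] = objectid;
    }

    int main(void)
    {
            uint64_t ino;

            for (uint64_t n = 260; n < 264; n++)   /* pretend the cache was filled */
                    return_ino(n);

            if (!find_free_ino(&ino)) {
                    printf("allocated ino %llu\n", (unsigned long long)ino);
                    return_ino(ino);               /* e.g. inode creation failed */
            }
            return 0;
    }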
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c index 7cd8ab0ef04d..bb51bb1fa44f 100644 --- a/fs/btrfs/inode.c +++ b/fs/btrfs/inode.c | |||
@@ -37,6 +37,7 @@ | |||
37 | #include <linux/posix_acl.h> | 37 | #include <linux/posix_acl.h> |
38 | #include <linux/falloc.h> | 38 | #include <linux/falloc.h> |
39 | #include <linux/slab.h> | 39 | #include <linux/slab.h> |
40 | #include <linux/ratelimit.h> | ||
40 | #include "compat.h" | 41 | #include "compat.h" |
41 | #include "ctree.h" | 42 | #include "ctree.h" |
42 | #include "disk-io.h" | 43 | #include "disk-io.h" |
@@ -51,6 +52,7 @@ | |||
51 | #include "compression.h" | 52 | #include "compression.h" |
52 | #include "locking.h" | 53 | #include "locking.h" |
53 | #include "free-space-cache.h" | 54 | #include "free-space-cache.h" |
55 | #include "inode-map.h" | ||
54 | 56 | ||
55 | struct btrfs_iget_args { | 57 | struct btrfs_iget_args { |
56 | u64 ino; | 58 | u64 ino; |
@@ -138,7 +140,7 @@ static noinline int insert_inline_extent(struct btrfs_trans_handle *trans, | |||
138 | path->leave_spinning = 1; | 140 | path->leave_spinning = 1; |
139 | btrfs_set_trans_block_group(trans, inode); | 141 | btrfs_set_trans_block_group(trans, inode); |
140 | 142 | ||
141 | key.objectid = inode->i_ino; | 143 | key.objectid = btrfs_ino(inode); |
142 | key.offset = start; | 144 | key.offset = start; |
143 | btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); | 145 | btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); |
144 | datasize = btrfs_file_extent_calc_inline_size(cur_size); | 146 | datasize = btrfs_file_extent_calc_inline_size(cur_size); |
@@ -340,6 +342,10 @@ static noinline int compress_file_range(struct inode *inode, | |||
340 | int will_compress; | 342 | int will_compress; |
341 | int compress_type = root->fs_info->compress_type; | 343 | int compress_type = root->fs_info->compress_type; |
342 | 344 | ||
345 | /* if this is a small write inside eof, kick off a defrag */ | ||
346 | if (end <= BTRFS_I(inode)->disk_i_size && (end - start + 1) < 16 * 1024) | ||
347 | btrfs_add_inode_defrag(NULL, inode); | ||
348 | |||
343 | actual_end = min_t(u64, isize, end + 1); | 349 | actual_end = min_t(u64, isize, end + 1); |
344 | again: | 350 | again: |
345 | will_compress = 0; | 351 | will_compress = 0; |
@@ -649,7 +655,7 @@ retry: | |||
649 | async_extent->start + | 655 | async_extent->start + |
650 | async_extent->ram_size - 1, 0); | 656 | async_extent->ram_size - 1, 0); |
651 | 657 | ||
652 | em = alloc_extent_map(GFP_NOFS); | 658 | em = alloc_extent_map(); |
653 | BUG_ON(!em); | 659 | BUG_ON(!em); |
654 | em->start = async_extent->start; | 660 | em->start = async_extent->start; |
655 | em->len = async_extent->ram_size; | 661 | em->len = async_extent->ram_size; |
@@ -745,6 +751,15 @@ static u64 get_extent_allocation_hint(struct inode *inode, u64 start, | |||
745 | return alloc_hint; | 751 | return alloc_hint; |
746 | } | 752 | } |
747 | 753 | ||
754 | static inline bool is_free_space_inode(struct btrfs_root *root, | ||
755 | struct inode *inode) | ||
756 | { | ||
757 | if (root == root->fs_info->tree_root || | ||
758 | BTRFS_I(inode)->location.objectid == BTRFS_FREE_INO_OBJECTID) | ||
759 | return true; | ||
760 | return false; | ||
761 | } | ||
762 | |||
748 | /* | 763 | /* |
749 | * when extent_io.c finds a delayed allocation range in the file, | 764 | * when extent_io.c finds a delayed allocation range in the file, |
750 | * the call backs end up in this code. The basic idea is to | 765 | * the call backs end up in this code. The basic idea is to |
@@ -777,7 +792,7 @@ static noinline int cow_file_range(struct inode *inode, | |||
777 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | 792 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
778 | int ret = 0; | 793 | int ret = 0; |
779 | 794 | ||
780 | BUG_ON(root == root->fs_info->tree_root); | 795 | BUG_ON(is_free_space_inode(root, inode)); |
781 | trans = btrfs_join_transaction(root, 1); | 796 | trans = btrfs_join_transaction(root, 1); |
782 | BUG_ON(IS_ERR(trans)); | 797 | BUG_ON(IS_ERR(trans)); |
783 | btrfs_set_trans_block_group(trans, inode); | 798 | btrfs_set_trans_block_group(trans, inode); |
@@ -788,6 +803,10 @@ static noinline int cow_file_range(struct inode *inode, | |||
788 | disk_num_bytes = num_bytes; | 803 | disk_num_bytes = num_bytes; |
789 | ret = 0; | 804 | ret = 0; |
790 | 805 | ||
806 | /* if this is a small write inside eof, kick off defrag */ | ||
807 | if (end <= BTRFS_I(inode)->disk_i_size && num_bytes < 64 * 1024) | ||
808 | btrfs_add_inode_defrag(trans, inode); | ||
809 | |||
791 | if (start == 0) { | 810 | if (start == 0) { |
792 | /* lets try to make an inline extent */ | 811 | /* lets try to make an inline extent */ |
793 | ret = cow_file_range_inline(trans, root, inode, | 812 | ret = cow_file_range_inline(trans, root, inode, |
@@ -826,7 +845,7 @@ static noinline int cow_file_range(struct inode *inode, | |||
826 | (u64)-1, &ins, 1); | 845 | (u64)-1, &ins, 1); |
827 | BUG_ON(ret); | 846 | BUG_ON(ret); |
828 | 847 | ||
829 | em = alloc_extent_map(GFP_NOFS); | 848 | em = alloc_extent_map(); |
830 | BUG_ON(!em); | 849 | BUG_ON(!em); |
831 | em->start = start; | 850 | em->start = start; |
832 | em->orig_start = em->start; | 851 | em->orig_start = em->start; |
@@ -1008,7 +1027,7 @@ static noinline int csum_exist_in_range(struct btrfs_root *root, | |||
1008 | LIST_HEAD(list); | 1027 | LIST_HEAD(list); |
1009 | 1028 | ||
1010 | ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr, | 1029 | ret = btrfs_lookup_csums_range(root->fs_info->csum_root, bytenr, |
1011 | bytenr + num_bytes - 1, &list); | 1030 | bytenr + num_bytes - 1, &list, 0); |
1012 | if (ret == 0 && list_empty(&list)) | 1031 | if (ret == 0 && list_empty(&list)) |
1013 | return 0; | 1032 | return 0; |
1014 | 1033 | ||
@@ -1049,29 +1068,31 @@ static noinline int run_delalloc_nocow(struct inode *inode, | |||
1049 | int type; | 1068 | int type; |
1050 | int nocow; | 1069 | int nocow; |
1051 | int check_prev = 1; | 1070 | int check_prev = 1; |
1052 | bool nolock = false; | 1071 | bool nolock; |
1072 | u64 ino = btrfs_ino(inode); | ||
1053 | 1073 | ||
1054 | path = btrfs_alloc_path(); | 1074 | path = btrfs_alloc_path(); |
1055 | BUG_ON(!path); | 1075 | BUG_ON(!path); |
1056 | if (root == root->fs_info->tree_root) { | 1076 | |
1057 | nolock = true; | 1077 | nolock = is_free_space_inode(root, inode); |
1078 | |||
1079 | if (nolock) | ||
1058 | trans = btrfs_join_transaction_nolock(root, 1); | 1080 | trans = btrfs_join_transaction_nolock(root, 1); |
1059 | } else { | 1081 | else |
1060 | trans = btrfs_join_transaction(root, 1); | 1082 | trans = btrfs_join_transaction(root, 1); |
1061 | } | ||
1062 | BUG_ON(IS_ERR(trans)); | 1083 | BUG_ON(IS_ERR(trans)); |
1063 | 1084 | ||
1064 | cow_start = (u64)-1; | 1085 | cow_start = (u64)-1; |
1065 | cur_offset = start; | 1086 | cur_offset = start; |
1066 | while (1) { | 1087 | while (1) { |
1067 | ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, | 1088 | ret = btrfs_lookup_file_extent(trans, root, path, ino, |
1068 | cur_offset, 0); | 1089 | cur_offset, 0); |
1069 | BUG_ON(ret < 0); | 1090 | BUG_ON(ret < 0); |
1070 | if (ret > 0 && path->slots[0] > 0 && check_prev) { | 1091 | if (ret > 0 && path->slots[0] > 0 && check_prev) { |
1071 | leaf = path->nodes[0]; | 1092 | leaf = path->nodes[0]; |
1072 | btrfs_item_key_to_cpu(leaf, &found_key, | 1093 | btrfs_item_key_to_cpu(leaf, &found_key, |
1073 | path->slots[0] - 1); | 1094 | path->slots[0] - 1); |
1074 | if (found_key.objectid == inode->i_ino && | 1095 | if (found_key.objectid == ino && |
1075 | found_key.type == BTRFS_EXTENT_DATA_KEY) | 1096 | found_key.type == BTRFS_EXTENT_DATA_KEY) |
1076 | path->slots[0]--; | 1097 | path->slots[0]--; |
1077 | } | 1098 | } |
@@ -1092,7 +1113,7 @@ next_slot: | |||
1092 | num_bytes = 0; | 1113 | num_bytes = 0; |
1093 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | 1114 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
1094 | 1115 | ||
1095 | if (found_key.objectid > inode->i_ino || | 1116 | if (found_key.objectid > ino || |
1096 | found_key.type > BTRFS_EXTENT_DATA_KEY || | 1117 | found_key.type > BTRFS_EXTENT_DATA_KEY || |
1097 | found_key.offset > end) | 1118 | found_key.offset > end) |
1098 | break; | 1119 | break; |
@@ -1127,7 +1148,7 @@ next_slot: | |||
1127 | goto out_check; | 1148 | goto out_check; |
1128 | if (btrfs_extent_readonly(root, disk_bytenr)) | 1149 | if (btrfs_extent_readonly(root, disk_bytenr)) |
1129 | goto out_check; | 1150 | goto out_check; |
1130 | if (btrfs_cross_ref_exist(trans, root, inode->i_ino, | 1151 | if (btrfs_cross_ref_exist(trans, root, ino, |
1131 | found_key.offset - | 1152 | found_key.offset - |
1132 | extent_offset, disk_bytenr)) | 1153 | extent_offset, disk_bytenr)) |
1133 | goto out_check; | 1154 | goto out_check; |
@@ -1164,7 +1185,7 @@ out_check: | |||
1164 | goto next_slot; | 1185 | goto next_slot; |
1165 | } | 1186 | } |
1166 | 1187 | ||
1167 | btrfs_release_path(root, path); | 1188 | btrfs_release_path(path); |
1168 | if (cow_start != (u64)-1) { | 1189 | if (cow_start != (u64)-1) { |
1169 | ret = cow_file_range(inode, locked_page, cow_start, | 1190 | ret = cow_file_range(inode, locked_page, cow_start, |
1170 | found_key.offset - 1, page_started, | 1191 | found_key.offset - 1, page_started, |
@@ -1177,7 +1198,7 @@ out_check: | |||
1177 | struct extent_map *em; | 1198 | struct extent_map *em; |
1178 | struct extent_map_tree *em_tree; | 1199 | struct extent_map_tree *em_tree; |
1179 | em_tree = &BTRFS_I(inode)->extent_tree; | 1200 | em_tree = &BTRFS_I(inode)->extent_tree; |
1180 | em = alloc_extent_map(GFP_NOFS); | 1201 | em = alloc_extent_map(); |
1181 | BUG_ON(!em); | 1202 | BUG_ON(!em); |
1182 | em->start = cur_offset; | 1203 | em->start = cur_offset; |
1183 | em->orig_start = em->start; | 1204 | em->orig_start = em->start; |
@@ -1222,7 +1243,7 @@ out_check: | |||
1222 | if (cur_offset > end) | 1243 | if (cur_offset > end) |
1223 | break; | 1244 | break; |
1224 | } | 1245 | } |
1225 | btrfs_release_path(root, path); | 1246 | btrfs_release_path(path); |
1226 | 1247 | ||
1227 | if (cur_offset <= end && cow_start == (u64)-1) | 1248 | if (cur_offset <= end && cow_start == (u64)-1) |
1228 | cow_start = cur_offset; | 1249 | cow_start = cur_offset; |
@@ -1310,14 +1331,13 @@ static int btrfs_set_bit_hook(struct inode *inode, | |||
1310 | 1331 | ||
1311 | /* | 1332 | /* |
1312 | * set_bit and clear bit hooks normally require _irqsave/restore | 1333 | * set_bit and clear bit hooks normally require _irqsave/restore |
1313 | * but in this case, we are only testeing for the DELALLOC | 1334 | * but in this case, we are only testing for the DELALLOC |
1314 | * bit, which is only set or cleared with irqs on | 1335 | * bit, which is only set or cleared with irqs on |
1315 | */ | 1336 | */ |
1316 | if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { | 1337 | if (!(state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { |
1317 | struct btrfs_root *root = BTRFS_I(inode)->root; | 1338 | struct btrfs_root *root = BTRFS_I(inode)->root; |
1318 | u64 len = state->end + 1 - state->start; | 1339 | u64 len = state->end + 1 - state->start; |
1319 | int do_list = (root->root_key.objectid != | 1340 | bool do_list = !is_free_space_inode(root, inode); |
1320 | BTRFS_ROOT_TREE_OBJECTID); | ||
1321 | 1341 | ||
1322 | if (*bits & EXTENT_FIRST_DELALLOC) | 1342 | if (*bits & EXTENT_FIRST_DELALLOC) |
1323 | *bits &= ~EXTENT_FIRST_DELALLOC; | 1343 | *bits &= ~EXTENT_FIRST_DELALLOC; |
@@ -1344,14 +1364,13 @@ static int btrfs_clear_bit_hook(struct inode *inode, | |||
1344 | { | 1364 | { |
1345 | /* | 1365 | /* |
1346 | * set_bit and clear bit hooks normally require _irqsave/restore | 1366 | * set_bit and clear bit hooks normally require _irqsave/restore |
1347 | * but in this case, we are only testeing for the DELALLOC | 1367 | * but in this case, we are only testing for the DELALLOC |
1348 | * bit, which is only set or cleared with irqs on | 1368 | * bit, which is only set or cleared with irqs on |
1349 | */ | 1369 | */ |
1350 | if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { | 1370 | if ((state->state & EXTENT_DELALLOC) && (*bits & EXTENT_DELALLOC)) { |
1351 | struct btrfs_root *root = BTRFS_I(inode)->root; | 1371 | struct btrfs_root *root = BTRFS_I(inode)->root; |
1352 | u64 len = state->end + 1 - state->start; | 1372 | u64 len = state->end + 1 - state->start; |
1353 | int do_list = (root->root_key.objectid != | 1373 | bool do_list = !is_free_space_inode(root, inode); |
1354 | BTRFS_ROOT_TREE_OBJECTID); | ||
1355 | 1374 | ||
1356 | if (*bits & EXTENT_FIRST_DELALLOC) | 1375 | if (*bits & EXTENT_FIRST_DELALLOC) |
1357 | *bits &= ~EXTENT_FIRST_DELALLOC; | 1376 | *bits &= ~EXTENT_FIRST_DELALLOC; |
@@ -1458,7 +1477,7 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio, | |||
1458 | 1477 | ||
1459 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; | 1478 | skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM; |
1460 | 1479 | ||
1461 | if (root == root->fs_info->tree_root) | 1480 | if (is_free_space_inode(root, inode)) |
1462 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); | 1481 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, 2); |
1463 | else | 1482 | else |
1464 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); | 1483 | ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); |
@@ -1644,7 +1663,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, | |||
1644 | &hint, 0); | 1663 | &hint, 0); |
1645 | BUG_ON(ret); | 1664 | BUG_ON(ret); |
1646 | 1665 | ||
1647 | ins.objectid = inode->i_ino; | 1666 | ins.objectid = btrfs_ino(inode); |
1648 | ins.offset = file_pos; | 1667 | ins.offset = file_pos; |
1649 | ins.type = BTRFS_EXTENT_DATA_KEY; | 1668 | ins.type = BTRFS_EXTENT_DATA_KEY; |
1650 | ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); | 1669 | ret = btrfs_insert_empty_item(trans, root, path, &ins, sizeof(*fi)); |
@@ -1675,7 +1694,7 @@ static int insert_reserved_file_extent(struct btrfs_trans_handle *trans, | |||
1675 | ins.type = BTRFS_EXTENT_ITEM_KEY; | 1694 | ins.type = BTRFS_EXTENT_ITEM_KEY; |
1676 | ret = btrfs_alloc_reserved_file_extent(trans, root, | 1695 | ret = btrfs_alloc_reserved_file_extent(trans, root, |
1677 | root->root_key.objectid, | 1696 | root->root_key.objectid, |
1678 | inode->i_ino, file_pos, &ins); | 1697 | btrfs_ino(inode), file_pos, &ins); |
1679 | BUG_ON(ret); | 1698 | BUG_ON(ret); |
1680 | btrfs_free_path(path); | 1699 | btrfs_free_path(path); |
1681 | 1700 | ||
@@ -1701,7 +1720,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) | |||
1701 | struct extent_state *cached_state = NULL; | 1720 | struct extent_state *cached_state = NULL; |
1702 | int compress_type = 0; | 1721 | int compress_type = 0; |
1703 | int ret; | 1722 | int ret; |
1704 | bool nolock = false; | 1723 | bool nolock; |
1705 | 1724 | ||
1706 | ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, | 1725 | ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start, |
1707 | end - start + 1); | 1726 | end - start + 1); |
@@ -1709,7 +1728,7 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) | |||
1709 | return 0; | 1728 | return 0; |
1710 | BUG_ON(!ordered_extent); | 1729 | BUG_ON(!ordered_extent); |
1711 | 1730 | ||
1712 | nolock = (root == root->fs_info->tree_root); | 1731 | nolock = is_free_space_inode(root, inode); |
1713 | 1732 | ||
1714 | if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { | 1733 | if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { |
1715 | BUG_ON(!list_empty(&ordered_extent->list)); | 1734 | BUG_ON(!list_empty(&ordered_extent->list)); |
@@ -1855,7 +1874,7 @@ static int btrfs_io_failed_hook(struct bio *failed_bio, | |||
1855 | } | 1874 | } |
1856 | read_unlock(&em_tree->lock); | 1875 | read_unlock(&em_tree->lock); |
1857 | 1876 | ||
1858 | if (!em || IS_ERR(em)) { | 1877 | if (IS_ERR_OR_NULL(em)) { |
1859 | kfree(failrec); | 1878 | kfree(failrec); |
1860 | return -EIO; | 1879 | return -EIO; |
1861 | } | 1880 | } |
@@ -2004,12 +2023,11 @@ good: | |||
2004 | return 0; | 2023 | return 0; |
2005 | 2024 | ||
2006 | zeroit: | 2025 | zeroit: |
2007 | if (printk_ratelimit()) { | 2026 | printk_ratelimited(KERN_INFO "btrfs csum failed ino %llu off %llu csum %u " |
2008 | printk(KERN_INFO "btrfs csum failed ino %lu off %llu csum %u " | 2027 | "private %llu\n", |
2009 | "private %llu\n", page->mapping->host->i_ino, | 2028 | (unsigned long long)btrfs_ino(page->mapping->host), |
2010 | (unsigned long long)start, csum, | 2029 | (unsigned long long)start, csum, |
2011 | (unsigned long long)private); | 2030 | (unsigned long long)private); |
2012 | } | ||
2013 | memset(kaddr + offset, 1, end - start + 1); | 2031 | memset(kaddr + offset, 1, end - start + 1); |
2014 | flush_dcache_page(page); | 2032 | flush_dcache_page(page); |
2015 | kunmap_atomic(kaddr, KM_USER0); | 2033 | kunmap_atomic(kaddr, KM_USER0); |
@@ -2244,7 +2262,7 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode) | |||
2244 | 2262 | ||
2245 | /* insert an orphan item to track this unlinked/truncated file */ | 2263 | /* insert an orphan item to track this unlinked/truncated file */ |
2246 | if (insert >= 1) { | 2264 | if (insert >= 1) { |
2247 | ret = btrfs_insert_orphan_item(trans, root, inode->i_ino); | 2265 | ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); |
2248 | BUG_ON(ret); | 2266 | BUG_ON(ret); |
2249 | } | 2267 | } |
2250 | 2268 | ||
@@ -2281,7 +2299,7 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode) | |||
2281 | spin_unlock(&root->orphan_lock); | 2299 | spin_unlock(&root->orphan_lock); |
2282 | 2300 | ||
2283 | if (trans && delete_item) { | 2301 | if (trans && delete_item) { |
2284 | ret = btrfs_del_orphan_item(trans, root, inode->i_ino); | 2302 | ret = btrfs_del_orphan_item(trans, root, btrfs_ino(inode)); |
2285 | BUG_ON(ret); | 2303 | BUG_ON(ret); |
2286 | } | 2304 | } |
2287 | 2305 | ||
@@ -2346,7 +2364,7 @@ int btrfs_orphan_cleanup(struct btrfs_root *root) | |||
2346 | break; | 2364 | break; |
2347 | 2365 | ||
2348 | /* release the path since we're done with it */ | 2366 | /* release the path since we're done with it */ |
2349 | btrfs_release_path(root, path); | 2367 | btrfs_release_path(path); |
2350 | 2368 | ||
2351 | /* | 2369 | /* |
2352 | * this is where we are basically btrfs_lookup, without the | 2370 | * this is where we are basically btrfs_lookup, without the |
@@ -2543,7 +2561,8 @@ static void btrfs_read_locked_inode(struct inode *inode) | |||
2543 | * try to precache a NULL acl entry for files that don't have | 2561 | * try to precache a NULL acl entry for files that don't have |
2544 | * any xattrs or acls | 2562 | * any xattrs or acls |
2545 | */ | 2563 | */ |
2546 | maybe_acls = acls_after_inode_item(leaf, path->slots[0], inode->i_ino); | 2564 | maybe_acls = acls_after_inode_item(leaf, path->slots[0], |
2565 | btrfs_ino(inode)); | ||
2547 | if (!maybe_acls) | 2566 | if (!maybe_acls) |
2548 | cache_no_acl(inode); | 2567 | cache_no_acl(inode); |
2549 | 2568 | ||
@@ -2647,11 +2666,26 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, | |||
2647 | struct extent_buffer *leaf; | 2666 | struct extent_buffer *leaf; |
2648 | int ret; | 2667 | int ret; |
2649 | 2668 | ||
2669 | /* | ||
2670 | * If the root is the tree root, this inode is used to store | ||
2671 | * free space information, and such inodes are updated when the | ||
2672 | * transaction is committed, so they must not be updated via the | ||
2673 | * delayed path, or a deadlock will occur. | ||
2674 | */ | ||
2675 | if (!is_free_space_inode(root, inode)) { | ||
2676 | ret = btrfs_delayed_update_inode(trans, root, inode); | ||
2677 | if (!ret) | ||
2678 | btrfs_set_inode_last_trans(trans, inode); | ||
2679 | return ret; | ||
2680 | } | ||
2681 | |||
2650 | path = btrfs_alloc_path(); | 2682 | path = btrfs_alloc_path(); |
2651 | BUG_ON(!path); | 2683 | if (!path) |
2684 | return -ENOMEM; | ||
2685 | |||
2652 | path->leave_spinning = 1; | 2686 | path->leave_spinning = 1; |
2653 | ret = btrfs_lookup_inode(trans, root, path, | 2687 | ret = btrfs_lookup_inode(trans, root, path, &BTRFS_I(inode)->location, |
2654 | &BTRFS_I(inode)->location, 1); | 2688 | 1); |
2655 | if (ret) { | 2689 | if (ret) { |
2656 | if (ret > 0) | 2690 | if (ret > 0) |
2657 | ret = -ENOENT; | 2691 | ret = -ENOENT; |
@@ -2661,7 +2695,7 @@ noinline int btrfs_update_inode(struct btrfs_trans_handle *trans, | |||
2661 | btrfs_unlock_up_safe(path, 1); | 2695 | btrfs_unlock_up_safe(path, 1); |
2662 | leaf = path->nodes[0]; | 2696 | leaf = path->nodes[0]; |
2663 | inode_item = btrfs_item_ptr(leaf, path->slots[0], | 2697 | inode_item = btrfs_item_ptr(leaf, path->slots[0], |
2664 | struct btrfs_inode_item); | 2698 | struct btrfs_inode_item); |
2665 | 2699 | ||
2666 | fill_inode_item(trans, leaf, inode_item, inode); | 2700 | fill_inode_item(trans, leaf, inode_item, inode); |
2667 | btrfs_mark_buffer_dirty(leaf); | 2701 | btrfs_mark_buffer_dirty(leaf); |
@@ -2672,7 +2706,6 @@ failed: | |||
2672 | return ret; | 2706 | return ret; |
2673 | } | 2707 | } |
2674 | 2708 | ||
2675 | |||
2676 | /* | 2709 | /* |
2677 | * unlink helper that gets used here in inode.c and in the tree logging | 2710 | * unlink helper that gets used here in inode.c and in the tree logging |
2678 | * recovery code. It remove a link in a directory with a given name, and | 2711 | * recovery code. It remove a link in a directory with a given name, and |
@@ -2689,6 +2722,8 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, | |||
2689 | struct btrfs_dir_item *di; | 2722 | struct btrfs_dir_item *di; |
2690 | struct btrfs_key key; | 2723 | struct btrfs_key key; |
2691 | u64 index; | 2724 | u64 index; |
2725 | u64 ino = btrfs_ino(inode); | ||
2726 | u64 dir_ino = btrfs_ino(dir); | ||
2692 | 2727 | ||
2693 | path = btrfs_alloc_path(); | 2728 | path = btrfs_alloc_path(); |
2694 | if (!path) { | 2729 | if (!path) { |
@@ -2697,7 +2732,7 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, | |||
2697 | } | 2732 | } |
2698 | 2733 | ||
2699 | path->leave_spinning = 1; | 2734 | path->leave_spinning = 1; |
2700 | di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, | 2735 | di = btrfs_lookup_dir_item(trans, root, path, dir_ino, |
2701 | name, name_len, -1); | 2736 | name, name_len, -1); |
2702 | if (IS_ERR(di)) { | 2737 | if (IS_ERR(di)) { |
2703 | ret = PTR_ERR(di); | 2738 | ret = PTR_ERR(di); |
@@ -2712,33 +2747,23 @@ static int __btrfs_unlink_inode(struct btrfs_trans_handle *trans, | |||
2712 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | 2747 | ret = btrfs_delete_one_dir_name(trans, root, path, di); |
2713 | if (ret) | 2748 | if (ret) |
2714 | goto err; | 2749 | goto err; |
2715 | btrfs_release_path(root, path); | 2750 | btrfs_release_path(path); |
2716 | 2751 | ||
2717 | ret = btrfs_del_inode_ref(trans, root, name, name_len, | 2752 | ret = btrfs_del_inode_ref(trans, root, name, name_len, ino, |
2718 | inode->i_ino, | 2753 | dir_ino, &index); |
2719 | dir->i_ino, &index); | ||
2720 | if (ret) { | 2754 | if (ret) { |
2721 | printk(KERN_INFO "btrfs failed to delete reference to %.*s, " | 2755 | printk(KERN_INFO "btrfs failed to delete reference to %.*s, " |
2722 | "inode %lu parent %lu\n", name_len, name, | 2756 | "inode %llu parent %llu\n", name_len, name, |
2723 | inode->i_ino, dir->i_ino); | 2757 | (unsigned long long)ino, (unsigned long long)dir_ino); |
2724 | goto err; | 2758 | goto err; |
2725 | } | 2759 | } |
2726 | 2760 | ||
2727 | di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, | 2761 | ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); |
2728 | index, name, name_len, -1); | 2762 | if (ret) |
2729 | if (IS_ERR(di)) { | ||
2730 | ret = PTR_ERR(di); | ||
2731 | goto err; | ||
2732 | } | ||
2733 | if (!di) { | ||
2734 | ret = -ENOENT; | ||
2735 | goto err; | 2763 | goto err; |
2736 | } | ||
2737 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | ||
2738 | btrfs_release_path(root, path); | ||
2739 | 2764 | ||
2740 | ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, | 2765 | ret = btrfs_del_inode_ref_in_log(trans, root, name, name_len, |
2741 | inode, dir->i_ino); | 2766 | inode, dir_ino); |
2742 | BUG_ON(ret != 0 && ret != -ENOENT); | 2767 | BUG_ON(ret != 0 && ret != -ENOENT); |
2743 | 2768 | ||
2744 | ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, | 2769 | ret = btrfs_del_dir_entries_in_log(trans, root, name, name_len, |
@@ -2816,12 +2841,14 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, | |||
2816 | int check_link = 1; | 2841 | int check_link = 1; |
2817 | int err = -ENOSPC; | 2842 | int err = -ENOSPC; |
2818 | int ret; | 2843 | int ret; |
2844 | u64 ino = btrfs_ino(inode); | ||
2845 | u64 dir_ino = btrfs_ino(dir); | ||
2819 | 2846 | ||
2820 | trans = btrfs_start_transaction(root, 10); | 2847 | trans = btrfs_start_transaction(root, 10); |
2821 | if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) | 2848 | if (!IS_ERR(trans) || PTR_ERR(trans) != -ENOSPC) |
2822 | return trans; | 2849 | return trans; |
2823 | 2850 | ||
2824 | if (inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) | 2851 | if (ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) |
2825 | return ERR_PTR(-ENOSPC); | 2852 | return ERR_PTR(-ENOSPC); |
2826 | 2853 | ||
2827 | /* check if there is someone else holds reference */ | 2854 | /* check if there is someone else holds reference */ |
@@ -2862,7 +2889,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, | |||
2862 | } else { | 2889 | } else { |
2863 | check_link = 0; | 2890 | check_link = 0; |
2864 | } | 2891 | } |
2865 | btrfs_release_path(root, path); | 2892 | btrfs_release_path(path); |
2866 | 2893 | ||
2867 | ret = btrfs_lookup_inode(trans, root, path, | 2894 | ret = btrfs_lookup_inode(trans, root, path, |
2868 | &BTRFS_I(inode)->location, 0); | 2895 | &BTRFS_I(inode)->location, 0); |
@@ -2876,11 +2903,11 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, | |||
2876 | } else { | 2903 | } else { |
2877 | check_link = 0; | 2904 | check_link = 0; |
2878 | } | 2905 | } |
2879 | btrfs_release_path(root, path); | 2906 | btrfs_release_path(path); |
2880 | 2907 | ||
2881 | if (ret == 0 && S_ISREG(inode->i_mode)) { | 2908 | if (ret == 0 && S_ISREG(inode->i_mode)) { |
2882 | ret = btrfs_lookup_file_extent(trans, root, path, | 2909 | ret = btrfs_lookup_file_extent(trans, root, path, |
2883 | inode->i_ino, (u64)-1, 0); | 2910 | ino, (u64)-1, 0); |
2884 | if (ret < 0) { | 2911 | if (ret < 0) { |
2885 | err = ret; | 2912 | err = ret; |
2886 | goto out; | 2913 | goto out; |
@@ -2888,7 +2915,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, | |||
2888 | BUG_ON(ret == 0); | 2915 | BUG_ON(ret == 0); |
2889 | if (check_path_shared(root, path)) | 2916 | if (check_path_shared(root, path)) |
2890 | goto out; | 2917 | goto out; |
2891 | btrfs_release_path(root, path); | 2918 | btrfs_release_path(path); |
2892 | } | 2919 | } |
2893 | 2920 | ||
2894 | if (!check_link) { | 2921 | if (!check_link) { |
@@ -2896,7 +2923,7 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, | |||
2896 | goto out; | 2923 | goto out; |
2897 | } | 2924 | } |
2898 | 2925 | ||
2899 | di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, | 2926 | di = btrfs_lookup_dir_item(trans, root, path, dir_ino, |
2900 | dentry->d_name.name, dentry->d_name.len, 0); | 2927 | dentry->d_name.name, dentry->d_name.len, 0); |
2901 | if (IS_ERR(di)) { | 2928 | if (IS_ERR(di)) { |
2902 | err = PTR_ERR(di); | 2929 | err = PTR_ERR(di); |
@@ -2909,11 +2936,11 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, | |||
2909 | err = 0; | 2936 | err = 0; |
2910 | goto out; | 2937 | goto out; |
2911 | } | 2938 | } |
2912 | btrfs_release_path(root, path); | 2939 | btrfs_release_path(path); |
2913 | 2940 | ||
2914 | ref = btrfs_lookup_inode_ref(trans, root, path, | 2941 | ref = btrfs_lookup_inode_ref(trans, root, path, |
2915 | dentry->d_name.name, dentry->d_name.len, | 2942 | dentry->d_name.name, dentry->d_name.len, |
2916 | inode->i_ino, dir->i_ino, 0); | 2943 | ino, dir_ino, 0); |
2917 | if (IS_ERR(ref)) { | 2944 | if (IS_ERR(ref)) { |
2918 | err = PTR_ERR(ref); | 2945 | err = PTR_ERR(ref); |
2919 | goto out; | 2946 | goto out; |
@@ -2922,9 +2949,17 @@ static struct btrfs_trans_handle *__unlink_start_trans(struct inode *dir, | |||
2922 | if (check_path_shared(root, path)) | 2949 | if (check_path_shared(root, path)) |
2923 | goto out; | 2950 | goto out; |
2924 | index = btrfs_inode_ref_index(path->nodes[0], ref); | 2951 | index = btrfs_inode_ref_index(path->nodes[0], ref); |
2925 | btrfs_release_path(root, path); | 2952 | btrfs_release_path(path); |
2926 | 2953 | ||
2927 | di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, index, | 2954 | /* |
2955 | * This is a commit root search; if we can look up the inode item and | ||
2956 | * other related items in the commit root, the transaction that created | ||
2957 | * the dir/file has been committed, and the dir index item whose | ||
2958 | * insertion we delayed has also been inserted into the commit root. So | ||
2959 | * we needn't worry about the delayed insertion of the dir index item | ||
2960 | * here. | ||
2961 | */ | ||
2962 | di = btrfs_lookup_dir_index_item(trans, root, path, dir_ino, index, | ||
2928 | dentry->d_name.name, dentry->d_name.len, 0); | 2963 | dentry->d_name.name, dentry->d_name.len, 0); |
2929 | if (IS_ERR(di)) { | 2964 | if (IS_ERR(di)) { |
2930 | err = PTR_ERR(di); | 2965 | err = PTR_ERR(di); |
@@ -2999,54 +3034,47 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans, | |||
2999 | struct btrfs_key key; | 3034 | struct btrfs_key key; |
3000 | u64 index; | 3035 | u64 index; |
3001 | int ret; | 3036 | int ret; |
3037 | u64 dir_ino = btrfs_ino(dir); | ||
3002 | 3038 | ||
3003 | path = btrfs_alloc_path(); | 3039 | path = btrfs_alloc_path(); |
3004 | if (!path) | 3040 | if (!path) |
3005 | return -ENOMEM; | 3041 | return -ENOMEM; |
3006 | 3042 | ||
3007 | di = btrfs_lookup_dir_item(trans, root, path, dir->i_ino, | 3043 | di = btrfs_lookup_dir_item(trans, root, path, dir_ino, |
3008 | name, name_len, -1); | 3044 | name, name_len, -1); |
3009 | BUG_ON(!di || IS_ERR(di)); | 3045 | BUG_ON(IS_ERR_OR_NULL(di)); |
3010 | 3046 | ||
3011 | leaf = path->nodes[0]; | 3047 | leaf = path->nodes[0]; |
3012 | btrfs_dir_item_key_to_cpu(leaf, di, &key); | 3048 | btrfs_dir_item_key_to_cpu(leaf, di, &key); |
3013 | WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); | 3049 | WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); |
3014 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | 3050 | ret = btrfs_delete_one_dir_name(trans, root, path, di); |
3015 | BUG_ON(ret); | 3051 | BUG_ON(ret); |
3016 | btrfs_release_path(root, path); | 3052 | btrfs_release_path(path); |
3017 | 3053 | ||
3018 | ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, | 3054 | ret = btrfs_del_root_ref(trans, root->fs_info->tree_root, |
3019 | objectid, root->root_key.objectid, | 3055 | objectid, root->root_key.objectid, |
3020 | dir->i_ino, &index, name, name_len); | 3056 | dir_ino, &index, name, name_len); |
3021 | if (ret < 0) { | 3057 | if (ret < 0) { |
3022 | BUG_ON(ret != -ENOENT); | 3058 | BUG_ON(ret != -ENOENT); |
3023 | di = btrfs_search_dir_index_item(root, path, dir->i_ino, | 3059 | di = btrfs_search_dir_index_item(root, path, dir_ino, |
3024 | name, name_len); | 3060 | name, name_len); |
3025 | BUG_ON(!di || IS_ERR(di)); | 3061 | BUG_ON(IS_ERR_OR_NULL(di)); |
3026 | 3062 | ||
3027 | leaf = path->nodes[0]; | 3063 | leaf = path->nodes[0]; |
3028 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | 3064 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); |
3029 | btrfs_release_path(root, path); | 3065 | btrfs_release_path(path); |
3030 | index = key.offset; | 3066 | index = key.offset; |
3031 | } | 3067 | } |
3068 | btrfs_release_path(path); | ||
3032 | 3069 | ||
3033 | di = btrfs_lookup_dir_index_item(trans, root, path, dir->i_ino, | 3070 | ret = btrfs_delete_delayed_dir_index(trans, root, dir, index); |
3034 | index, name, name_len, -1); | ||
3035 | BUG_ON(!di || IS_ERR(di)); | ||
3036 | |||
3037 | leaf = path->nodes[0]; | ||
3038 | btrfs_dir_item_key_to_cpu(leaf, di, &key); | ||
3039 | WARN_ON(key.type != BTRFS_ROOT_ITEM_KEY || key.objectid != objectid); | ||
3040 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | ||
3041 | BUG_ON(ret); | 3071 | BUG_ON(ret); |
3042 | btrfs_release_path(root, path); | ||
3043 | 3072 | ||
3044 | btrfs_i_size_write(dir, dir->i_size - name_len * 2); | 3073 | btrfs_i_size_write(dir, dir->i_size - name_len * 2); |
3045 | dir->i_mtime = dir->i_ctime = CURRENT_TIME; | 3074 | dir->i_mtime = dir->i_ctime = CURRENT_TIME; |
3046 | ret = btrfs_update_inode(trans, root, dir); | 3075 | ret = btrfs_update_inode(trans, root, dir); |
3047 | BUG_ON(ret); | 3076 | BUG_ON(ret); |
3048 | 3077 | ||
3049 | btrfs_free_path(path); | ||
3050 | return 0; | 3078 | return 0; |
3051 | } | 3079 | } |
3052 | 3080 | ||
@@ -3059,7 +3087,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
3059 | unsigned long nr = 0; | 3087 | unsigned long nr = 0; |
3060 | 3088 | ||
3061 | if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || | 3089 | if (inode->i_size > BTRFS_EMPTY_DIR_SIZE || |
3062 | inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) | 3090 | btrfs_ino(inode) == BTRFS_FIRST_FREE_OBJECTID) |
3063 | return -ENOTEMPTY; | 3091 | return -ENOTEMPTY; |
3064 | 3092 | ||
3065 | trans = __unlink_start_trans(dir, dentry); | 3093 | trans = __unlink_start_trans(dir, dentry); |
@@ -3068,7 +3096,7 @@ static int btrfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
3068 | 3096 | ||
3069 | btrfs_set_trans_block_group(trans, dir); | 3097 | btrfs_set_trans_block_group(trans, dir); |
3070 | 3098 | ||
3071 | if (unlikely(inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { | 3099 | if (unlikely(btrfs_ino(inode) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { |
3072 | err = btrfs_unlink_subvol(trans, root, dir, | 3100 | err = btrfs_unlink_subvol(trans, root, dir, |
3073 | BTRFS_I(inode)->location.objectid, | 3101 | BTRFS_I(inode)->location.objectid, |
3074 | dentry->d_name.name, | 3102 | dentry->d_name.name, |
@@ -3093,178 +3121,6 @@ out: | |||
3093 | return err; | 3121 | return err; |
3094 | } | 3122 | } |
3095 | 3123 | ||
3096 | #if 0 | ||
3097 | /* | ||
3098 | * when truncating bytes in a file, it is possible to avoid reading | ||
3099 | * the leaves that contain only checksum items. This can be the | ||
3100 | * majority of the IO required to delete a large file, but it must | ||
3101 | * be done carefully. | ||
3102 | * | ||
3103 | * The keys in the level just above the leaves are checked to make sure | ||
3104 | * the lowest key in a given leaf is a csum key, and starts at an offset | ||
3105 | * after the new size. | ||
3106 | * | ||
3107 | * Then the key for the next leaf is checked to make sure it also has | ||
3108 | * a checksum item for the same file. If it does, we know our target leaf | ||
3109 | * contains only checksum items, and it can be safely freed without reading | ||
3110 | * it. | ||
3111 | * | ||
3112 | * This is just an optimization targeted at large files. It may do | ||
3113 | * nothing. It will return 0 unless things went badly. | ||
3114 | */ | ||
3115 | static noinline int drop_csum_leaves(struct btrfs_trans_handle *trans, | ||
3116 | struct btrfs_root *root, | ||
3117 | struct btrfs_path *path, | ||
3118 | struct inode *inode, u64 new_size) | ||
3119 | { | ||
3120 | struct btrfs_key key; | ||
3121 | int ret; | ||
3122 | int nritems; | ||
3123 | struct btrfs_key found_key; | ||
3124 | struct btrfs_key other_key; | ||
3125 | struct btrfs_leaf_ref *ref; | ||
3126 | u64 leaf_gen; | ||
3127 | u64 leaf_start; | ||
3128 | |||
3129 | path->lowest_level = 1; | ||
3130 | key.objectid = inode->i_ino; | ||
3131 | key.type = BTRFS_CSUM_ITEM_KEY; | ||
3132 | key.offset = new_size; | ||
3133 | again: | ||
3134 | ret = btrfs_search_slot(trans, root, &key, path, -1, 1); | ||
3135 | if (ret < 0) | ||
3136 | goto out; | ||
3137 | |||
3138 | if (path->nodes[1] == NULL) { | ||
3139 | ret = 0; | ||
3140 | goto out; | ||
3141 | } | ||
3142 | ret = 0; | ||
3143 | btrfs_node_key_to_cpu(path->nodes[1], &found_key, path->slots[1]); | ||
3144 | nritems = btrfs_header_nritems(path->nodes[1]); | ||
3145 | |||
3146 | if (!nritems) | ||
3147 | goto out; | ||
3148 | |||
3149 | if (path->slots[1] >= nritems) | ||
3150 | goto next_node; | ||
3151 | |||
3152 | /* did we find a key greater than anything we want to delete? */ | ||
3153 | if (found_key.objectid > inode->i_ino || | ||
3154 | (found_key.objectid == inode->i_ino && found_key.type > key.type)) | ||
3155 | goto out; | ||
3156 | |||
3157 | /* we check the next key in the node to make sure the leaf contains | ||
3158 | * only checksum items. This comparison doesn't work if our | ||
3159 | * leaf is the last one in the node | ||
3160 | */ | ||
3161 | if (path->slots[1] + 1 >= nritems) { | ||
3162 | next_node: | ||
3163 | /* search forward from the last key in the node, this | ||
3164 | * will bring us into the next node in the tree | ||
3165 | */ | ||
3166 | btrfs_node_key_to_cpu(path->nodes[1], &found_key, nritems - 1); | ||
3167 | |||
3168 | /* unlikely, but we inc below, so check to be safe */ | ||
3169 | if (found_key.offset == (u64)-1) | ||
3170 | goto out; | ||
3171 | |||
3172 | /* search_forward needs a path with locks held, do the | ||
3173 | * search again for the original key. It is possible | ||
3174 | * this will race with a balance and return a path that | ||
3175 | * we could modify, but this drop is just an optimization | ||
3176 | * and is allowed to miss some leaves. | ||
3177 | */ | ||
3178 | btrfs_release_path(root, path); | ||
3179 | found_key.offset++; | ||
3180 | |||
3181 | /* setup a max key for search_forward */ | ||
3182 | other_key.offset = (u64)-1; | ||
3183 | other_key.type = key.type; | ||
3184 | other_key.objectid = key.objectid; | ||
3185 | |||
3186 | path->keep_locks = 1; | ||
3187 | ret = btrfs_search_forward(root, &found_key, &other_key, | ||
3188 | path, 0, 0); | ||
3189 | path->keep_locks = 0; | ||
3190 | if (ret || found_key.objectid != key.objectid || | ||
3191 | found_key.type != key.type) { | ||
3192 | ret = 0; | ||
3193 | goto out; | ||
3194 | } | ||
3195 | |||
3196 | key.offset = found_key.offset; | ||
3197 | btrfs_release_path(root, path); | ||
3198 | cond_resched(); | ||
3199 | goto again; | ||
3200 | } | ||
3201 | |||
3202 | /* we know there's one more slot after us in the tree, | ||
3203 | * read that key so we can verify it is also a checksum item | ||
3204 | */ | ||
3205 | btrfs_node_key_to_cpu(path->nodes[1], &other_key, path->slots[1] + 1); | ||
3206 | |||
3207 | if (found_key.objectid < inode->i_ino) | ||
3208 | goto next_key; | ||
3209 | |||
3210 | if (found_key.type != key.type || found_key.offset < new_size) | ||
3211 | goto next_key; | ||
3212 | |||
3213 | /* | ||
3214 | * if the key for the next leaf isn't a csum key from this objectid, | ||
3215 | * we can't be sure there aren't good items inside this leaf. | ||
3216 | * Bail out | ||
3217 | */ | ||
3218 | if (other_key.objectid != inode->i_ino || other_key.type != key.type) | ||
3219 | goto out; | ||
3220 | |||
3221 | leaf_start = btrfs_node_blockptr(path->nodes[1], path->slots[1]); | ||
3222 | leaf_gen = btrfs_node_ptr_generation(path->nodes[1], path->slots[1]); | ||
3223 | /* | ||
3224 | * it is safe to delete this leaf, it contains only | ||
3225 | * csum items from this inode at an offset >= new_size | ||
3226 | */ | ||
3227 | ret = btrfs_del_leaf(trans, root, path, leaf_start); | ||
3228 | BUG_ON(ret); | ||
3229 | |||
3230 | if (root->ref_cows && leaf_gen < trans->transid) { | ||
3231 | ref = btrfs_alloc_leaf_ref(root, 0); | ||
3232 | if (ref) { | ||
3233 | ref->root_gen = root->root_key.offset; | ||
3234 | ref->bytenr = leaf_start; | ||
3235 | ref->owner = 0; | ||
3236 | ref->generation = leaf_gen; | ||
3237 | ref->nritems = 0; | ||
3238 | |||
3239 | btrfs_sort_leaf_ref(ref); | ||
3240 | |||
3241 | ret = btrfs_add_leaf_ref(root, ref, 0); | ||
3242 | WARN_ON(ret); | ||
3243 | btrfs_free_leaf_ref(root, ref); | ||
3244 | } else { | ||
3245 | WARN_ON(1); | ||
3246 | } | ||
3247 | } | ||
3248 | next_key: | ||
3249 | btrfs_release_path(root, path); | ||
3250 | |||
3251 | if (other_key.objectid == inode->i_ino && | ||
3252 | other_key.type == key.type && other_key.offset > key.offset) { | ||
3253 | key.offset = other_key.offset; | ||
3254 | cond_resched(); | ||
3255 | goto again; | ||
3256 | } | ||
3257 | ret = 0; | ||
3258 | out: | ||
3259 | /* fixup any changes we've made to the path */ | ||
3260 | path->lowest_level = 0; | ||
3261 | path->keep_locks = 0; | ||
3262 | btrfs_release_path(root, path); | ||
3263 | return ret; | ||
3264 | } | ||
3265 | |||
3266 | #endif | ||
3267 | |||
3268 | /* | 3124 | /* |
3269 | * this can truncate away extent items, csum items and directory items. | 3125 | * this can truncate away extent items, csum items and directory items. |
3270 | * It starts at a high offset and removes keys until it can't find | 3126 | * It starts at a high offset and removes keys until it can't find |
@@ -3300,17 +3156,27 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans, | |||
3300 | int encoding; | 3156 | int encoding; |
3301 | int ret; | 3157 | int ret; |
3302 | int err = 0; | 3158 | int err = 0; |
3159 | u64 ino = btrfs_ino(inode); | ||
3303 | 3160 | ||
3304 | BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); | 3161 | BUG_ON(new_size > 0 && min_type != BTRFS_EXTENT_DATA_KEY); |
3305 | 3162 | ||
3306 | if (root->ref_cows || root == root->fs_info->tree_root) | 3163 | if (root->ref_cows || root == root->fs_info->tree_root) |
3307 | btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); | 3164 | btrfs_drop_extent_cache(inode, new_size & (~mask), (u64)-1, 0); |
3308 | 3165 | ||
3166 | /* | ||
3167 | * This function is also used to drop the items in the log tree before | ||
3168 | * we relog the inode, so if root != BTRFS_I(inode)->root, it means | ||
3169 | * it is used to drop the loged items. So we shouldn't kill the delayed | ||
3170 | * items. | ||
3171 | */ | ||
3172 | if (min_type == 0 && root == BTRFS_I(inode)->root) | ||
3173 | btrfs_kill_delayed_inode_items(inode); | ||
3174 | |||
3309 | path = btrfs_alloc_path(); | 3175 | path = btrfs_alloc_path(); |
3310 | BUG_ON(!path); | 3176 | BUG_ON(!path); |
3311 | path->reada = -1; | 3177 | path->reada = -1; |
3312 | 3178 | ||
3313 | key.objectid = inode->i_ino; | 3179 | key.objectid = ino; |
3314 | key.offset = (u64)-1; | 3180 | key.offset = (u64)-1; |
3315 | key.type = (u8)-1; | 3181 | key.type = (u8)-1; |
3316 | 3182 | ||
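The hunk above seeds the truncate search with the largest possible key for this inode (type = (u8)-1, offset = (u64)-1) and then walks backwards. That works because btrfs keys order as the tuple (objectid, type, offset). A minimal stand-alone sketch of that comparison, using a stand-in struct rather than the kernel's struct btrfs_key, is:

/* Illustrative only: btrfs keys compare field by field, objectid first,
 * so (ino, 0xff, ~0ULL) is an upper bound for every item of one inode.
 */
struct demo_key {
        unsigned long long objectid;
        unsigned char type;
        unsigned long long offset;
};

static int demo_key_cmp(const struct demo_key *a, const struct demo_key *b)
{
        if (a->objectid != b->objectid)
                return a->objectid < b->objectid ? -1 : 1;
        if (a->type != b->type)
                return a->type < b->type ? -1 : 1;
        if (a->offset != b->offset)
                return a->offset < b->offset ? -1 : 1;
        return 0;
}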
@@ -3338,7 +3204,7 @@ search_again: | |||
3338 | found_type = btrfs_key_type(&found_key); | 3204 | found_type = btrfs_key_type(&found_key); |
3339 | encoding = 0; | 3205 | encoding = 0; |
3340 | 3206 | ||
3341 | if (found_key.objectid != inode->i_ino) | 3207 | if (found_key.objectid != ino) |
3342 | break; | 3208 | break; |
3343 | 3209 | ||
3344 | if (found_type < min_type) | 3210 | if (found_type < min_type) |
@@ -3428,7 +3294,6 @@ search_again: | |||
3428 | btrfs_file_extent_calc_inline_size(size); | 3294 | btrfs_file_extent_calc_inline_size(size); |
3429 | ret = btrfs_truncate_item(trans, root, path, | 3295 | ret = btrfs_truncate_item(trans, root, path, |
3430 | size, 1); | 3296 | size, 1); |
3431 | BUG_ON(ret); | ||
3432 | } else if (root->ref_cows) { | 3297 | } else if (root->ref_cows) { |
3433 | inode_sub_bytes(inode, item_end + 1 - | 3298 | inode_sub_bytes(inode, item_end + 1 - |
3434 | found_key.offset); | 3299 | found_key.offset); |
@@ -3457,7 +3322,7 @@ delete: | |||
3457 | ret = btrfs_free_extent(trans, root, extent_start, | 3322 | ret = btrfs_free_extent(trans, root, extent_start, |
3458 | extent_num_bytes, 0, | 3323 | extent_num_bytes, 0, |
3459 | btrfs_header_owner(leaf), | 3324 | btrfs_header_owner(leaf), |
3460 | inode->i_ino, extent_offset); | 3325 | ino, extent_offset); |
3461 | BUG_ON(ret); | 3326 | BUG_ON(ret); |
3462 | } | 3327 | } |
3463 | 3328 | ||
@@ -3466,7 +3331,9 @@ delete: | |||
3466 | 3331 | ||
3467 | if (path->slots[0] == 0 || | 3332 | if (path->slots[0] == 0 || |
3468 | path->slots[0] != pending_del_slot) { | 3333 | path->slots[0] != pending_del_slot) { |
3469 | if (root->ref_cows) { | 3334 | if (root->ref_cows && |
3335 | BTRFS_I(inode)->location.objectid != | ||
3336 | BTRFS_FREE_INO_OBJECTID) { | ||
3470 | err = -EAGAIN; | 3337 | err = -EAGAIN; |
3471 | goto out; | 3338 | goto out; |
3472 | } | 3339 | } |
@@ -3477,7 +3344,7 @@ delete: | |||
3477 | BUG_ON(ret); | 3344 | BUG_ON(ret); |
3478 | pending_del_nr = 0; | 3345 | pending_del_nr = 0; |
3479 | } | 3346 | } |
3480 | btrfs_release_path(root, path); | 3347 | btrfs_release_path(path); |
3481 | goto search_again; | 3348 | goto search_again; |
3482 | } else { | 3349 | } else { |
3483 | path->slots[0]--; | 3350 | path->slots[0]--; |
@@ -3635,7 +3502,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) | |||
3635 | while (1) { | 3502 | while (1) { |
3636 | em = btrfs_get_extent(inode, NULL, 0, cur_offset, | 3503 | em = btrfs_get_extent(inode, NULL, 0, cur_offset, |
3637 | block_end - cur_offset, 0); | 3504 | block_end - cur_offset, 0); |
3638 | BUG_ON(IS_ERR(em) || !em); | 3505 | BUG_ON(IS_ERR_OR_NULL(em)); |
3639 | last_byte = min(extent_map_end(em), block_end); | 3506 | last_byte = min(extent_map_end(em), block_end); |
3640 | last_byte = (last_byte + mask) & ~mask; | 3507 | last_byte = (last_byte + mask) & ~mask; |
3641 | if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { | 3508 | if (!test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) { |
@@ -3656,7 +3523,7 @@ int btrfs_cont_expand(struct inode *inode, loff_t oldsize, loff_t size) | |||
3656 | break; | 3523 | break; |
3657 | 3524 | ||
3658 | err = btrfs_insert_file_extent(trans, root, | 3525 | err = btrfs_insert_file_extent(trans, root, |
3659 | inode->i_ino, cur_offset, 0, | 3526 | btrfs_ino(inode), cur_offset, 0, |
3660 | 0, hole_size, 0, hole_size, | 3527 | 0, hole_size, 0, hole_size, |
3661 | 0, 0, 0); | 3528 | 0, 0, 0); |
3662 | if (err) | 3529 | if (err) |
@@ -3758,7 +3625,7 @@ void btrfs_evict_inode(struct inode *inode) | |||
3758 | 3625 | ||
3759 | truncate_inode_pages(&inode->i_data, 0); | 3626 | truncate_inode_pages(&inode->i_data, 0); |
3760 | if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || | 3627 | if (inode->i_nlink && (btrfs_root_refs(&root->root_item) != 0 || |
3761 | root == root->fs_info->tree_root)) | 3628 | is_free_space_inode(root, inode))) |
3762 | goto no_delete; | 3629 | goto no_delete; |
3763 | 3630 | ||
3764 | if (is_bad_inode(inode)) { | 3631 | if (is_bad_inode(inode)) { |
@@ -3811,6 +3678,10 @@ void btrfs_evict_inode(struct inode *inode) | |||
3811 | BUG_ON(ret); | 3678 | BUG_ON(ret); |
3812 | } | 3679 | } |
3813 | 3680 | ||
3681 | if (!(root == root->fs_info->tree_root || | ||
3682 | root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)) | ||
3683 | btrfs_return_ino(root, btrfs_ino(inode)); | ||
3684 | |||
3814 | nr = trans->blocks_used; | 3685 | nr = trans->blocks_used; |
3815 | btrfs_end_transaction(trans, root); | 3686 | btrfs_end_transaction(trans, root); |
3816 | btrfs_btree_balance_dirty(root, nr); | 3687 | btrfs_btree_balance_dirty(root, nr); |
@@ -3836,12 +3707,12 @@ static int btrfs_inode_by_name(struct inode *dir, struct dentry *dentry, | |||
3836 | path = btrfs_alloc_path(); | 3707 | path = btrfs_alloc_path(); |
3837 | BUG_ON(!path); | 3708 | BUG_ON(!path); |
3838 | 3709 | ||
3839 | di = btrfs_lookup_dir_item(NULL, root, path, dir->i_ino, name, | 3710 | di = btrfs_lookup_dir_item(NULL, root, path, btrfs_ino(dir), name, |
3840 | namelen, 0); | 3711 | namelen, 0); |
3841 | if (IS_ERR(di)) | 3712 | if (IS_ERR(di)) |
3842 | ret = PTR_ERR(di); | 3713 | ret = PTR_ERR(di); |
3843 | 3714 | ||
3844 | if (!di || IS_ERR(di)) | 3715 | if (IS_ERR_OR_NULL(di)) |
3845 | goto out_err; | 3716 | goto out_err; |
3846 | 3717 | ||
3847 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); | 3718 | btrfs_dir_item_key_to_cpu(path->nodes[0], di, location); |
@@ -3889,7 +3760,7 @@ static int fixup_tree_root_location(struct btrfs_root *root, | |||
3889 | 3760 | ||
3890 | leaf = path->nodes[0]; | 3761 | leaf = path->nodes[0]; |
3891 | ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); | 3762 | ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_root_ref); |
3892 | if (btrfs_root_ref_dirid(leaf, ref) != dir->i_ino || | 3763 | if (btrfs_root_ref_dirid(leaf, ref) != btrfs_ino(dir) || |
3893 | btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) | 3764 | btrfs_root_ref_name_len(leaf, ref) != dentry->d_name.len) |
3894 | goto out; | 3765 | goto out; |
3895 | 3766 | ||
@@ -3899,7 +3770,7 @@ static int fixup_tree_root_location(struct btrfs_root *root, | |||
3899 | if (ret) | 3770 | if (ret) |
3900 | goto out; | 3771 | goto out; |
3901 | 3772 | ||
3902 | btrfs_release_path(root->fs_info->tree_root, path); | 3773 | btrfs_release_path(path); |
3903 | 3774 | ||
3904 | new_root = btrfs_read_fs_root_no_name(root->fs_info, location); | 3775 | new_root = btrfs_read_fs_root_no_name(root->fs_info, location); |
3905 | if (IS_ERR(new_root)) { | 3776 | if (IS_ERR(new_root)) { |
@@ -3928,6 +3799,7 @@ static void inode_tree_add(struct inode *inode) | |||
3928 | struct btrfs_inode *entry; | 3799 | struct btrfs_inode *entry; |
3929 | struct rb_node **p; | 3800 | struct rb_node **p; |
3930 | struct rb_node *parent; | 3801 | struct rb_node *parent; |
3802 | u64 ino = btrfs_ino(inode); | ||
3931 | again: | 3803 | again: |
3932 | p = &root->inode_tree.rb_node; | 3804 | p = &root->inode_tree.rb_node; |
3933 | parent = NULL; | 3805 | parent = NULL; |
@@ -3940,9 +3812,9 @@ again: | |||
3940 | parent = *p; | 3812 | parent = *p; |
3941 | entry = rb_entry(parent, struct btrfs_inode, rb_node); | 3813 | entry = rb_entry(parent, struct btrfs_inode, rb_node); |
3942 | 3814 | ||
3943 | if (inode->i_ino < entry->vfs_inode.i_ino) | 3815 | if (ino < btrfs_ino(&entry->vfs_inode)) |
3944 | p = &parent->rb_left; | 3816 | p = &parent->rb_left; |
3945 | else if (inode->i_ino > entry->vfs_inode.i_ino) | 3817 | else if (ino > btrfs_ino(&entry->vfs_inode)) |
3946 | p = &parent->rb_right; | 3818 | p = &parent->rb_right; |
3947 | else { | 3819 | else { |
3948 | WARN_ON(!(entry->vfs_inode.i_state & | 3820 | WARN_ON(!(entry->vfs_inode.i_state & |
@@ -4006,9 +3878,9 @@ again: | |||
4006 | prev = node; | 3878 | prev = node; |
4007 | entry = rb_entry(node, struct btrfs_inode, rb_node); | 3879 | entry = rb_entry(node, struct btrfs_inode, rb_node); |
4008 | 3880 | ||
4009 | if (objectid < entry->vfs_inode.i_ino) | 3881 | if (objectid < btrfs_ino(&entry->vfs_inode)) |
4010 | node = node->rb_left; | 3882 | node = node->rb_left; |
4011 | else if (objectid > entry->vfs_inode.i_ino) | 3883 | else if (objectid > btrfs_ino(&entry->vfs_inode)) |
4012 | node = node->rb_right; | 3884 | node = node->rb_right; |
4013 | else | 3885 | else |
4014 | break; | 3886 | break; |
@@ -4016,7 +3888,7 @@ again: | |||
4016 | if (!node) { | 3888 | if (!node) { |
4017 | while (prev) { | 3889 | while (prev) { |
4018 | entry = rb_entry(prev, struct btrfs_inode, rb_node); | 3890 | entry = rb_entry(prev, struct btrfs_inode, rb_node); |
4019 | if (objectid <= entry->vfs_inode.i_ino) { | 3891 | if (objectid <= btrfs_ino(&entry->vfs_inode)) { |
4020 | node = prev; | 3892 | node = prev; |
4021 | break; | 3893 | break; |
4022 | } | 3894 | } |
@@ -4025,7 +3897,7 @@ again: | |||
4025 | } | 3897 | } |
4026 | while (node) { | 3898 | while (node) { |
4027 | entry = rb_entry(node, struct btrfs_inode, rb_node); | 3899 | entry = rb_entry(node, struct btrfs_inode, rb_node); |
4028 | objectid = entry->vfs_inode.i_ino + 1; | 3900 | objectid = btrfs_ino(&entry->vfs_inode) + 1; |
4029 | inode = igrab(&entry->vfs_inode); | 3901 | inode = igrab(&entry->vfs_inode); |
4030 | if (inode) { | 3902 | if (inode) { |
4031 | spin_unlock(&root->inode_lock); | 3903 | spin_unlock(&root->inode_lock); |
@@ -4063,7 +3935,7 @@ static int btrfs_init_locked_inode(struct inode *inode, void *p) | |||
4063 | static int btrfs_find_actor(struct inode *inode, void *opaque) | 3935 | static int btrfs_find_actor(struct inode *inode, void *opaque) |
4064 | { | 3936 | { |
4065 | struct btrfs_iget_args *args = opaque; | 3937 | struct btrfs_iget_args *args = opaque; |
4066 | return args->ino == inode->i_ino && | 3938 | return args->ino == btrfs_ino(inode) && |
4067 | args->root == BTRFS_I(inode)->root; | 3939 | args->root == BTRFS_I(inode)->root; |
4068 | } | 3940 | } |
4069 | 3941 | ||
@@ -4208,7 +4080,7 @@ static struct dentry *btrfs_lookup(struct inode *dir, struct dentry *dentry, | |||
4208 | return d_splice_alias(inode, dentry); | 4080 | return d_splice_alias(inode, dentry); |
4209 | } | 4081 | } |
4210 | 4082 | ||
4211 | static unsigned char btrfs_filetype_table[] = { | 4083 | unsigned char btrfs_filetype_table[] = { |
4212 | DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK | 4084 | DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK |
4213 | }; | 4085 | }; |
4214 | 4086 | ||
@@ -4222,6 +4094,8 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, | |||
4222 | struct btrfs_key key; | 4094 | struct btrfs_key key; |
4223 | struct btrfs_key found_key; | 4095 | struct btrfs_key found_key; |
4224 | struct btrfs_path *path; | 4096 | struct btrfs_path *path; |
4097 | struct list_head ins_list; | ||
4098 | struct list_head del_list; | ||
4225 | int ret; | 4099 | int ret; |
4226 | struct extent_buffer *leaf; | 4100 | struct extent_buffer *leaf; |
4227 | int slot; | 4101 | int slot; |
@@ -4234,6 +4108,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, | |||
4234 | char tmp_name[32]; | 4108 | char tmp_name[32]; |
4235 | char *name_ptr; | 4109 | char *name_ptr; |
4236 | int name_len; | 4110 | int name_len; |
4111 | int is_curr = 0; /* filp->f_pos points to the current index? */ | ||
4237 | 4112 | ||
4238 | /* FIXME, use a real flag for deciding about the key type */ | 4113 | /* FIXME, use a real flag for deciding about the key type */ |
4239 | if (root->fs_info->tree_root == root) | 4114 | if (root->fs_info->tree_root == root) |
@@ -4241,9 +4116,7 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, | |||
4241 | 4116 | ||
4242 | /* special case for "." */ | 4117 | /* special case for "." */ |
4243 | if (filp->f_pos == 0) { | 4118 | if (filp->f_pos == 0) { |
4244 | over = filldir(dirent, ".", 1, | 4119 | over = filldir(dirent, ".", 1, 1, btrfs_ino(inode), DT_DIR); |
4245 | 1, inode->i_ino, | ||
4246 | DT_DIR); | ||
4247 | if (over) | 4120 | if (over) |
4248 | return 0; | 4121 | return 0; |
4249 | filp->f_pos = 1; | 4122 | filp->f_pos = 1; |
@@ -4258,11 +4131,19 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, | |||
4258 | filp->f_pos = 2; | 4131 | filp->f_pos = 2; |
4259 | } | 4132 | } |
4260 | path = btrfs_alloc_path(); | 4133 | path = btrfs_alloc_path(); |
4134 | if (!path) | ||
4135 | return -ENOMEM; | ||
4261 | path->reada = 2; | 4136 | path->reada = 2; |
4262 | 4137 | ||
4138 | if (key_type == BTRFS_DIR_INDEX_KEY) { | ||
4139 | INIT_LIST_HEAD(&ins_list); | ||
4140 | INIT_LIST_HEAD(&del_list); | ||
4141 | btrfs_get_delayed_items(inode, &ins_list, &del_list); | ||
4142 | } | ||
4143 | |||
4263 | btrfs_set_key_type(&key, key_type); | 4144 | btrfs_set_key_type(&key, key_type); |
4264 | key.offset = filp->f_pos; | 4145 | key.offset = filp->f_pos; |
4265 | key.objectid = inode->i_ino; | 4146 | key.objectid = btrfs_ino(inode); |
4266 | 4147 | ||
4267 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | 4148 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); |
4268 | if (ret < 0) | 4149 | if (ret < 0) |
@@ -4289,8 +4170,13 @@ static int btrfs_real_readdir(struct file *filp, void *dirent, | |||
4289 | break; | 4170 | break; |
4290 | if (found_key.offset < filp->f_pos) | 4171 | if (found_key.offset < filp->f_pos) |
4291 | goto next; | 4172 | goto next; |
4173 | if (key_type == BTRFS_DIR_INDEX_KEY && | ||
4174 | btrfs_should_delete_dir_index(&del_list, | ||
4175 | found_key.offset)) | ||
4176 | goto next; | ||
4292 | 4177 | ||
4293 | filp->f_pos = found_key.offset; | 4178 | filp->f_pos = found_key.offset; |
4179 | is_curr = 1; | ||
4294 | 4180 | ||
4295 | di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); | 4181 | di = btrfs_item_ptr(leaf, slot, struct btrfs_dir_item); |
4296 | di_cur = 0; | 4182 | di_cur = 0; |
@@ -4345,6 +4231,15 @@ next: | |||
4345 | path->slots[0]++; | 4231 | path->slots[0]++; |
4346 | } | 4232 | } |
4347 | 4233 | ||
4234 | if (key_type == BTRFS_DIR_INDEX_KEY) { | ||
4235 | if (is_curr) | ||
4236 | filp->f_pos++; | ||
4237 | ret = btrfs_readdir_delayed_dir_index(filp, dirent, filldir, | ||
4238 | &ins_list); | ||
4239 | if (ret) | ||
4240 | goto nopos; | ||
4241 | } | ||
4242 | |||
4348 | /* Reached end of directory/root. Bump pos past the last item. */ | 4243 | /* Reached end of directory/root. Bump pos past the last item. */ |
4349 | if (key_type == BTRFS_DIR_INDEX_KEY) | 4244 | if (key_type == BTRFS_DIR_INDEX_KEY) |
4350 | /* | 4245 | /* |
@@ -4357,6 +4252,8 @@ next: | |||
4357 | nopos: | 4252 | nopos: |
4358 | ret = 0; | 4253 | ret = 0; |
4359 | err: | 4254 | err: |
4255 | if (key_type == BTRFS_DIR_INDEX_KEY) | ||
4256 | btrfs_put_delayed_items(&ins_list, &del_list); | ||
4360 | btrfs_free_path(path); | 4257 | btrfs_free_path(path); |
4361 | return ret; | 4258 | return ret; |
4362 | } | 4259 | } |
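The readdir hunks above splice the per-inode delayed items into the directory listing: committed dir-index items whose index appears on the delayed-deletion list are skipped, and the delayed insertions are emitted once the on-disk walk finishes. A self-contained sketch of that two-pass merge, with toy arrays standing in for the real delayed-item lists, is:

/* Toy model of the merge btrfs_real_readdir() performs above; all data
 * and types here are made up for illustration.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct demo_dirent { unsigned long long index; const char *name; };

static const struct demo_dirent on_disk[]  = { {2, "a"}, {3, "b"}, {5, "c"} };
static const unsigned long long del_idx[]  = { 3 };         /* delayed deletions */
static const struct demo_dirent ins_list[] = { {7, "d"} };  /* delayed insertions */

static bool delayed_deleted(unsigned long long index)
{
        for (size_t i = 0; i < sizeof(del_idx) / sizeof(del_idx[0]); i++)
                if (del_idx[i] == index)
                        return true;
        return false;
}

int main(void)
{
        /* pass 1: committed dir-index items, minus delayed deletions */
        for (size_t i = 0; i < sizeof(on_disk) / sizeof(on_disk[0]); i++)
                if (!delayed_deleted(on_disk[i].index))
                        printf("%llu %s\n", on_disk[i].index, on_disk[i].name);

        /* pass 2: delayed insertions that are not in the tree yet */
        for (size_t i = 0; i < sizeof(ins_list) / sizeof(ins_list[0]); i++)
                printf("%llu %s\n", ins_list[i].index, ins_list[i].name);
        return 0;
}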
@@ -4372,7 +4269,8 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc) | |||
4372 | return 0; | 4269 | return 0; |
4373 | 4270 | ||
4374 | smp_mb(); | 4271 | smp_mb(); |
4375 | nolock = (root->fs_info->closing && root == root->fs_info->tree_root); | 4272 | if (root->fs_info->closing && is_free_space_inode(root, inode)) |
4273 | nolock = true; | ||
4376 | 4274 | ||
4377 | if (wbc->sync_mode == WB_SYNC_ALL) { | 4275 | if (wbc->sync_mode == WB_SYNC_ALL) { |
4378 | if (nolock) | 4276 | if (nolock) |
@@ -4415,25 +4313,25 @@ void btrfs_dirty_inode(struct inode *inode) | |||
4415 | btrfs_end_transaction(trans, root); | 4313 | btrfs_end_transaction(trans, root); |
4416 | trans = btrfs_start_transaction(root, 1); | 4314 | trans = btrfs_start_transaction(root, 1); |
4417 | if (IS_ERR(trans)) { | 4315 | if (IS_ERR(trans)) { |
4418 | if (printk_ratelimit()) { | 4316 | printk_ratelimited(KERN_ERR "btrfs: fail to " |
4419 | printk(KERN_ERR "btrfs: fail to " | 4317 | "dirty inode %llu error %ld\n", |
4420 | "dirty inode %lu error %ld\n", | 4318 | (unsigned long long)btrfs_ino(inode), |
4421 | inode->i_ino, PTR_ERR(trans)); | 4319 | PTR_ERR(trans)); |
4422 | } | ||
4423 | return; | 4320 | return; |
4424 | } | 4321 | } |
4425 | btrfs_set_trans_block_group(trans, inode); | 4322 | btrfs_set_trans_block_group(trans, inode); |
4426 | 4323 | ||
4427 | ret = btrfs_update_inode(trans, root, inode); | 4324 | ret = btrfs_update_inode(trans, root, inode); |
4428 | if (ret) { | 4325 | if (ret) { |
4429 | if (printk_ratelimit()) { | 4326 | printk_ratelimited(KERN_ERR "btrfs: fail to " |
4430 | printk(KERN_ERR "btrfs: fail to " | 4327 | "dirty inode %llu error %d\n", |
4431 | "dirty inode %lu error %d\n", | 4328 | (unsigned long long)btrfs_ino(inode), |
4432 | inode->i_ino, ret); | 4329 | ret); |
4433 | } | ||
4434 | } | 4330 | } |
4435 | } | 4331 | } |
4436 | btrfs_end_transaction(trans, root); | 4332 | btrfs_end_transaction(trans, root); |
4333 | if (BTRFS_I(inode)->delayed_node) | ||
4334 | btrfs_balance_delayed_items(root); | ||
4437 | } | 4335 | } |
4438 | 4336 | ||
4439 | /* | 4337 | /* |
@@ -4449,7 +4347,7 @@ static int btrfs_set_inode_index_count(struct inode *inode) | |||
4449 | struct extent_buffer *leaf; | 4347 | struct extent_buffer *leaf; |
4450 | int ret; | 4348 | int ret; |
4451 | 4349 | ||
4452 | key.objectid = inode->i_ino; | 4350 | key.objectid = btrfs_ino(inode); |
4453 | btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); | 4351 | btrfs_set_key_type(&key, BTRFS_DIR_INDEX_KEY); |
4454 | key.offset = (u64)-1; | 4352 | key.offset = (u64)-1; |
4455 | 4353 | ||
@@ -4481,7 +4379,7 @@ static int btrfs_set_inode_index_count(struct inode *inode) | |||
4481 | leaf = path->nodes[0]; | 4379 | leaf = path->nodes[0]; |
4482 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | 4380 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
4483 | 4381 | ||
4484 | if (found_key.objectid != inode->i_ino || | 4382 | if (found_key.objectid != btrfs_ino(inode) || |
4485 | btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { | 4383 | btrfs_key_type(&found_key) != BTRFS_DIR_INDEX_KEY) { |
4486 | BTRFS_I(inode)->index_cnt = 2; | 4384 | BTRFS_I(inode)->index_cnt = 2; |
4487 | goto out; | 4385 | goto out; |
@@ -4502,9 +4400,12 @@ int btrfs_set_inode_index(struct inode *dir, u64 *index) | |||
4502 | int ret = 0; | 4400 | int ret = 0; |
4503 | 4401 | ||
4504 | if (BTRFS_I(dir)->index_cnt == (u64)-1) { | 4402 | if (BTRFS_I(dir)->index_cnt == (u64)-1) { |
4505 | ret = btrfs_set_inode_index_count(dir); | 4403 | ret = btrfs_inode_delayed_dir_index_count(dir); |
4506 | if (ret) | 4404 | if (ret) { |
4507 | return ret; | 4405 | ret = btrfs_set_inode_index_count(dir); |
4406 | if (ret) | ||
4407 | return ret; | ||
4408 | } | ||
4508 | } | 4409 | } |
4509 | 4410 | ||
4510 | *index = BTRFS_I(dir)->index_cnt; | 4411 | *index = BTRFS_I(dir)->index_cnt; |
@@ -4540,6 +4441,12 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
4540 | return ERR_PTR(-ENOMEM); | 4441 | return ERR_PTR(-ENOMEM); |
4541 | } | 4442 | } |
4542 | 4443 | ||
4444 | /* | ||
4445 | * we have to initialize this early, so we can reclaim the inode | ||
4446 | * number if we fail afterwards in this function. | ||
4447 | */ | ||
4448 | inode->i_ino = objectid; | ||
4449 | |||
4543 | if (dir) { | 4450 | if (dir) { |
4544 | trace_btrfs_inode_request(dir); | 4451 | trace_btrfs_inode_request(dir); |
4545 | 4452 | ||
@@ -4585,7 +4492,6 @@ static struct inode *btrfs_new_inode(struct btrfs_trans_handle *trans, | |||
4585 | goto fail; | 4492 | goto fail; |
4586 | 4493 | ||
4587 | inode_init_owner(inode, dir, mode); | 4494 | inode_init_owner(inode, dir, mode); |
4588 | inode->i_ino = objectid; | ||
4589 | inode_set_bytes(inode, 0); | 4495 | inode_set_bytes(inode, 0); |
4590 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; | 4496 | inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME; |
4591 | inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], | 4497 | inode_item = btrfs_item_ptr(path->nodes[0], path->slots[0], |
@@ -4649,29 +4555,29 @@ int btrfs_add_link(struct btrfs_trans_handle *trans, | |||
4649 | int ret = 0; | 4555 | int ret = 0; |
4650 | struct btrfs_key key; | 4556 | struct btrfs_key key; |
4651 | struct btrfs_root *root = BTRFS_I(parent_inode)->root; | 4557 | struct btrfs_root *root = BTRFS_I(parent_inode)->root; |
4558 | u64 ino = btrfs_ino(inode); | ||
4559 | u64 parent_ino = btrfs_ino(parent_inode); | ||
4652 | 4560 | ||
4653 | if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { | 4561 | if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { |
4654 | memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); | 4562 | memcpy(&key, &BTRFS_I(inode)->root->root_key, sizeof(key)); |
4655 | } else { | 4563 | } else { |
4656 | key.objectid = inode->i_ino; | 4564 | key.objectid = ino; |
4657 | btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); | 4565 | btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY); |
4658 | key.offset = 0; | 4566 | key.offset = 0; |
4659 | } | 4567 | } |
4660 | 4568 | ||
4661 | if (unlikely(inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { | 4569 | if (unlikely(ino == BTRFS_FIRST_FREE_OBJECTID)) { |
4662 | ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, | 4570 | ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, |
4663 | key.objectid, root->root_key.objectid, | 4571 | key.objectid, root->root_key.objectid, |
4664 | parent_inode->i_ino, | 4572 | parent_ino, index, name, name_len); |
4665 | index, name, name_len); | ||
4666 | } else if (add_backref) { | 4573 | } else if (add_backref) { |
4667 | ret = btrfs_insert_inode_ref(trans, root, | 4574 | ret = btrfs_insert_inode_ref(trans, root, name, name_len, ino, |
4668 | name, name_len, inode->i_ino, | 4575 | parent_ino, index); |
4669 | parent_inode->i_ino, index); | ||
4670 | } | 4576 | } |
4671 | 4577 | ||
4672 | if (ret == 0) { | 4578 | if (ret == 0) { |
4673 | ret = btrfs_insert_dir_item(trans, root, name, name_len, | 4579 | ret = btrfs_insert_dir_item(trans, root, name, name_len, |
4674 | parent_inode->i_ino, &key, | 4580 | parent_inode, &key, |
4675 | btrfs_inode_type(inode), index); | 4581 | btrfs_inode_type(inode), index); |
4676 | BUG_ON(ret); | 4582 | BUG_ON(ret); |
4677 | 4583 | ||
@@ -4714,10 +4620,6 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, | |||
4714 | if (!new_valid_dev(rdev)) | 4620 | if (!new_valid_dev(rdev)) |
4715 | return -EINVAL; | 4621 | return -EINVAL; |
4716 | 4622 | ||
4717 | err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); | ||
4718 | if (err) | ||
4719 | return err; | ||
4720 | |||
4721 | /* | 4623 | /* |
4722 | * 2 for inode item and ref | 4624 | * 2 for inode item and ref |
4723 | * 2 for dir items | 4625 | * 2 for dir items |
@@ -4729,8 +4631,12 @@ static int btrfs_mknod(struct inode *dir, struct dentry *dentry, | |||
4729 | 4631 | ||
4730 | btrfs_set_trans_block_group(trans, dir); | 4632 | btrfs_set_trans_block_group(trans, dir); |
4731 | 4633 | ||
4634 | err = btrfs_find_free_ino(root, &objectid); | ||
4635 | if (err) | ||
4636 | goto out_unlock; | ||
4637 | |||
4732 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, | 4638 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
4733 | dentry->d_name.len, dir->i_ino, objectid, | 4639 | dentry->d_name.len, btrfs_ino(dir), objectid, |
4734 | BTRFS_I(dir)->block_group, mode, &index); | 4640 | BTRFS_I(dir)->block_group, mode, &index); |
4735 | if (IS_ERR(inode)) { | 4641 | if (IS_ERR(inode)) { |
4736 | err = PTR_ERR(inode); | 4642 | err = PTR_ERR(inode); |
@@ -4777,9 +4683,6 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, | |||
4777 | u64 objectid; | 4683 | u64 objectid; |
4778 | u64 index = 0; | 4684 | u64 index = 0; |
4779 | 4685 | ||
4780 | err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); | ||
4781 | if (err) | ||
4782 | return err; | ||
4783 | /* | 4686 | /* |
4784 | * 2 for inode item and ref | 4687 | * 2 for inode item and ref |
4785 | * 2 for dir items | 4688 | * 2 for dir items |
@@ -4791,8 +4694,12 @@ static int btrfs_create(struct inode *dir, struct dentry *dentry, | |||
4791 | 4694 | ||
4792 | btrfs_set_trans_block_group(trans, dir); | 4695 | btrfs_set_trans_block_group(trans, dir); |
4793 | 4696 | ||
4697 | err = btrfs_find_free_ino(root, &objectid); | ||
4698 | if (err) | ||
4699 | goto out_unlock; | ||
4700 | |||
4794 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, | 4701 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
4795 | dentry->d_name.len, dir->i_ino, objectid, | 4702 | dentry->d_name.len, btrfs_ino(dir), objectid, |
4796 | BTRFS_I(dir)->block_group, mode, &index); | 4703 | BTRFS_I(dir)->block_group, mode, &index); |
4797 | if (IS_ERR(inode)) { | 4704 | if (IS_ERR(inode)) { |
4798 | err = PTR_ERR(inode); | 4705 | err = PTR_ERR(inode); |
@@ -4903,10 +4810,6 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
4903 | u64 index = 0; | 4810 | u64 index = 0; |
4904 | unsigned long nr = 1; | 4811 | unsigned long nr = 1; |
4905 | 4812 | ||
4906 | err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); | ||
4907 | if (err) | ||
4908 | return err; | ||
4909 | |||
4910 | /* | 4813 | /* |
4911 | * 2 items for inode and ref | 4814 | * 2 items for inode and ref |
4912 | * 2 items for dir items | 4815 | * 2 items for dir items |
@@ -4917,8 +4820,12 @@ static int btrfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) | |||
4917 | return PTR_ERR(trans); | 4820 | return PTR_ERR(trans); |
4918 | btrfs_set_trans_block_group(trans, dir); | 4821 | btrfs_set_trans_block_group(trans, dir); |
4919 | 4822 | ||
4823 | err = btrfs_find_free_ino(root, &objectid); | ||
4824 | if (err) | ||
4825 | goto out_fail; | ||
4826 | |||
4920 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, | 4827 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
4921 | dentry->d_name.len, dir->i_ino, objectid, | 4828 | dentry->d_name.len, btrfs_ino(dir), objectid, |
4922 | BTRFS_I(dir)->block_group, S_IFDIR | mode, | 4829 | BTRFS_I(dir)->block_group, S_IFDIR | mode, |
4923 | &index); | 4830 | &index); |
4924 | if (IS_ERR(inode)) { | 4831 | if (IS_ERR(inode)) { |
@@ -5041,7 +4948,7 @@ struct extent_map *btrfs_get_extent(struct inode *inode, struct page *page, | |||
5041 | u64 bytenr; | 4948 | u64 bytenr; |
5042 | u64 extent_start = 0; | 4949 | u64 extent_start = 0; |
5043 | u64 extent_end = 0; | 4950 | u64 extent_end = 0; |
5044 | u64 objectid = inode->i_ino; | 4951 | u64 objectid = btrfs_ino(inode); |
5045 | u32 found_type; | 4952 | u32 found_type; |
5046 | struct btrfs_path *path = NULL; | 4953 | struct btrfs_path *path = NULL; |
5047 | struct btrfs_root *root = BTRFS_I(inode)->root; | 4954 | struct btrfs_root *root = BTRFS_I(inode)->root; |
@@ -5069,7 +4976,7 @@ again: | |||
5069 | else | 4976 | else |
5070 | goto out; | 4977 | goto out; |
5071 | } | 4978 | } |
5072 | em = alloc_extent_map(GFP_NOFS); | 4979 | em = alloc_extent_map(); |
5073 | if (!em) { | 4980 | if (!em) { |
5074 | err = -ENOMEM; | 4981 | err = -ENOMEM; |
5075 | goto out; | 4982 | goto out; |
@@ -5223,7 +5130,7 @@ again: | |||
5223 | kunmap(page); | 5130 | kunmap(page); |
5224 | free_extent_map(em); | 5131 | free_extent_map(em); |
5225 | em = NULL; | 5132 | em = NULL; |
5226 | btrfs_release_path(root, path); | 5133 | btrfs_release_path(path); |
5227 | trans = btrfs_join_transaction(root, 1); | 5134 | trans = btrfs_join_transaction(root, 1); |
5228 | if (IS_ERR(trans)) | 5135 | if (IS_ERR(trans)) |
5229 | return ERR_CAST(trans); | 5136 | return ERR_CAST(trans); |
@@ -5249,7 +5156,7 @@ not_found_em: | |||
5249 | em->block_start = EXTENT_MAP_HOLE; | 5156 | em->block_start = EXTENT_MAP_HOLE; |
5250 | set_bit(EXTENT_FLAG_VACANCY, &em->flags); | 5157 | set_bit(EXTENT_FLAG_VACANCY, &em->flags); |
5251 | insert: | 5158 | insert: |
5252 | btrfs_release_path(root, path); | 5159 | btrfs_release_path(path); |
5253 | if (em->start > start || extent_map_end(em) <= start) { | 5160 | if (em->start > start || extent_map_end(em) <= start) { |
5254 | printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed " | 5161 | printk(KERN_ERR "Btrfs: bad extent! em: [%llu %llu] passed " |
5255 | "[%llu %llu]\n", (unsigned long long)em->start, | 5162 | "[%llu %llu]\n", (unsigned long long)em->start, |
@@ -5382,7 +5289,7 @@ struct extent_map *btrfs_get_extent_fiemap(struct inode *inode, struct page *pag | |||
5382 | u64 hole_start = start; | 5289 | u64 hole_start = start; |
5383 | u64 hole_len = len; | 5290 | u64 hole_len = len; |
5384 | 5291 | ||
5385 | em = alloc_extent_map(GFP_NOFS); | 5292 | em = alloc_extent_map(); |
5386 | if (!em) { | 5293 | if (!em) { |
5387 | err = -ENOMEM; | 5294 | err = -ENOMEM; |
5388 | goto out; | 5295 | goto out; |
@@ -5472,6 +5379,9 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, | |||
5472 | if (IS_ERR(trans)) | 5379 | if (IS_ERR(trans)) |
5473 | return ERR_CAST(trans); | 5380 | return ERR_CAST(trans); |
5474 | 5381 | ||
5382 | if (start <= BTRFS_I(inode)->disk_i_size && len < 64 * 1024) | ||
5383 | btrfs_add_inode_defrag(trans, inode); | ||
5384 | |||
5475 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; | 5385 | trans->block_rsv = &root->fs_info->delalloc_block_rsv; |
5476 | 5386 | ||
5477 | alloc_hint = get_extent_allocation_hint(inode, start, len); | 5387 | alloc_hint = get_extent_allocation_hint(inode, start, len); |
@@ -5483,7 +5393,7 @@ static struct extent_map *btrfs_new_extent_direct(struct inode *inode, | |||
5483 | } | 5393 | } |
5484 | 5394 | ||
5485 | if (!em) { | 5395 | if (!em) { |
5486 | em = alloc_extent_map(GFP_NOFS); | 5396 | em = alloc_extent_map(); |
5487 | if (!em) { | 5397 | if (!em) { |
5488 | em = ERR_PTR(-ENOMEM); | 5398 | em = ERR_PTR(-ENOMEM); |
5489 | goto out; | 5399 | goto out; |
@@ -5549,7 +5459,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, | |||
5549 | if (!path) | 5459 | if (!path) |
5550 | return -ENOMEM; | 5460 | return -ENOMEM; |
5551 | 5461 | ||
5552 | ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, | 5462 | ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), |
5553 | offset, 0); | 5463 | offset, 0); |
5554 | if (ret < 0) | 5464 | if (ret < 0) |
5555 | goto out; | 5465 | goto out; |
@@ -5566,7 +5476,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, | |||
5566 | ret = 0; | 5476 | ret = 0; |
5567 | leaf = path->nodes[0]; | 5477 | leaf = path->nodes[0]; |
5568 | btrfs_item_key_to_cpu(leaf, &key, slot); | 5478 | btrfs_item_key_to_cpu(leaf, &key, slot); |
5569 | if (key.objectid != inode->i_ino || | 5479 | if (key.objectid != btrfs_ino(inode) || |
5570 | key.type != BTRFS_EXTENT_DATA_KEY) { | 5480 | key.type != BTRFS_EXTENT_DATA_KEY) { |
5571 | /* not our file or wrong item type, must cow */ | 5481 | /* not our file or wrong item type, must cow */ |
5572 | goto out; | 5482 | goto out; |
@@ -5600,7 +5510,7 @@ static noinline int can_nocow_odirect(struct btrfs_trans_handle *trans, | |||
5600 | * look for other files referencing this extent, if we | 5510 | * look for other files referencing this extent, if we |
5601 | * find any we must cow | 5511 | * find any we must cow |
5602 | */ | 5512 | */ |
5603 | if (btrfs_cross_ref_exist(trans, root, inode->i_ino, | 5513 | if (btrfs_cross_ref_exist(trans, root, btrfs_ino(inode), |
5604 | key.offset - backref_offset, disk_bytenr)) | 5514 | key.offset - backref_offset, disk_bytenr)) |
5605 | goto out; | 5515 | goto out; |
5606 | 5516 | ||
@@ -5790,9 +5700,10 @@ static void btrfs_endio_direct_read(struct bio *bio, int err) | |||
5790 | 5700 | ||
5791 | flush_dcache_page(bvec->bv_page); | 5701 | flush_dcache_page(bvec->bv_page); |
5792 | if (csum != *private) { | 5702 | if (csum != *private) { |
5793 | printk(KERN_ERR "btrfs csum failed ino %lu off" | 5703 | printk(KERN_ERR "btrfs csum failed ino %llu off" |
5794 | " %llu csum %u private %u\n", | 5704 | " %llu csum %u private %u\n", |
5795 | inode->i_ino, (unsigned long long)start, | 5705 | (unsigned long long)btrfs_ino(inode), |
5706 | (unsigned long long)start, | ||
5796 | csum, *private); | 5707 | csum, *private); |
5797 | err = -EIO; | 5708 | err = -EIO; |
5798 | } | 5709 | } |
@@ -5939,9 +5850,9 @@ static void btrfs_end_dio_bio(struct bio *bio, int err) | |||
5939 | struct btrfs_dio_private *dip = bio->bi_private; | 5850 | struct btrfs_dio_private *dip = bio->bi_private; |
5940 | 5851 | ||
5941 | if (err) { | 5852 | if (err) { |
5942 | printk(KERN_ERR "btrfs direct IO failed ino %lu rw %lu " | 5853 | printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu " |
5943 | "sector %#Lx len %u err no %d\n", | 5854 | "sector %#Lx len %u err no %d\n", |
5944 | dip->inode->i_ino, bio->bi_rw, | 5855 | (unsigned long long)btrfs_ino(dip->inode), bio->bi_rw, |
5945 | (unsigned long long)bio->bi_sector, bio->bi_size, err); | 5856 | (unsigned long long)bio->bi_sector, bio->bi_size, err); |
5946 | dip->errors = 1; | 5857 | dip->errors = 1; |
5947 | 5858 | ||
@@ -6782,12 +6693,15 @@ struct inode *btrfs_alloc_inode(struct super_block *sb) | |||
6782 | ei->ordered_data_close = 0; | 6693 | ei->ordered_data_close = 0; |
6783 | ei->orphan_meta_reserved = 0; | 6694 | ei->orphan_meta_reserved = 0; |
6784 | ei->dummy_inode = 0; | 6695 | ei->dummy_inode = 0; |
6696 | ei->in_defrag = 0; | ||
6785 | ei->force_compress = BTRFS_COMPRESS_NONE; | 6697 | ei->force_compress = BTRFS_COMPRESS_NONE; |
6786 | 6698 | ||
6699 | ei->delayed_node = NULL; | ||
6700 | |||
6787 | inode = &ei->vfs_inode; | 6701 | inode = &ei->vfs_inode; |
6788 | extent_map_tree_init(&ei->extent_tree, GFP_NOFS); | 6702 | extent_map_tree_init(&ei->extent_tree); |
6789 | extent_io_tree_init(&ei->io_tree, &inode->i_data, GFP_NOFS); | 6703 | extent_io_tree_init(&ei->io_tree, &inode->i_data); |
6790 | extent_io_tree_init(&ei->io_failure_tree, &inode->i_data, GFP_NOFS); | 6704 | extent_io_tree_init(&ei->io_failure_tree, &inode->i_data); |
6791 | mutex_init(&ei->log_mutex); | 6705 | mutex_init(&ei->log_mutex); |
6792 | btrfs_ordered_inode_tree_init(&ei->ordered_tree); | 6706 | btrfs_ordered_inode_tree_init(&ei->ordered_tree); |
6793 | INIT_LIST_HEAD(&ei->i_orphan); | 6707 | INIT_LIST_HEAD(&ei->i_orphan); |
@@ -6851,8 +6765,8 @@ void btrfs_destroy_inode(struct inode *inode) | |||
6851 | 6765 | ||
6852 | spin_lock(&root->orphan_lock); | 6766 | spin_lock(&root->orphan_lock); |
6853 | if (!list_empty(&BTRFS_I(inode)->i_orphan)) { | 6767 | if (!list_empty(&BTRFS_I(inode)->i_orphan)) { |
6854 | printk(KERN_INFO "BTRFS: inode %lu still on the orphan list\n", | 6768 | printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n", |
6855 | inode->i_ino); | 6769 | (unsigned long long)btrfs_ino(inode)); |
6856 | list_del_init(&BTRFS_I(inode)->i_orphan); | 6770 | list_del_init(&BTRFS_I(inode)->i_orphan); |
6857 | } | 6771 | } |
6858 | spin_unlock(&root->orphan_lock); | 6772 | spin_unlock(&root->orphan_lock); |
@@ -6874,6 +6788,7 @@ void btrfs_destroy_inode(struct inode *inode) | |||
6874 | inode_tree_del(inode); | 6788 | inode_tree_del(inode); |
6875 | btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); | 6789 | btrfs_drop_extent_cache(inode, 0, (u64)-1, 0); |
6876 | free: | 6790 | free: |
6791 | btrfs_remove_delayed_node(inode); | ||
6877 | call_rcu(&inode->i_rcu, btrfs_i_callback); | 6792 | call_rcu(&inode->i_rcu, btrfs_i_callback); |
6878 | } | 6793 | } |
6879 | 6794 | ||
@@ -6882,7 +6797,7 @@ int btrfs_drop_inode(struct inode *inode) | |||
6882 | struct btrfs_root *root = BTRFS_I(inode)->root; | 6797 | struct btrfs_root *root = BTRFS_I(inode)->root; |
6883 | 6798 | ||
6884 | if (btrfs_root_refs(&root->root_item) == 0 && | 6799 | if (btrfs_root_refs(&root->root_item) == 0 && |
6885 | root != root->fs_info->tree_root) | 6800 | !is_free_space_inode(root, inode)) |
6886 | return 1; | 6801 | return 1; |
6887 | else | 6802 | else |
6888 | return generic_drop_inode(inode); | 6803 | return generic_drop_inode(inode); |
@@ -6991,16 +6906,17 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
6991 | u64 index = 0; | 6906 | u64 index = 0; |
6992 | u64 root_objectid; | 6907 | u64 root_objectid; |
6993 | int ret; | 6908 | int ret; |
6909 | u64 old_ino = btrfs_ino(old_inode); | ||
6994 | 6910 | ||
6995 | if (new_dir->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) | 6911 | if (btrfs_ino(new_dir) == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID) |
6996 | return -EPERM; | 6912 | return -EPERM; |
6997 | 6913 | ||
6998 | /* we only allow rename subvolume link between subvolumes */ | 6914 | /* we only allow rename subvolume link between subvolumes */ |
6999 | if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) | 6915 | if (old_ino != BTRFS_FIRST_FREE_OBJECTID && root != dest) |
7000 | return -EXDEV; | 6916 | return -EXDEV; |
7001 | 6917 | ||
7002 | if (old_inode->i_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || | 6918 | if (old_ino == BTRFS_EMPTY_SUBVOL_DIR_OBJECTID || |
7003 | (new_inode && new_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) | 6919 | (new_inode && btrfs_ino(new_inode) == BTRFS_FIRST_FREE_OBJECTID)) |
7004 | return -ENOTEMPTY; | 6920 | return -ENOTEMPTY; |
7005 | 6921 | ||
7006 | if (S_ISDIR(old_inode->i_mode) && new_inode && | 6922 | if (S_ISDIR(old_inode->i_mode) && new_inode && |
@@ -7016,7 +6932,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
7016 | filemap_flush(old_inode->i_mapping); | 6932 | filemap_flush(old_inode->i_mapping); |
7017 | 6933 | ||
7018 | /* close the racy window with snapshot create/destroy ioctl */ | 6934 | /* close the racy window with snapshot create/destroy ioctl */ |
7019 | if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) | 6935 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) |
7020 | down_read(&root->fs_info->subvol_sem); | 6936 | down_read(&root->fs_info->subvol_sem); |
7021 | /* | 6937 | /* |
7022 | * We want to reserve the absolute worst case amount of items. So if | 6938 | * We want to reserve the absolute worst case amount of items. So if |
@@ -7041,15 +6957,15 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
7041 | if (ret) | 6957 | if (ret) |
7042 | goto out_fail; | 6958 | goto out_fail; |
7043 | 6959 | ||
7044 | if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { | 6960 | if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { |
7045 | /* force full log commit if subvolume involved. */ | 6961 | /* force full log commit if subvolume involved. */ |
7046 | root->fs_info->last_trans_log_full_commit = trans->transid; | 6962 | root->fs_info->last_trans_log_full_commit = trans->transid; |
7047 | } else { | 6963 | } else { |
7048 | ret = btrfs_insert_inode_ref(trans, dest, | 6964 | ret = btrfs_insert_inode_ref(trans, dest, |
7049 | new_dentry->d_name.name, | 6965 | new_dentry->d_name.name, |
7050 | new_dentry->d_name.len, | 6966 | new_dentry->d_name.len, |
7051 | old_inode->i_ino, | 6967 | old_ino, |
7052 | new_dir->i_ino, index); | 6968 | btrfs_ino(new_dir), index); |
7053 | if (ret) | 6969 | if (ret) |
7054 | goto out_fail; | 6970 | goto out_fail; |
7055 | /* | 6971 | /* |
@@ -7065,10 +6981,8 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
7065 | * make sure the inode gets flushed if it is replacing | 6981 | * make sure the inode gets flushed if it is replacing |
7066 | * something. | 6982 | * something. |
7067 | */ | 6983 | */ |
7068 | if (new_inode && new_inode->i_size && | 6984 | if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode)) |
7069 | old_inode && S_ISREG(old_inode->i_mode)) { | ||
7070 | btrfs_add_ordered_operation(trans, root, old_inode); | 6985 | btrfs_add_ordered_operation(trans, root, old_inode); |
7071 | } | ||
7072 | 6986 | ||
7073 | old_dir->i_ctime = old_dir->i_mtime = ctime; | 6987 | old_dir->i_ctime = old_dir->i_mtime = ctime; |
7074 | new_dir->i_ctime = new_dir->i_mtime = ctime; | 6988 | new_dir->i_ctime = new_dir->i_mtime = ctime; |
@@ -7077,7 +6991,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
7077 | if (old_dentry->d_parent != new_dentry->d_parent) | 6991 | if (old_dentry->d_parent != new_dentry->d_parent) |
7078 | btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); | 6992 | btrfs_record_unlink_dir(trans, old_dir, old_inode, 1); |
7079 | 6993 | ||
7080 | if (unlikely(old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID)) { | 6994 | if (unlikely(old_ino == BTRFS_FIRST_FREE_OBJECTID)) { |
7081 | root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; | 6995 | root_objectid = BTRFS_I(old_inode)->root->root_key.objectid; |
7082 | ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, | 6996 | ret = btrfs_unlink_subvol(trans, root, old_dir, root_objectid, |
7083 | old_dentry->d_name.name, | 6997 | old_dentry->d_name.name, |
@@ -7094,7 +7008,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
7094 | 7008 | ||
7095 | if (new_inode) { | 7009 | if (new_inode) { |
7096 | new_inode->i_ctime = CURRENT_TIME; | 7010 | new_inode->i_ctime = CURRENT_TIME; |
7097 | if (unlikely(new_inode->i_ino == | 7011 | if (unlikely(btrfs_ino(new_inode) == |
7098 | BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { | 7012 | BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { |
7099 | root_objectid = BTRFS_I(new_inode)->location.objectid; | 7013 | root_objectid = BTRFS_I(new_inode)->location.objectid; |
7100 | ret = btrfs_unlink_subvol(trans, dest, new_dir, | 7014 | ret = btrfs_unlink_subvol(trans, dest, new_dir, |
@@ -7122,7 +7036,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
7122 | new_dentry->d_name.len, 0, index); | 7036 | new_dentry->d_name.len, 0, index); |
7123 | BUG_ON(ret); | 7037 | BUG_ON(ret); |
7124 | 7038 | ||
7125 | if (old_inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { | 7039 | if (old_ino != BTRFS_FIRST_FREE_OBJECTID) { |
7126 | struct dentry *parent = dget_parent(new_dentry); | 7040 | struct dentry *parent = dget_parent(new_dentry); |
7127 | btrfs_log_new_name(trans, old_inode, old_dir, parent); | 7041 | btrfs_log_new_name(trans, old_inode, old_dir, parent); |
7128 | dput(parent); | 7042 | dput(parent); |
@@ -7131,7 +7045,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
7131 | out_fail: | 7045 | out_fail: |
7132 | btrfs_end_transaction_throttle(trans, root); | 7046 | btrfs_end_transaction_throttle(trans, root); |
7133 | out_notrans: | 7047 | out_notrans: |
7134 | if (old_inode->i_ino == BTRFS_FIRST_FREE_OBJECTID) | 7048 | if (old_ino == BTRFS_FIRST_FREE_OBJECTID) |
7135 | up_read(&root->fs_info->subvol_sem); | 7049 | up_read(&root->fs_info->subvol_sem); |
7136 | 7050 | ||
7137 | return ret; | 7051 | return ret; |
@@ -7185,58 +7099,6 @@ int btrfs_start_delalloc_inodes(struct btrfs_root *root, int delay_iput) | |||
7185 | return 0; | 7099 | return 0; |
7186 | } | 7100 | } |
7187 | 7101 | ||
7188 | int btrfs_start_one_delalloc_inode(struct btrfs_root *root, int delay_iput, | ||
7189 | int sync) | ||
7190 | { | ||
7191 | struct btrfs_inode *binode; | ||
7192 | struct inode *inode = NULL; | ||
7193 | |||
7194 | spin_lock(&root->fs_info->delalloc_lock); | ||
7195 | while (!list_empty(&root->fs_info->delalloc_inodes)) { | ||
7196 | binode = list_entry(root->fs_info->delalloc_inodes.next, | ||
7197 | struct btrfs_inode, delalloc_inodes); | ||
7198 | inode = igrab(&binode->vfs_inode); | ||
7199 | if (inode) { | ||
7200 | list_move_tail(&binode->delalloc_inodes, | ||
7201 | &root->fs_info->delalloc_inodes); | ||
7202 | break; | ||
7203 | } | ||
7204 | |||
7205 | list_del_init(&binode->delalloc_inodes); | ||
7206 | cond_resched_lock(&root->fs_info->delalloc_lock); | ||
7207 | } | ||
7208 | spin_unlock(&root->fs_info->delalloc_lock); | ||
7209 | |||
7210 | if (inode) { | ||
7211 | if (sync) { | ||
7212 | filemap_write_and_wait(inode->i_mapping); | ||
7213 | /* | ||
7214 | * We have to do this because compression doesn't | ||
7215 | * actually set PG_writeback until it submits the pages | ||
7216 | * for IO, which happens in an async thread, so we could | ||
7217 | * race and not actually wait for any writeback pages | ||
7218 | * because they've not been submitted yet. Technically | ||
7219 | * this could still be the case for the ordered stuff | ||
7220 | * since the async thread may not have started to do its | ||
7221 | * work yet. If this becomes the case then we need to | ||
7222 | * figure out a way to make sure that in writepage we | ||
7223 | * wait for any async pages to be submitted before | ||
7224 | * returning so that fdatawait does what its supposed to | ||
7225 | * do. | ||
7226 | */ | ||
7227 | btrfs_wait_ordered_range(inode, 0, (u64)-1); | ||
7228 | } else { | ||
7229 | filemap_flush(inode->i_mapping); | ||
7230 | } | ||
7231 | if (delay_iput) | ||
7232 | btrfs_add_delayed_iput(inode); | ||
7233 | else | ||
7234 | iput(inode); | ||
7235 | return 1; | ||
7236 | } | ||
7237 | return 0; | ||
7238 | } | ||
7239 | |||
7240 | static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | 7102 | static int btrfs_symlink(struct inode *dir, struct dentry *dentry, |
7241 | const char *symname) | 7103 | const char *symname) |
7242 | { | 7104 | { |
@@ -7260,9 +7122,6 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |||
7260 | if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) | 7122 | if (name_len > BTRFS_MAX_INLINE_DATA_SIZE(root)) |
7261 | return -ENAMETOOLONG; | 7123 | return -ENAMETOOLONG; |
7262 | 7124 | ||
7263 | err = btrfs_find_free_objectid(NULL, root, dir->i_ino, &objectid); | ||
7264 | if (err) | ||
7265 | return err; | ||
7266 | /* | 7125 | /* |
7267 | * 2 items for inode item and ref | 7126 | * 2 items for inode item and ref |
7268 | * 2 items for dir items | 7127 | * 2 items for dir items |
@@ -7274,8 +7133,12 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |||
7274 | 7133 | ||
7275 | btrfs_set_trans_block_group(trans, dir); | 7134 | btrfs_set_trans_block_group(trans, dir); |
7276 | 7135 | ||
7136 | err = btrfs_find_free_ino(root, &objectid); | ||
7137 | if (err) | ||
7138 | goto out_unlock; | ||
7139 | |||
7277 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, | 7140 | inode = btrfs_new_inode(trans, root, dir, dentry->d_name.name, |
7278 | dentry->d_name.len, dir->i_ino, objectid, | 7141 | dentry->d_name.len, btrfs_ino(dir), objectid, |
7279 | BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, | 7142 | BTRFS_I(dir)->block_group, S_IFLNK|S_IRWXUGO, |
7280 | &index); | 7143 | &index); |
7281 | if (IS_ERR(inode)) { | 7144 | if (IS_ERR(inode)) { |
@@ -7307,7 +7170,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |||
7307 | 7170 | ||
7308 | path = btrfs_alloc_path(); | 7171 | path = btrfs_alloc_path(); |
7309 | BUG_ON(!path); | 7172 | BUG_ON(!path); |
7310 | key.objectid = inode->i_ino; | 7173 | key.objectid = btrfs_ino(inode); |
7311 | key.offset = 0; | 7174 | key.offset = 0; |
7312 | btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); | 7175 | btrfs_set_key_type(&key, BTRFS_EXTENT_DATA_KEY); |
7313 | datasize = btrfs_file_extent_calc_inline_size(name_len); | 7176 | datasize = btrfs_file_extent_calc_inline_size(name_len); |
@@ -7315,6 +7178,7 @@ static int btrfs_symlink(struct inode *dir, struct dentry *dentry, | |||
7315 | datasize); | 7178 | datasize); |
7316 | if (err) { | 7179 | if (err) { |
7317 | drop_inode = 1; | 7180 | drop_inode = 1; |
7181 | btrfs_free_path(path); | ||
7318 | goto out_unlock; | 7182 | goto out_unlock; |
7319 | } | 7183 | } |
7320 | leaf = path->nodes[0]; | 7184 | leaf = path->nodes[0]; |
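Nearly every hunk in this file replaces a direct inode->i_ino read with btrfs_ino(inode). The helper itself is defined elsewhere in the series, so the snippet below is only a sketch of the idea inferred from the surrounding usage, not the patch's definition: prefer the 64-bit objectid cached in struct btrfs_inode, and fall back to the VFS inode number for the few internal inodes that do not carry a normal objectid.

/* Sketch only -- the real btrfs_ino() lives in btrfs_inode.h and may
 * differ in the exact fallback condition.
 */
static inline u64 btrfs_ino(struct inode *inode)
{
        u64 ino = BTRFS_I(inode)->location.objectid;

        if (!ino)               /* internal inodes without a real objectid */
                ino = inode->i_ino;
        return ino;
}

The apparent motivation is that objectids are 64-bit on disk while i_ino may be narrower on 32-bit hosts, so comparing and printing btrfs_ino() keeps key lookups and log messages consistent with the on-disk items.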
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 2616f7ed4799..85e818ce00c5 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c | |||
@@ -50,6 +50,7 @@ | |||
50 | #include "print-tree.h" | 50 | #include "print-tree.h" |
51 | #include "volumes.h" | 51 | #include "volumes.h" |
52 | #include "locking.h" | 52 | #include "locking.h" |
53 | #include "inode-map.h" | ||
53 | 54 | ||
54 | /* Mask out flags that are inappropriate for the given type of inode. */ | 55 | /* Mask out flags that are inappropriate for the given type of inode. */ |
55 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) | 56 | static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) |
@@ -281,8 +282,9 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) | |||
281 | if (!capable(CAP_SYS_ADMIN)) | 282 | if (!capable(CAP_SYS_ADMIN)) |
282 | return -EPERM; | 283 | return -EPERM; |
283 | 284 | ||
284 | mutex_lock(&fs_info->fs_devices->device_list_mutex); | 285 | rcu_read_lock(); |
285 | list_for_each_entry(device, &fs_info->fs_devices->devices, dev_list) { | 286 | list_for_each_entry_rcu(device, &fs_info->fs_devices->devices, |
287 | dev_list) { | ||
286 | if (!device->bdev) | 288 | if (!device->bdev) |
287 | continue; | 289 | continue; |
288 | q = bdev_get_queue(device->bdev); | 290 | q = bdev_get_queue(device->bdev); |
@@ -292,7 +294,7 @@ static noinline int btrfs_ioctl_fitrim(struct file *file, void __user *arg) | |||
292 | minlen); | 294 | minlen); |
293 | } | 295 | } |
294 | } | 296 | } |
295 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | 297 | rcu_read_unlock(); |
296 | if (!num_devices) | 298 | if (!num_devices) |
297 | return -EOPNOTSUPP; | 299 | return -EOPNOTSUPP; |
298 | 300 | ||
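The fitrim hunk above drops device_list_mutex in favour of an RCU read-side section, which is sufficient for a read-only walk of the device list. The general pattern is the usual rcu_read_lock()/list_for_each_entry_rcu() pairing, shown here with a stand-in struct rather than struct btrfs_device:

/* Read-only, RCU-protected list walk (needs <linux/rculist.h>).
 * 'struct demo_dev' stands in for struct btrfs_device.
 */
struct demo_dev {
        struct list_head dev_list;
        int online;
};

static int count_online(struct list_head *head)
{
        struct demo_dev *dev;
        int n = 0;

        rcu_read_lock();
        list_for_each_entry_rcu(dev, head, dev_list)
                n += dev->online;       /* must not block inside the RCU section */
        rcu_read_unlock();

        return n;
}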
@@ -329,8 +331,7 @@ static noinline int create_subvol(struct btrfs_root *root, | |||
329 | u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; | 331 | u64 new_dirid = BTRFS_FIRST_FREE_OBJECTID; |
330 | u64 index = 0; | 332 | u64 index = 0; |
331 | 333 | ||
332 | ret = btrfs_find_free_objectid(NULL, root->fs_info->tree_root, | 334 | ret = btrfs_find_free_objectid(root->fs_info->tree_root, &objectid); |
333 | 0, &objectid); | ||
334 | if (ret) { | 335 | if (ret) { |
335 | dput(parent); | 336 | dput(parent); |
336 | return ret; | 337 | return ret; |
@@ -422,7 +423,7 @@ static noinline int create_subvol(struct btrfs_root *root, | |||
422 | BUG_ON(ret); | 423 | BUG_ON(ret); |
423 | 424 | ||
424 | ret = btrfs_insert_dir_item(trans, root, | 425 | ret = btrfs_insert_dir_item(trans, root, |
425 | name, namelen, dir->i_ino, &key, | 426 | name, namelen, dir, &key, |
426 | BTRFS_FT_DIR, index); | 427 | BTRFS_FT_DIR, index); |
427 | if (ret) | 428 | if (ret) |
428 | goto fail; | 429 | goto fail; |
@@ -433,7 +434,7 @@ static noinline int create_subvol(struct btrfs_root *root, | |||
433 | 434 | ||
434 | ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, | 435 | ret = btrfs_add_root_ref(trans, root->fs_info->tree_root, |
435 | objectid, root->root_key.objectid, | 436 | objectid, root->root_key.objectid, |
436 | dir->i_ino, index, name, namelen); | 437 | btrfs_ino(dir), index, name, namelen); |
437 | 438 | ||
438 | BUG_ON(ret); | 439 | BUG_ON(ret); |
439 | 440 | ||
@@ -655,6 +656,106 @@ out_unlock: | |||
655 | return error; | 656 | return error; |
656 | } | 657 | } |
657 | 658 | ||
659 | /* | ||
660 | * When we're defragging a range, we don't want to kick it off again | ||
661 | * if it is really just waiting for delalloc to send it down. | ||
662 | * If we find a nice big extent or delalloc range for the bytes in the | ||
663 | * file you want to defrag, we return 0 to let you know to skip this | ||
664 | * part of the file | ||
665 | */ | ||
666 | static int check_defrag_in_cache(struct inode *inode, u64 offset, int thresh) | ||
667 | { | ||
668 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | ||
669 | struct extent_map *em = NULL; | ||
670 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | ||
671 | u64 end; | ||
672 | |||
673 | read_lock(&em_tree->lock); | ||
674 | em = lookup_extent_mapping(em_tree, offset, PAGE_CACHE_SIZE); | ||
675 | read_unlock(&em_tree->lock); | ||
676 | |||
677 | if (em) { | ||
678 | end = extent_map_end(em); | ||
679 | free_extent_map(em); | ||
680 | if (end - offset > thresh) | ||
681 | return 0; | ||
682 | } | ||
683 | /* if we already have a nice delalloc here, just stop */ | ||
684 | thresh /= 2; | ||
685 | end = count_range_bits(io_tree, &offset, offset + thresh, | ||
686 | thresh, EXTENT_DELALLOC, 1); | ||
687 | if (end >= thresh) | ||
688 | return 0; | ||
689 | return 1; | ||
690 | } | ||
691 | |||
692 | /* | ||
693 | * helper function to walk through a file and find extents | ||
694 | * newer than a specific transid, and smaller than thresh. | ||
695 | * | ||
696 | * This is used by the defragging code to find new and small | ||
697 | * extents | ||
698 | */ | ||
699 | static int find_new_extents(struct btrfs_root *root, | ||
700 | struct inode *inode, u64 newer_than, | ||
701 | u64 *off, int thresh) | ||
702 | { | ||
703 | struct btrfs_path *path; | ||
704 | struct btrfs_key min_key; | ||
705 | struct btrfs_key max_key; | ||
706 | struct extent_buffer *leaf; | ||
707 | struct btrfs_file_extent_item *extent; | ||
708 | int type; | ||
709 | int ret; | ||
710 | |||
711 | path = btrfs_alloc_path(); | ||
712 | if (!path) | ||
713 | return -ENOMEM; | ||
714 | |||
715 | min_key.objectid = inode->i_ino; | ||
716 | min_key.type = BTRFS_EXTENT_DATA_KEY; | ||
717 | min_key.offset = *off; | ||
718 | |||
719 | max_key.objectid = inode->i_ino; | ||
720 | max_key.type = (u8)-1; | ||
721 | max_key.offset = (u64)-1; | ||
722 | |||
723 | path->keep_locks = 1; | ||
724 | |||
725 | while(1) { | ||
726 | ret = btrfs_search_forward(root, &min_key, &max_key, | ||
727 | path, 0, newer_than); | ||
728 | if (ret != 0) | ||
729 | goto none; | ||
730 | if (min_key.objectid != inode->i_ino) | ||
731 | goto none; | ||
732 | if (min_key.type != BTRFS_EXTENT_DATA_KEY) | ||
733 | goto none; | ||
734 | |||
735 | leaf = path->nodes[0]; | ||
736 | extent = btrfs_item_ptr(leaf, path->slots[0], | ||
737 | struct btrfs_file_extent_item); | ||
738 | |||
739 | type = btrfs_file_extent_type(leaf, extent); | ||
740 | if (type == BTRFS_FILE_EXTENT_REG && | ||
741 | btrfs_file_extent_num_bytes(leaf, extent) < thresh && | ||
742 | check_defrag_in_cache(inode, min_key.offset, thresh)) { | ||
743 | *off = min_key.offset; | ||
744 | btrfs_free_path(path); | ||
745 | return 0; | ||
746 | } | ||
747 | |||
748 | if (min_key.offset == (u64)-1) | ||
749 | goto none; | ||
750 | |||
751 | min_key.offset++; | ||
752 | btrfs_release_path(path); | ||
753 | } | ||
754 | none: | ||
755 | btrfs_free_path(path); | ||
756 | return -ENOENT; | ||
757 | } | ||
758 | |||
658 | static int should_defrag_range(struct inode *inode, u64 start, u64 len, | 759 | static int should_defrag_range(struct inode *inode, u64 start, u64 len, |
659 | int thresh, u64 *last_len, u64 *skip, | 760 | int thresh, u64 *last_len, u64 *skip, |
660 | u64 *defrag_end) | 761 | u64 *defrag_end) |
@@ -664,10 +765,6 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, | |||
664 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; | 765 | struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; |
665 | int ret = 1; | 766 | int ret = 1; |
666 | 767 | ||
667 | |||
668 | if (thresh == 0) | ||
669 | thresh = 256 * 1024; | ||
670 | |||
671 | /* | 768 | /* |
672 | * make sure that once we start defragging and extent, we keep on | 769 | * make sure that once we start defragging and extent, we keep on |
673 | * defragging it | 770 | * defragging it |
@@ -726,27 +823,176 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len, | |||
726 | return ret; | 823 | return ret; |
727 | } | 824 | } |
728 | 825 | ||
729 | static int btrfs_defrag_file(struct file *file, | 826 | /* |
730 | struct btrfs_ioctl_defrag_range_args *range) | 827 | * it doesn't do much good to defrag one or two pages |
828 | * at a time. This pulls in a nice chunk of pages | ||
829 | * to COW and defrag. | ||
830 | * | ||
831 | * It also makes sure the delalloc code has enough | ||
832 | * dirty data to avoid making new small extents as part | ||
833 | * of the defrag | ||
834 | * | ||
835 | * It's a good idea to start RA on this range | ||
836 | * before calling this. | ||
837 | */ | ||
838 | static int cluster_pages_for_defrag(struct inode *inode, | ||
839 | struct page **pages, | ||
840 | unsigned long start_index, | ||
841 | int num_pages) | ||
731 | { | 842 | { |
732 | struct inode *inode = fdentry(file)->d_inode; | 843 | unsigned long file_end; |
733 | struct btrfs_root *root = BTRFS_I(inode)->root; | 844 | u64 isize = i_size_read(inode); |
734 | struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; | 845 | u64 page_start; |
846 | u64 page_end; | ||
847 | int ret; | ||
848 | int i; | ||
849 | int i_done; | ||
735 | struct btrfs_ordered_extent *ordered; | 850 | struct btrfs_ordered_extent *ordered; |
736 | struct page *page; | 851 | struct extent_state *cached_state = NULL; |
852 | |||
853 | if (isize == 0) | ||
854 | return 0; | ||
855 | file_end = (isize - 1) >> PAGE_CACHE_SHIFT; | ||
856 | |||
857 | ret = btrfs_delalloc_reserve_space(inode, | ||
858 | num_pages << PAGE_CACHE_SHIFT); | ||
859 | if (ret) | ||
860 | return ret; | ||
861 | again: | ||
862 | ret = 0; | ||
863 | i_done = 0; | ||
864 | |||
865 | /* step one, lock all the pages */ | ||
866 | for (i = 0; i < num_pages; i++) { | ||
867 | struct page *page; | ||
868 | page = grab_cache_page(inode->i_mapping, | ||
869 | start_index + i); | ||
870 | if (!page) | ||
871 | break; | ||
872 | |||
873 | if (!PageUptodate(page)) { | ||
874 | btrfs_readpage(NULL, page); | ||
875 | lock_page(page); | ||
876 | if (!PageUptodate(page)) { | ||
877 | unlock_page(page); | ||
878 | page_cache_release(page); | ||
879 | ret = -EIO; | ||
880 | break; | ||
881 | } | ||
882 | } | ||
883 | isize = i_size_read(inode); | ||
884 | file_end = (isize - 1) >> PAGE_CACHE_SHIFT; | ||
885 | if (!isize || page->index > file_end || | ||
886 | page->mapping != inode->i_mapping) { | ||
887 | /* whoops, we blew past eof, skip this page */ | ||
888 | unlock_page(page); | ||
889 | page_cache_release(page); | ||
890 | break; | ||
891 | } | ||
892 | pages[i] = page; | ||
893 | i_done++; | ||
894 | } | ||
895 | if (!i_done || ret) | ||
896 | goto out; | ||
897 | |||
898 | if (!(inode->i_sb->s_flags & MS_ACTIVE)) | ||
899 | goto out; | ||
900 | |||
901 | /* | ||
902 | * so now we have a nice long stream of locked | ||
903 | * and up-to-date pages, let's wait on them | ||
904 | */ | ||
905 | for (i = 0; i < i_done; i++) | ||
906 | wait_on_page_writeback(pages[i]); | ||
907 | |||
908 | page_start = page_offset(pages[0]); | ||
909 | page_end = page_offset(pages[i_done - 1]) + PAGE_CACHE_SIZE; | ||
910 | |||
911 | lock_extent_bits(&BTRFS_I(inode)->io_tree, | ||
912 | page_start, page_end - 1, 0, &cached_state, | ||
913 | GFP_NOFS); | ||
914 | ordered = btrfs_lookup_first_ordered_extent(inode, page_end - 1); | ||
915 | if (ordered && | ||
916 | ordered->file_offset + ordered->len > page_start && | ||
917 | ordered->file_offset < page_end) { | ||
918 | btrfs_put_ordered_extent(ordered); | ||
919 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, | ||
920 | page_start, page_end - 1, | ||
921 | &cached_state, GFP_NOFS); | ||
922 | for (i = 0; i < i_done; i++) { | ||
923 | unlock_page(pages[i]); | ||
924 | page_cache_release(pages[i]); | ||
925 | } | ||
926 | btrfs_wait_ordered_range(inode, page_start, | ||
927 | page_end - page_start); | ||
928 | goto again; | ||
929 | } | ||
930 | if (ordered) | ||
931 | btrfs_put_ordered_extent(ordered); | ||
932 | |||
933 | clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, | ||
934 | page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | | ||
935 | EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, | ||
936 | GFP_NOFS); | ||
937 | |||
938 | if (i_done != num_pages) { | ||
939 | atomic_inc(&BTRFS_I(inode)->outstanding_extents); | ||
940 | btrfs_delalloc_release_space(inode, | ||
941 | (num_pages - i_done) << PAGE_CACHE_SHIFT); | ||
942 | } | ||
943 | |||
944 | |||
945 | btrfs_set_extent_delalloc(inode, page_start, page_end - 1, | ||
946 | &cached_state); | ||
947 | |||
948 | unlock_extent_cached(&BTRFS_I(inode)->io_tree, | ||
949 | page_start, page_end - 1, &cached_state, | ||
950 | GFP_NOFS); | ||
951 | |||
952 | for (i = 0; i < i_done; i++) { | ||
953 | clear_page_dirty_for_io(pages[i]); | ||
954 | ClearPageChecked(pages[i]); | ||
955 | set_page_extent_mapped(pages[i]); | ||
956 | set_page_dirty(pages[i]); | ||
957 | unlock_page(pages[i]); | ||
958 | page_cache_release(pages[i]); | ||
959 | } | ||
960 | return i_done; | ||
961 | out: | ||
962 | for (i = 0; i < i_done; i++) { | ||
963 | unlock_page(pages[i]); | ||
964 | page_cache_release(pages[i]); | ||
965 | } | ||
966 | btrfs_delalloc_release_space(inode, num_pages << PAGE_CACHE_SHIFT); | ||
967 | return ret; | ||
968 | |||
969 | } | ||
970 | |||
971 | int btrfs_defrag_file(struct inode *inode, struct file *file, | ||
972 | struct btrfs_ioctl_defrag_range_args *range, | ||
973 | u64 newer_than, unsigned long max_to_defrag) | ||
974 | { | ||
975 | struct btrfs_root *root = BTRFS_I(inode)->root; | ||
737 | struct btrfs_super_block *disk_super; | 976 | struct btrfs_super_block *disk_super; |
977 | struct file_ra_state *ra = NULL; | ||
738 | unsigned long last_index; | 978 | unsigned long last_index; |
739 | unsigned long ra_pages = root->fs_info->bdi.ra_pages; | ||
740 | unsigned long total_read = 0; | ||
741 | u64 features; | 979 | u64 features; |
742 | u64 page_start; | ||
743 | u64 page_end; | ||
744 | u64 last_len = 0; | 980 | u64 last_len = 0; |
745 | u64 skip = 0; | 981 | u64 skip = 0; |
746 | u64 defrag_end = 0; | 982 | u64 defrag_end = 0; |
983 | u64 newer_off = range->start; | ||
984 | int newer_left = 0; | ||
747 | unsigned long i; | 985 | unsigned long i; |
748 | int ret; | 986 | int ret; |
987 | int defrag_count = 0; | ||
749 | int compress_type = BTRFS_COMPRESS_ZLIB; | 988 | int compress_type = BTRFS_COMPRESS_ZLIB; |
989 | int extent_thresh = range->extent_thresh; | ||
990 | int newer_cluster = (256 * 1024) >> PAGE_CACHE_SHIFT; | ||
991 | u64 new_align = ~((u64)128 * 1024 - 1); | ||
992 | struct page **pages = NULL; | ||
993 | |||
994 | if (extent_thresh == 0) | ||
995 | extent_thresh = 256 * 1024; | ||
750 | 996 | ||
751 | if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) { | 997 | if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) { |
752 | if (range->compress_type > BTRFS_COMPRESS_TYPES) | 998 | if (range->compress_type > BTRFS_COMPRESS_TYPES) |
@@ -758,6 +1004,27 @@ static int btrfs_defrag_file(struct file *file, | |||
758 | if (inode->i_size == 0) | 1004 | if (inode->i_size == 0) |
759 | return 0; | 1005 | return 0; |
760 | 1006 | ||
1007 | /* | ||
1008 | * if we were not given a file, allocate a readahead | ||
1009 | * context | ||
1010 | */ | ||
1011 | if (!file) { | ||
1012 | ra = kzalloc(sizeof(*ra), GFP_NOFS); | ||
1013 | if (!ra) | ||
1014 | return -ENOMEM; | ||
1015 | file_ra_state_init(ra, inode->i_mapping); | ||
1016 | } else { | ||
1017 | ra = &file->f_ra; | ||
1018 | } | ||
1019 | |||
1020 | pages = kmalloc(sizeof(struct page *) * newer_cluster, | ||
1021 | GFP_NOFS); | ||
1022 | if (!pages) { | ||
1023 | ret = -ENOMEM; | ||
1024 | goto out_ra; | ||
1025 | } | ||
1026 | |||
1027 | /* find the last page to defrag */ | ||
761 | if (range->start + range->len > range->start) { | 1028 | if (range->start + range->len > range->start) { |
762 | last_index = min_t(u64, inode->i_size - 1, | 1029 | last_index = min_t(u64, inode->i_size - 1, |
763 | range->start + range->len - 1) >> PAGE_CACHE_SHIFT; | 1030 | range->start + range->len - 1) >> PAGE_CACHE_SHIFT; |
@@ -765,11 +1032,37 @@ static int btrfs_defrag_file(struct file *file, | |||
765 | last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; | 1032 | last_index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT; |
766 | } | 1033 | } |
767 | 1034 | ||
768 | i = range->start >> PAGE_CACHE_SHIFT; | 1035 | if (newer_than) { |
769 | while (i <= last_index) { | 1036 | ret = find_new_extents(root, inode, newer_than, |
770 | if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, | 1037 | &newer_off, 64 * 1024); |
1038 | if (!ret) { | ||
1039 | range->start = newer_off; | ||
1040 | /* | ||
1041 | * we always align our defrag to help keep | ||
1042 | * the extents in the file evenly spaced | ||
1043 | */ | ||
1044 | i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; | ||
1045 | newer_left = newer_cluster; | ||
1046 | } else | ||
1047 | goto out_ra; | ||
1048 | } else { | ||
1049 | i = range->start >> PAGE_CACHE_SHIFT; | ||
1050 | } | ||
1051 | if (!max_to_defrag) | ||
1052 | max_to_defrag = last_index - 1; | ||
1053 | |||
1054 | while (i <= last_index && defrag_count < max_to_defrag) { | ||
1055 | /* | ||
1056 | * make sure we stop running if someone unmounts | ||
1057 | * the FS | ||
1058 | */ | ||
1059 | if (!(inode->i_sb->s_flags & MS_ACTIVE)) | ||
1060 | break; | ||
1061 | |||
1062 | if (!newer_than && | ||
1063 | !should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, | ||
771 | PAGE_CACHE_SIZE, | 1064 | PAGE_CACHE_SIZE, |
772 | range->extent_thresh, | 1065 | extent_thresh, |
773 | &last_len, &skip, | 1066 | &last_len, &skip, |
774 | &defrag_end)) { | 1067 | &defrag_end)) { |
775 | unsigned long next; | 1068 | unsigned long next; |
@@ -781,92 +1074,39 @@ static int btrfs_defrag_file(struct file *file, | |||
781 | i = max(i + 1, next); | 1074 | i = max(i + 1, next); |
782 | continue; | 1075 | continue; |
783 | } | 1076 | } |
784 | |||
785 | if (total_read % ra_pages == 0) { | ||
786 | btrfs_force_ra(inode->i_mapping, &file->f_ra, file, i, | ||
787 | min(last_index, i + ra_pages - 1)); | ||
788 | } | ||
789 | total_read++; | ||
790 | mutex_lock(&inode->i_mutex); | ||
791 | if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) | 1077 | if (range->flags & BTRFS_DEFRAG_RANGE_COMPRESS) |
792 | BTRFS_I(inode)->force_compress = compress_type; | 1078 | BTRFS_I(inode)->force_compress = compress_type; |
793 | 1079 | ||
794 | ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); | 1080 | btrfs_force_ra(inode->i_mapping, ra, file, i, newer_cluster); |
795 | if (ret) | ||
796 | goto err_unlock; | ||
797 | again: | ||
798 | if (inode->i_size == 0 || | ||
799 | i > ((inode->i_size - 1) >> PAGE_CACHE_SHIFT)) { | ||
800 | ret = 0; | ||
801 | goto err_reservations; | ||
802 | } | ||
803 | 1081 | ||
804 | page = grab_cache_page(inode->i_mapping, i); | 1082 | ret = cluster_pages_for_defrag(inode, pages, i, newer_cluster); |
805 | if (!page) { | 1083 | if (ret < 0) |
806 | ret = -ENOMEM; | 1084 | goto out_ra; |
807 | goto err_reservations; | ||
808 | } | ||
809 | |||
810 | if (!PageUptodate(page)) { | ||
811 | btrfs_readpage(NULL, page); | ||
812 | lock_page(page); | ||
813 | if (!PageUptodate(page)) { | ||
814 | unlock_page(page); | ||
815 | page_cache_release(page); | ||
816 | ret = -EIO; | ||
817 | goto err_reservations; | ||
818 | } | ||
819 | } | ||
820 | |||
821 | if (page->mapping != inode->i_mapping) { | ||
822 | unlock_page(page); | ||
823 | page_cache_release(page); | ||
824 | goto again; | ||
825 | } | ||
826 | 1085 | ||
827 | wait_on_page_writeback(page); | 1086 | defrag_count += ret; |
1087 | balance_dirty_pages_ratelimited_nr(inode->i_mapping, ret); | ||
1088 | i += ret; | ||
828 | 1089 | ||
829 | if (PageDirty(page)) { | 1090 | if (newer_than) { |
830 | btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); | 1091 | if (newer_off == (u64)-1) |
831 | goto loop_unlock; | 1092 | break; |
832 | } | ||
833 | |||
834 | page_start = (u64)page->index << PAGE_CACHE_SHIFT; | ||
835 | page_end = page_start + PAGE_CACHE_SIZE - 1; | ||
836 | lock_extent(io_tree, page_start, page_end, GFP_NOFS); | ||
837 | 1093 | ||
838 | ordered = btrfs_lookup_ordered_extent(inode, page_start); | 1094 | newer_off = max(newer_off + 1, |
839 | if (ordered) { | 1095 | (u64)i << PAGE_CACHE_SHIFT); |
840 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); | 1096 | |
841 | unlock_page(page); | 1097 | ret = find_new_extents(root, inode, |
842 | page_cache_release(page); | 1098 | newer_than, &newer_off, |
843 | btrfs_start_ordered_extent(inode, ordered, 1); | 1099 | 64 * 1024); |
844 | btrfs_put_ordered_extent(ordered); | 1100 | if (!ret) { |
845 | goto again; | 1101 | range->start = newer_off; |
1102 | i = (newer_off & new_align) >> PAGE_CACHE_SHIFT; | ||
1103 | newer_left = newer_cluster; | ||
1104 | } else { | ||
1105 | break; | ||
1106 | } | ||
1107 | } else { | ||
1108 | i++; | ||
846 | } | 1109 | } |
847 | set_page_extent_mapped(page); | ||
848 | |||
849 | /* | ||
850 | * this makes sure page_mkwrite is called on the | ||
851 | * page if it is dirtied again later | ||
852 | */ | ||
853 | clear_page_dirty_for_io(page); | ||
854 | clear_extent_bits(&BTRFS_I(inode)->io_tree, page_start, | ||
855 | page_end, EXTENT_DIRTY | EXTENT_DELALLOC | | ||
856 | EXTENT_DO_ACCOUNTING, GFP_NOFS); | ||
857 | |||
858 | btrfs_set_extent_delalloc(inode, page_start, page_end, NULL); | ||
859 | ClearPageChecked(page); | ||
860 | set_page_dirty(page); | ||
861 | unlock_extent(io_tree, page_start, page_end, GFP_NOFS); | ||
862 | |||
863 | loop_unlock: | ||
864 | unlock_page(page); | ||
865 | page_cache_release(page); | ||
866 | mutex_unlock(&inode->i_mutex); | ||
867 | |||
868 | balance_dirty_pages_ratelimited_nr(inode->i_mapping, 1); | ||
869 | i++; | ||
870 | } | 1110 | } |
871 | 1111 | ||
872 | if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) | 1112 | if ((range->flags & BTRFS_DEFRAG_RANGE_START_IO)) |
@@ -898,12 +1138,14 @@ loop_unlock: | |||
898 | btrfs_set_super_incompat_flags(disk_super, features); | 1138 | btrfs_set_super_incompat_flags(disk_super, features); |
899 | } | 1139 | } |
900 | 1140 | ||
901 | return 0; | 1141 | if (!file) |
1142 | kfree(ra); | ||
1143 | return defrag_count; | ||
902 | 1144 | ||
903 | err_reservations: | 1145 | out_ra: |
904 | btrfs_delalloc_release_space(inode, PAGE_CACHE_SIZE); | 1146 | if (!file) |
905 | err_unlock: | 1147 | kfree(ra); |
906 | mutex_unlock(&inode->i_mutex); | 1148 | kfree(pages); |
907 | return ret; | 1149 | return ret; |
908 | } | 1150 | } |
909 | 1151 | ||
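
The rewritten btrfs_defrag_file() works on clusters of pages rather than one page at a time: newer_cluster is 256K expressed in pages, and new_align rounds restart offsets down to a 128K boundary so that successive passes keep the file's extents evenly spaced, as the comment in the loop notes. A quick worked example of those two constants (a standalone sketch, assuming the common 4K page size, i.e. PAGE_CACHE_SHIFT == 12):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned int page_shift = 12;                  /* assumed 4K pages */
	unsigned long cluster = (256 * 1024) >> page_shift;  /* pages per defrag cluster */
	uint64_t align = ~((uint64_t)128 * 1024 - 1);        /* 128K alignment mask */
	uint64_t newer_off = 1234567;                        /* arbitrary byte offset */

	printf("cluster = %lu pages\n", cluster);             /* 64 */
	printf("aligned start = %llu bytes\n",
	       (unsigned long long)(newer_off & align));      /* 1179648 = 9 * 128K */
	return 0;
}

So each call to cluster_pages_for_defrag() pulls in up to 64 pages on a 4K-page system, and a restart offset of 1234567 snaps back to the previous 128K boundary at 1179648.
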
@@ -1129,7 +1371,7 @@ static noinline int btrfs_ioctl_subvol_getflags(struct file *file, | |||
1129 | int ret = 0; | 1371 | int ret = 0; |
1130 | u64 flags = 0; | 1372 | u64 flags = 0; |
1131 | 1373 | ||
1132 | if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) | 1374 | if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) |
1133 | return -EINVAL; | 1375 | return -EINVAL; |
1134 | 1376 | ||
1135 | down_read(&root->fs_info->subvol_sem); | 1377 | down_read(&root->fs_info->subvol_sem); |
@@ -1156,7 +1398,7 @@ static noinline int btrfs_ioctl_subvol_setflags(struct file *file, | |||
1156 | if (root->fs_info->sb->s_flags & MS_RDONLY) | 1398 | if (root->fs_info->sb->s_flags & MS_RDONLY) |
1157 | return -EROFS; | 1399 | return -EROFS; |
1158 | 1400 | ||
1159 | if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) | 1401 | if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) |
1160 | return -EINVAL; | 1402 | return -EINVAL; |
1161 | 1403 | ||
1162 | if (copy_from_user(&flags, arg, sizeof(flags))) | 1404 | if (copy_from_user(&flags, arg, sizeof(flags))) |
@@ -1279,7 +1521,6 @@ static noinline int copy_to_sk(struct btrfs_root *root, | |||
1279 | int nritems; | 1521 | int nritems; |
1280 | int i; | 1522 | int i; |
1281 | int slot; | 1523 | int slot; |
1282 | int found = 0; | ||
1283 | int ret = 0; | 1524 | int ret = 0; |
1284 | 1525 | ||
1285 | leaf = path->nodes[0]; | 1526 | leaf = path->nodes[0]; |
@@ -1326,7 +1567,7 @@ static noinline int copy_to_sk(struct btrfs_root *root, | |||
1326 | item_off, item_len); | 1567 | item_off, item_len); |
1327 | *sk_offset += item_len; | 1568 | *sk_offset += item_len; |
1328 | } | 1569 | } |
1329 | found++; | 1570 | (*num_found)++; |
1330 | 1571 | ||
1331 | if (*num_found >= sk->nr_items) | 1572 | if (*num_found >= sk->nr_items) |
1332 | break; | 1573 | break; |
@@ -1345,7 +1586,6 @@ advance_key: | |||
1345 | } else | 1586 | } else |
1346 | ret = 1; | 1587 | ret = 1; |
1347 | overflow: | 1588 | overflow: |
1348 | *num_found += found; | ||
1349 | return ret; | 1589 | return ret; |
1350 | } | 1590 | } |
1351 | 1591 | ||
@@ -1402,7 +1642,7 @@ static noinline int search_ioctl(struct inode *inode, | |||
1402 | } | 1642 | } |
1403 | ret = copy_to_sk(root, path, &key, sk, args->buf, | 1643 | ret = copy_to_sk(root, path, &key, sk, args->buf, |
1404 | &sk_offset, &num_found); | 1644 | &sk_offset, &num_found); |
1405 | btrfs_release_path(root, path); | 1645 | btrfs_release_path(path); |
1406 | if (ret || num_found >= sk->nr_items) | 1646 | if (ret || num_found >= sk->nr_items) |
1407 | break; | 1647 | break; |
1408 | 1648 | ||
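
The copy_to_sk() change above increments *num_found as each item is copied instead of adding a per-leaf subtotal at the overflow label, so the sk->nr_items limit is now checked against the true running count. For context, a userspace caller of the tree-search ioctl consumes that count roughly as below. This is a hedged sketch: the helper name is invented, and it assumes the btrfs_ioctl_search_args layout and BTRFS_IOC_TREE_SEARCH definition already present in ioctl.h (neither is touched by this patch).

/* Hypothetical helper: count the EXTENT_DATA items of one inode.
 * Needs CAP_SYS_ADMIN; fd is any open file or directory on the filesystem. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "ioctl.h"			/* copied from fs/btrfs/ioctl.h */

#define BTRFS_EXTENT_DATA_KEY 108	/* mirrors the value in ctree.h */

int count_file_extents(int fd, __u64 ino)
{
	struct btrfs_ioctl_search_args args;

	memset(&args, 0, sizeof(args));
	args.key.tree_id = 0;			/* 0: search the fd's own subvolume tree */
	args.key.min_objectid = ino;
	args.key.max_objectid = ino;
	args.key.min_type = BTRFS_EXTENT_DATA_KEY;
	args.key.max_type = BTRFS_EXTENT_DATA_KEY;
	args.key.max_offset = (__u64)-1;
	args.key.max_transid = (__u64)-1;
	args.key.nr_items = 4096;		/* upper bound; the result buffer limits it anyway */

	if (ioctl(fd, BTRFS_IOC_TREE_SEARCH, &args) < 0)
		return -1;
	return args.key.nr_items;		/* rewritten by the kernel to the count found */
}
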
@@ -1509,7 +1749,7 @@ static noinline int btrfs_search_path_in_tree(struct btrfs_fs_info *info, | |||
1509 | if (key.offset == BTRFS_FIRST_FREE_OBJECTID) | 1749 | if (key.offset == BTRFS_FIRST_FREE_OBJECTID) |
1510 | break; | 1750 | break; |
1511 | 1751 | ||
1512 | btrfs_release_path(root, path); | 1752 | btrfs_release_path(path); |
1513 | key.objectid = key.offset; | 1753 | key.objectid = key.offset; |
1514 | key.offset = (u64)-1; | 1754 | key.offset = (u64)-1; |
1515 | dirid = key.objectid; | 1755 | dirid = key.objectid; |
@@ -1639,7 +1879,7 @@ static noinline int btrfs_ioctl_snap_destroy(struct file *file, | |||
1639 | goto out_dput; | 1879 | goto out_dput; |
1640 | } | 1880 | } |
1641 | 1881 | ||
1642 | if (inode->i_ino != BTRFS_FIRST_FREE_OBJECTID) { | 1882 | if (btrfs_ino(inode) != BTRFS_FIRST_FREE_OBJECTID) { |
1643 | err = -EINVAL; | 1883 | err = -EINVAL; |
1644 | goto out_dput; | 1884 | goto out_dput; |
1645 | } | 1885 | } |
@@ -1757,7 +1997,10 @@ static int btrfs_ioctl_defrag(struct file *file, void __user *argp) | |||
1757 | /* the rest are all set to zero by kzalloc */ | 1997 | /* the rest are all set to zero by kzalloc */ |
1758 | range->len = (u64)-1; | 1998 | range->len = (u64)-1; |
1759 | } | 1999 | } |
1760 | ret = btrfs_defrag_file(file, range); | 2000 | ret = btrfs_defrag_file(fdentry(file)->d_inode, file, |
2001 | range, 0, 0); | ||
2002 | if (ret > 0) | ||
2003 | ret = 0; | ||
1761 | kfree(range); | 2004 | kfree(range); |
1762 | break; | 2005 | break; |
1763 | default: | 2006 | default: |
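
btrfs_defrag_file() now takes the inode explicitly (so callers without a struct file, such as the autodefrag path, can use it) and returns the number of pages it defragged; the handler above clamps positive values back to 0, so the user-visible ABI is unchanged. A minimal caller still looks like the sketch below. Hedged: the helper name is invented, BTRFS_IOC_DEFRAG_RANGE comes from ioctl.h unchanged, and struct btrfs_ioctl_defrag_range_args keeps its layout even though its definition is removed from ioctl.h later in this diff (relocated, not altered).

/* Hypothetical helper: whole-file defrag with compression requested. */
#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "ioctl.h"			/* copied from fs/btrfs/ioctl.h */

int defrag_with_compression(const char *path)
{
	struct btrfs_ioctl_defrag_range_args range;
	int fd, ret;

	fd = open(path, O_RDWR);
	if (fd < 0)
		return -1;

	memset(&range, 0, sizeof(range));
	range.len = (__u64)-1;			/* (u64)-1 means the whole file */
	range.extent_thresh = 0;		/* 0: take the kernel default (256K) */
	range.flags = BTRFS_DEFRAG_RANGE_COMPRESS;
	/* compress_type left 0: zlib is used when unspecified */

	ret = ioctl(fd, BTRFS_IOC_DEFRAG_RANGE, &range);
	close(fd);
	return ret;				/* 0 on success, -1 with errno on failure */
}
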
@@ -1809,6 +2052,75 @@ static long btrfs_ioctl_rm_dev(struct btrfs_root *root, void __user *arg) | |||
1809 | return ret; | 2052 | return ret; |
1810 | } | 2053 | } |
1811 | 2054 | ||
2055 | static long btrfs_ioctl_fs_info(struct btrfs_root *root, void __user *arg) | ||
2056 | { | ||
2057 | struct btrfs_ioctl_fs_info_args fi_args; | ||
2058 | struct btrfs_device *device; | ||
2059 | struct btrfs_device *next; | ||
2060 | struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; | ||
2061 | |||
2062 | if (!capable(CAP_SYS_ADMIN)) | ||
2063 | return -EPERM; | ||
2064 | |||
2065 | fi_args.num_devices = fs_devices->num_devices; | ||
2066 | fi_args.max_id = 0; | ||
2067 | memcpy(&fi_args.fsid, root->fs_info->fsid, sizeof(fi_args.fsid)); | ||
2068 | |||
2069 | mutex_lock(&fs_devices->device_list_mutex); | ||
2070 | list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { | ||
2071 | if (device->devid > fi_args.max_id) | ||
2072 | fi_args.max_id = device->devid; | ||
2073 | } | ||
2074 | mutex_unlock(&fs_devices->device_list_mutex); | ||
2075 | |||
2076 | if (copy_to_user(arg, &fi_args, sizeof(fi_args))) | ||
2077 | return -EFAULT; | ||
2078 | |||
2079 | return 0; | ||
2080 | } | ||
2081 | |||
2082 | static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg) | ||
2083 | { | ||
2084 | struct btrfs_ioctl_dev_info_args *di_args; | ||
2085 | struct btrfs_device *dev; | ||
2086 | struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices; | ||
2087 | int ret = 0; | ||
2088 | char *s_uuid = NULL; | ||
2089 | char empty_uuid[BTRFS_UUID_SIZE] = {0}; | ||
2090 | |||
2091 | if (!capable(CAP_SYS_ADMIN)) | ||
2092 | return -EPERM; | ||
2093 | |||
2094 | di_args = memdup_user(arg, sizeof(*di_args)); | ||
2095 | if (IS_ERR(di_args)) | ||
2096 | return PTR_ERR(di_args); | ||
2097 | |||
2098 | if (memcmp(empty_uuid, di_args->uuid, BTRFS_UUID_SIZE) != 0) | ||
2099 | s_uuid = di_args->uuid; | ||
2100 | |||
2101 | mutex_lock(&fs_devices->device_list_mutex); | ||
2102 | dev = btrfs_find_device(root, di_args->devid, s_uuid, NULL); | ||
2103 | mutex_unlock(&fs_devices->device_list_mutex); | ||
2104 | |||
2105 | if (!dev) { | ||
2106 | ret = -ENODEV; | ||
2107 | goto out; | ||
2108 | } | ||
2109 | |||
2110 | di_args->devid = dev->devid; | ||
2111 | di_args->bytes_used = dev->bytes_used; | ||
2112 | di_args->total_bytes = dev->total_bytes; | ||
2113 | memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); | ||
2114 | strncpy(di_args->path, dev->name, sizeof(di_args->path)); | ||
2115 | |||
2116 | out: | ||
2117 | if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args))) | ||
2118 | ret = -EFAULT; | ||
2119 | |||
2120 | kfree(di_args); | ||
2121 | return ret; | ||
2122 | } | ||
2123 | |||
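
Together, these two handlers let userspace enumerate the devices of a mounted filesystem: FS_INFO reports num_devices, the fsid and the highest devid in use, and DEV_INFO is then queried per devid (an all-zero uuid means "match by devid only"), with -ENODEV returned for ids that are not present. A hedged sketch of that loop, using the structures added to ioctl.h later in this patch via a copied header; the helper name is invented and CAP_SYS_ADMIN is required:

/* Hypothetical helper: print every device of the filesystem mounted at mnt. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "ioctl.h"			/* copied from fs/btrfs/ioctl.h */

int list_devices(const char *mnt)
{
	struct btrfs_ioctl_fs_info_args fs;
	struct btrfs_ioctl_dev_info_args dev;
	__u64 id;
	int fd = open(mnt, O_RDONLY);

	if (fd < 0)
		return -1;
	memset(&fs, 0, sizeof(fs));
	if (ioctl(fd, BTRFS_IOC_FS_INFO, &fs) < 0) {
		close(fd);
		return -1;
	}

	for (id = 0; id <= fs.max_id; id++) {
		memset(&dev, 0, sizeof(dev));	/* zeroed uuid: match on devid only */
		dev.devid = id;
		if (ioctl(fd, BTRFS_IOC_DEV_INFO, &dev) < 0)
			continue;		/* this devid is not present */
		printf("devid %llu: %s, %llu of %llu bytes used\n",
		       (unsigned long long)dev.devid, (char *)dev.path,
		       (unsigned long long)dev.bytes_used,
		       (unsigned long long)dev.total_bytes);
	}
	close(fd);
	return 0;
}
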
1812 | static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | 2124 | static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, |
1813 | u64 off, u64 olen, u64 destoff) | 2125 | u64 off, u64 olen, u64 destoff) |
1814 | { | 2126 | { |
@@ -1925,7 +2237,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1925 | } | 2237 | } |
1926 | 2238 | ||
1927 | /* clone data */ | 2239 | /* clone data */ |
1928 | key.objectid = src->i_ino; | 2240 | key.objectid = btrfs_ino(src); |
1929 | key.type = BTRFS_EXTENT_DATA_KEY; | 2241 | key.type = BTRFS_EXTENT_DATA_KEY; |
1930 | key.offset = 0; | 2242 | key.offset = 0; |
1931 | 2243 | ||
@@ -1952,7 +2264,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1952 | 2264 | ||
1953 | btrfs_item_key_to_cpu(leaf, &key, slot); | 2265 | btrfs_item_key_to_cpu(leaf, &key, slot); |
1954 | if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY || | 2266 | if (btrfs_key_type(&key) > BTRFS_EXTENT_DATA_KEY || |
1955 | key.objectid != src->i_ino) | 2267 | key.objectid != btrfs_ino(src)) |
1956 | break; | 2268 | break; |
1957 | 2269 | ||
1958 | if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) { | 2270 | if (btrfs_key_type(&key) == BTRFS_EXTENT_DATA_KEY) { |
@@ -1988,14 +2300,14 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
1988 | datal = btrfs_file_extent_ram_bytes(leaf, | 2300 | datal = btrfs_file_extent_ram_bytes(leaf, |
1989 | extent); | 2301 | extent); |
1990 | } | 2302 | } |
1991 | btrfs_release_path(root, path); | 2303 | btrfs_release_path(path); |
1992 | 2304 | ||
1993 | if (key.offset + datal <= off || | 2305 | if (key.offset + datal <= off || |
1994 | key.offset >= off+len) | 2306 | key.offset >= off+len) |
1995 | goto next; | 2307 | goto next; |
1996 | 2308 | ||
1997 | memcpy(&new_key, &key, sizeof(new_key)); | 2309 | memcpy(&new_key, &key, sizeof(new_key)); |
1998 | new_key.objectid = inode->i_ino; | 2310 | new_key.objectid = btrfs_ino(inode); |
1999 | if (off <= key.offset) | 2311 | if (off <= key.offset) |
2000 | new_key.offset = key.offset + destoff - off; | 2312 | new_key.offset = key.offset + destoff - off; |
2001 | else | 2313 | else |
@@ -2049,7 +2361,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
2049 | ret = btrfs_inc_extent_ref(trans, root, | 2361 | ret = btrfs_inc_extent_ref(trans, root, |
2050 | disko, diskl, 0, | 2362 | disko, diskl, 0, |
2051 | root->root_key.objectid, | 2363 | root->root_key.objectid, |
2052 | inode->i_ino, | 2364 | btrfs_ino(inode), |
2053 | new_key.offset - datao); | 2365 | new_key.offset - datao); |
2054 | BUG_ON(ret); | 2366 | BUG_ON(ret); |
2055 | } | 2367 | } |
@@ -2098,7 +2410,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
2098 | } | 2410 | } |
2099 | 2411 | ||
2100 | btrfs_mark_buffer_dirty(leaf); | 2412 | btrfs_mark_buffer_dirty(leaf); |
2101 | btrfs_release_path(root, path); | 2413 | btrfs_release_path(path); |
2102 | 2414 | ||
2103 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; | 2415 | inode->i_mtime = inode->i_ctime = CURRENT_TIME; |
2104 | 2416 | ||
@@ -2119,12 +2431,12 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, | |||
2119 | btrfs_end_transaction(trans, root); | 2431 | btrfs_end_transaction(trans, root); |
2120 | } | 2432 | } |
2121 | next: | 2433 | next: |
2122 | btrfs_release_path(root, path); | 2434 | btrfs_release_path(path); |
2123 | key.offset++; | 2435 | key.offset++; |
2124 | } | 2436 | } |
2125 | ret = 0; | 2437 | ret = 0; |
2126 | out: | 2438 | out: |
2127 | btrfs_release_path(root, path); | 2439 | btrfs_release_path(path); |
2128 | unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); | 2440 | unlock_extent(&BTRFS_I(src)->io_tree, off, off+len, GFP_NOFS); |
2129 | out_unlock: | 2441 | out_unlock: |
2130 | mutex_unlock(&src->i_mutex); | 2442 | mutex_unlock(&src->i_mutex); |
@@ -2471,6 +2783,58 @@ static noinline long btrfs_ioctl_wait_sync(struct file *file, void __user *argp) | |||
2471 | return btrfs_wait_for_commit(root, transid); | 2783 | return btrfs_wait_for_commit(root, transid); |
2472 | } | 2784 | } |
2473 | 2785 | ||
2786 | static long btrfs_ioctl_scrub(struct btrfs_root *root, void __user *arg) | ||
2787 | { | ||
2788 | int ret; | ||
2789 | struct btrfs_ioctl_scrub_args *sa; | ||
2790 | |||
2791 | if (!capable(CAP_SYS_ADMIN)) | ||
2792 | return -EPERM; | ||
2793 | |||
2794 | sa = memdup_user(arg, sizeof(*sa)); | ||
2795 | if (IS_ERR(sa)) | ||
2796 | return PTR_ERR(sa); | ||
2797 | |||
2798 | ret = btrfs_scrub_dev(root, sa->devid, sa->start, sa->end, | ||
2799 | &sa->progress, sa->flags & BTRFS_SCRUB_READONLY); | ||
2800 | |||
2801 | if (copy_to_user(arg, sa, sizeof(*sa))) | ||
2802 | ret = -EFAULT; | ||
2803 | |||
2804 | kfree(sa); | ||
2805 | return ret; | ||
2806 | } | ||
2807 | |||
2808 | static long btrfs_ioctl_scrub_cancel(struct btrfs_root *root, void __user *arg) | ||
2809 | { | ||
2810 | if (!capable(CAP_SYS_ADMIN)) | ||
2811 | return -EPERM; | ||
2812 | |||
2813 | return btrfs_scrub_cancel(root); | ||
2814 | } | ||
2815 | |||
2816 | static long btrfs_ioctl_scrub_progress(struct btrfs_root *root, | ||
2817 | void __user *arg) | ||
2818 | { | ||
2819 | struct btrfs_ioctl_scrub_args *sa; | ||
2820 | int ret; | ||
2821 | |||
2822 | if (!capable(CAP_SYS_ADMIN)) | ||
2823 | return -EPERM; | ||
2824 | |||
2825 | sa = memdup_user(arg, sizeof(*sa)); | ||
2826 | if (IS_ERR(sa)) | ||
2827 | return PTR_ERR(sa); | ||
2828 | |||
2829 | ret = btrfs_scrub_progress(root, sa->devid, &sa->progress); | ||
2830 | |||
2831 | if (copy_to_user(arg, sa, sizeof(*sa))) | ||
2832 | ret = -EFAULT; | ||
2833 | |||
2834 | kfree(sa); | ||
2835 | return ret; | ||
2836 | } | ||
2837 | |||
2474 | long btrfs_ioctl(struct file *file, unsigned int | 2838 | long btrfs_ioctl(struct file *file, unsigned int |
2475 | cmd, unsigned long arg) | 2839 | cmd, unsigned long arg) |
2476 | { | 2840 | { |
@@ -2510,6 +2874,10 @@ long btrfs_ioctl(struct file *file, unsigned int | |||
2510 | return btrfs_ioctl_add_dev(root, argp); | 2874 | return btrfs_ioctl_add_dev(root, argp); |
2511 | case BTRFS_IOC_RM_DEV: | 2875 | case BTRFS_IOC_RM_DEV: |
2512 | return btrfs_ioctl_rm_dev(root, argp); | 2876 | return btrfs_ioctl_rm_dev(root, argp); |
2877 | case BTRFS_IOC_FS_INFO: | ||
2878 | return btrfs_ioctl_fs_info(root, argp); | ||
2879 | case BTRFS_IOC_DEV_INFO: | ||
2880 | return btrfs_ioctl_dev_info(root, argp); | ||
2513 | case BTRFS_IOC_BALANCE: | 2881 | case BTRFS_IOC_BALANCE: |
2514 | return btrfs_balance(root->fs_info->dev_root); | 2882 | return btrfs_balance(root->fs_info->dev_root); |
2515 | case BTRFS_IOC_CLONE: | 2883 | case BTRFS_IOC_CLONE: |
@@ -2533,6 +2901,12 @@ long btrfs_ioctl(struct file *file, unsigned int | |||
2533 | return btrfs_ioctl_start_sync(file, argp); | 2901 | return btrfs_ioctl_start_sync(file, argp); |
2534 | case BTRFS_IOC_WAIT_SYNC: | 2902 | case BTRFS_IOC_WAIT_SYNC: |
2535 | return btrfs_ioctl_wait_sync(file, argp); | 2903 | return btrfs_ioctl_wait_sync(file, argp); |
2904 | case BTRFS_IOC_SCRUB: | ||
2905 | return btrfs_ioctl_scrub(root, argp); | ||
2906 | case BTRFS_IOC_SCRUB_CANCEL: | ||
2907 | return btrfs_ioctl_scrub_cancel(root, argp); | ||
2908 | case BTRFS_IOC_SCRUB_PROGRESS: | ||
2909 | return btrfs_ioctl_scrub_progress(root, argp); | ||
2536 | } | 2910 | } |
2537 | 2911 | ||
2538 | return -ENOTTY; | 2912 | return -ENOTTY; |
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h index 8fb382167b13..ad1ea789fcb4 100644 --- a/fs/btrfs/ioctl.h +++ b/fs/btrfs/ioctl.h | |||
@@ -32,6 +32,8 @@ struct btrfs_ioctl_vol_args { | |||
32 | 32 | ||
33 | #define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0) | 33 | #define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0) |
34 | #define BTRFS_SUBVOL_RDONLY (1ULL << 1) | 34 | #define BTRFS_SUBVOL_RDONLY (1ULL << 1) |
35 | #define BTRFS_FSID_SIZE 16 | ||
36 | #define BTRFS_UUID_SIZE 16 | ||
35 | 37 | ||
36 | #define BTRFS_SUBVOL_NAME_MAX 4039 | 38 | #define BTRFS_SUBVOL_NAME_MAX 4039 |
37 | struct btrfs_ioctl_vol_args_v2 { | 39 | struct btrfs_ioctl_vol_args_v2 { |
@@ -42,6 +44,71 @@ struct btrfs_ioctl_vol_args_v2 { | |||
42 | char name[BTRFS_SUBVOL_NAME_MAX + 1]; | 44 | char name[BTRFS_SUBVOL_NAME_MAX + 1]; |
43 | }; | 45 | }; |
44 | 46 | ||
47 | /* | ||
48 | * structure to report errors and progress to userspace, either as a | ||
49 | * result of a finished scrub, a canceled scrub or a progress inquiry | ||
50 | */ | ||
51 | struct btrfs_scrub_progress { | ||
52 | __u64 data_extents_scrubbed; /* # of data extents scrubbed */ | ||
53 | __u64 tree_extents_scrubbed; /* # of tree extents scrubbed */ | ||
54 | __u64 data_bytes_scrubbed; /* # of data bytes scrubbed */ | ||
55 | __u64 tree_bytes_scrubbed; /* # of tree bytes scrubbed */ | ||
56 | __u64 read_errors; /* # of read errors encountered (EIO) */ | ||
57 | __u64 csum_errors; /* # of failed csum checks */ | ||
58 | __u64 verify_errors; /* # of occurrences where the metadata | ||
59 | * of a tree block did not match the | ||
60 | * expected values, like generation or | ||
61 | * logical */ | ||
62 | __u64 no_csum; /* # of 4k data blocks for which no csum | ||
63 | * is present, probably the result of | ||
64 | * data written with nodatasum */ | ||
65 | __u64 csum_discards; /* # of csums for which no data was found | ||
66 | * in the extent tree. */ | ||
67 | __u64 super_errors; /* # of bad super blocks encountered */ | ||
68 | __u64 malloc_errors; /* # of internal kmalloc errors. These | ||
69 | * will likely cause an incomplete | ||
70 | * scrub */ | ||
71 | __u64 uncorrectable_errors; /* # of errors where either no intact | ||
72 | * copy was found or the writeback | ||
73 | * failed */ | ||
74 | __u64 corrected_errors; /* # of errors corrected */ | ||
75 | __u64 last_physical; /* last physical address scrubbed. In | ||
76 | * case a scrub was aborted, this can | ||
77 | * be used to restart the scrub */ | ||
78 | __u64 unverified_errors; /* # of occurrences where a read for a | ||
79 | * full (64k) bio failed, but the re- | ||
80 | * check succeeded for each 4k piece. | ||
81 | * Intermittent error. */ | ||
82 | }; | ||
83 | |||
84 | #define BTRFS_SCRUB_READONLY 1 | ||
85 | struct btrfs_ioctl_scrub_args { | ||
86 | __u64 devid; /* in */ | ||
87 | __u64 start; /* in */ | ||
88 | __u64 end; /* in */ | ||
89 | __u64 flags; /* in */ | ||
90 | struct btrfs_scrub_progress progress; /* out */ | ||
91 | /* pad to 1k */ | ||
92 | __u64 unused[(1024-32-sizeof(struct btrfs_scrub_progress))/8]; | ||
93 | }; | ||
94 | |||
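
btrfs_ioctl_scrub_args is both input and output: devid, start, end and flags select what to scrub, and the embedded progress structure comes back filled in whether the scrub finished, was cancelled, or is merely being queried. A hedged userspace sketch of a read-only scrub of one whole device follows (BTRFS_IOC_SCRUB, defined at the end of this header, blocks until the scrub completes and requires CAP_SYS_ADMIN; the helper name is invented):

/* Hypothetical helper: read-only scrub of one device, then print counters. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "ioctl.h"			/* copied from fs/btrfs/ioctl.h */

int scrub_device_readonly(const char *mnt, __u64 devid)
{
	struct btrfs_ioctl_scrub_args sa;
	int fd = open(mnt, O_RDONLY);

	if (fd < 0)
		return -1;
	memset(&sa, 0, sizeof(sa));
	sa.devid = devid;
	sa.start = 0;
	sa.end = (__u64)-1;			/* whole device */
	sa.flags = BTRFS_SCRUB_READONLY;	/* report errors, do not rewrite data */

	if (ioctl(fd, BTRFS_IOC_SCRUB, &sa) < 0) {
		close(fd);
		return -1;
	}
	printf("read errors %llu, csum errors %llu, corrected %llu\n",
	       (unsigned long long)sa.progress.read_errors,
	       (unsigned long long)sa.progress.csum_errors,
	       (unsigned long long)sa.progress.corrected_errors);
	close(fd);
	return 0;
}
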
95 | #define BTRFS_DEVICE_PATH_NAME_MAX 1024 | ||
96 | struct btrfs_ioctl_dev_info_args { | ||
97 | __u64 devid; /* in/out */ | ||
98 | __u8 uuid[BTRFS_UUID_SIZE]; /* in/out */ | ||
99 | __u64 bytes_used; /* out */ | ||
100 | __u64 total_bytes; /* out */ | ||
101 | __u64 unused[379]; /* pad to 4k */ | ||
102 | __u8 path[BTRFS_DEVICE_PATH_NAME_MAX]; /* out */ | ||
103 | }; | ||
104 | |||
105 | struct btrfs_ioctl_fs_info_args { | ||
106 | __u64 max_id; /* out */ | ||
107 | __u64 num_devices; /* out */ | ||
108 | __u8 fsid[BTRFS_FSID_SIZE]; /* out */ | ||
109 | __u64 reserved[124]; /* pad to 1k */ | ||
110 | }; | ||
111 | |||
45 | #define BTRFS_INO_LOOKUP_PATH_MAX 4080 | 112 | #define BTRFS_INO_LOOKUP_PATH_MAX 4080 |
46 | struct btrfs_ioctl_ino_lookup_args { | 113 | struct btrfs_ioctl_ino_lookup_args { |
47 | __u64 treeid; | 114 | __u64 treeid; |
@@ -114,37 +181,6 @@ struct btrfs_ioctl_clone_range_args { | |||
114 | #define BTRFS_DEFRAG_RANGE_COMPRESS 1 | 181 | #define BTRFS_DEFRAG_RANGE_COMPRESS 1 |
115 | #define BTRFS_DEFRAG_RANGE_START_IO 2 | 182 | #define BTRFS_DEFRAG_RANGE_START_IO 2 |
116 | 183 | ||
117 | struct btrfs_ioctl_defrag_range_args { | ||
118 | /* start of the defrag operation */ | ||
119 | __u64 start; | ||
120 | |||
121 | /* number of bytes to defrag, use (u64)-1 to say all */ | ||
122 | __u64 len; | ||
123 | |||
124 | /* | ||
125 | * flags for the operation, which can include turning | ||
126 | * on compression for this one defrag | ||
127 | */ | ||
128 | __u64 flags; | ||
129 | |||
130 | /* | ||
131 | * any extent bigger than this will be considered | ||
132 | * already defragged. Use 0 to take the kernel default | ||
133 | * Use 1 to say every single extent must be rewritten | ||
134 | */ | ||
135 | __u32 extent_thresh; | ||
136 | |||
137 | /* | ||
138 | * which compression method to use if turning on compression | ||
139 | * for this defrag operation. If unspecified, zlib will | ||
140 | * be used | ||
141 | */ | ||
142 | __u32 compress_type; | ||
143 | |||
144 | /* spare for later */ | ||
145 | __u32 unused[4]; | ||
146 | }; | ||
147 | |||
148 | struct btrfs_ioctl_space_info { | 184 | struct btrfs_ioctl_space_info { |
149 | __u64 flags; | 185 | __u64 flags; |
150 | __u64 total_bytes; | 186 | __u64 total_bytes; |
@@ -203,4 +239,13 @@ struct btrfs_ioctl_space_args { | |||
203 | struct btrfs_ioctl_vol_args_v2) | 239 | struct btrfs_ioctl_vol_args_v2) |
204 | #define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64) | 240 | #define BTRFS_IOC_SUBVOL_GETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 25, __u64) |
205 | #define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64) | 241 | #define BTRFS_IOC_SUBVOL_SETFLAGS _IOW(BTRFS_IOCTL_MAGIC, 26, __u64) |
242 | #define BTRFS_IOC_SCRUB _IOWR(BTRFS_IOCTL_MAGIC, 27, \ | ||
243 | struct btrfs_ioctl_scrub_args) | ||
244 | #define BTRFS_IOC_SCRUB_CANCEL _IO(BTRFS_IOCTL_MAGIC, 28) | ||
245 | #define BTRFS_IOC_SCRUB_PROGRESS _IOWR(BTRFS_IOCTL_MAGIC, 29, \ | ||
246 | struct btrfs_ioctl_scrub_args) | ||
247 | #define BTRFS_IOC_DEV_INFO _IOWR(BTRFS_IOCTL_MAGIC, 30, \ | ||
248 | struct btrfs_ioctl_dev_info_args) | ||
249 | #define BTRFS_IOC_FS_INFO _IOR(BTRFS_IOCTL_MAGIC, 31, \ | ||
250 | struct btrfs_ioctl_fs_info_args) | ||
206 | #endif | 251 | #endif |
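
Since BTRFS_IOC_SCRUB blocks for the duration of the scrub, progress reporting is meant to happen from a second process or thread through BTRFS_IOC_SCRUB_PROGRESS, which fills the same btrfs_scrub_progress structure for a scrub that is still running on the given devid. A hedged polling sketch (invented helper name; the exact error code returned when no scrub is running is not asserted here):

/* Hypothetical helper: print progress of a running scrub once per second. */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "ioctl.h"			/* copied from fs/btrfs/ioctl.h */

void watch_scrub(const char *mnt, __u64 devid)
{
	struct btrfs_ioctl_scrub_args sa;
	int fd = open(mnt, O_RDONLY);

	if (fd < 0)
		return;
	for (;;) {
		memset(&sa, 0, sizeof(sa));
		sa.devid = devid;
		if (ioctl(fd, BTRFS_IOC_SCRUB_PROGRESS, &sa) < 0)
			break;			/* no scrub running on this devid */
		printf("%llu data bytes scrubbed, last physical %llu\n",
		       (unsigned long long)sa.progress.data_bytes_scrubbed,
		       (unsigned long long)sa.progress.last_physical);
		sleep(1);
	}
	close(fd);
}
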
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c index 6151f2ea38bb..66fa43dc3f0f 100644 --- a/fs/btrfs/locking.c +++ b/fs/btrfs/locking.c | |||
@@ -185,31 +185,6 @@ sleep: | |||
185 | return 0; | 185 | return 0; |
186 | } | 186 | } |
187 | 187 | ||
188 | /* | ||
189 | * Very quick trylock, this does not spin or schedule. It returns | ||
190 | * 1 with the spinlock held if it was able to take the lock, or it | ||
191 | * returns zero if it was unable to take the lock. | ||
192 | * | ||
193 | * After this call, scheduling is not safe without first calling | ||
194 | * btrfs_set_lock_blocking() | ||
195 | */ | ||
196 | int btrfs_try_tree_lock(struct extent_buffer *eb) | ||
197 | { | ||
198 | if (spin_trylock(&eb->lock)) { | ||
199 | if (test_bit(EXTENT_BUFFER_BLOCKING, &eb->bflags)) { | ||
200 | /* | ||
201 | * we've got the spinlock, but the real owner is | ||
202 | * blocking. Drop the spinlock and return failure | ||
203 | */ | ||
204 | spin_unlock(&eb->lock); | ||
205 | return 0; | ||
206 | } | ||
207 | return 1; | ||
208 | } | ||
209 | /* someone else has the spinlock giveup */ | ||
210 | return 0; | ||
211 | } | ||
212 | |||
213 | int btrfs_tree_unlock(struct extent_buffer *eb) | 188 | int btrfs_tree_unlock(struct extent_buffer *eb) |
214 | { | 189 | { |
215 | /* | 190 | /* |
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h index 6c4ce457168c..5c33a560a2f1 100644 --- a/fs/btrfs/locking.h +++ b/fs/btrfs/locking.h | |||
@@ -21,8 +21,6 @@ | |||
21 | 21 | ||
22 | int btrfs_tree_lock(struct extent_buffer *eb); | 22 | int btrfs_tree_lock(struct extent_buffer *eb); |
23 | int btrfs_tree_unlock(struct extent_buffer *eb); | 23 | int btrfs_tree_unlock(struct extent_buffer *eb); |
24 | |||
25 | int btrfs_try_tree_lock(struct extent_buffer *eb); | ||
26 | int btrfs_try_spin_lock(struct extent_buffer *eb); | 24 | int btrfs_try_spin_lock(struct extent_buffer *eb); |
27 | 25 | ||
28 | void btrfs_set_lock_blocking(struct extent_buffer *eb); | 26 | void btrfs_set_lock_blocking(struct extent_buffer *eb); |
diff --git a/fs/btrfs/ref-cache.c b/fs/btrfs/ref-cache.c index a97314cf6bd6..82d569cb6267 100644 --- a/fs/btrfs/ref-cache.c +++ b/fs/btrfs/ref-cache.c | |||
@@ -23,56 +23,6 @@ | |||
23 | #include "ref-cache.h" | 23 | #include "ref-cache.h" |
24 | #include "transaction.h" | 24 | #include "transaction.h" |
25 | 25 | ||
26 | /* | ||
27 | * leaf refs are used to cache the information about which extents | ||
28 | * a given leaf has references on. This allows us to process that leaf | ||
29 | * in btrfs_drop_snapshot without needing to read it back from disk. | ||
30 | */ | ||
31 | |||
32 | /* | ||
33 | * kmalloc a leaf reference struct and update the counters for the | ||
34 | * total ref cache size | ||
35 | */ | ||
36 | struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, | ||
37 | int nr_extents) | ||
38 | { | ||
39 | struct btrfs_leaf_ref *ref; | ||
40 | size_t size = btrfs_leaf_ref_size(nr_extents); | ||
41 | |||
42 | ref = kmalloc(size, GFP_NOFS); | ||
43 | if (ref) { | ||
44 | spin_lock(&root->fs_info->ref_cache_lock); | ||
45 | root->fs_info->total_ref_cache_size += size; | ||
46 | spin_unlock(&root->fs_info->ref_cache_lock); | ||
47 | |||
48 | memset(ref, 0, sizeof(*ref)); | ||
49 | atomic_set(&ref->usage, 1); | ||
50 | INIT_LIST_HEAD(&ref->list); | ||
51 | } | ||
52 | return ref; | ||
53 | } | ||
54 | |||
55 | /* | ||
56 | * free a leaf reference struct and update the counters for the | ||
57 | * total ref cache size | ||
58 | */ | ||
59 | void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) | ||
60 | { | ||
61 | if (!ref) | ||
62 | return; | ||
63 | WARN_ON(atomic_read(&ref->usage) == 0); | ||
64 | if (atomic_dec_and_test(&ref->usage)) { | ||
65 | size_t size = btrfs_leaf_ref_size(ref->nritems); | ||
66 | |||
67 | BUG_ON(ref->in_tree); | ||
68 | kfree(ref); | ||
69 | |||
70 | spin_lock(&root->fs_info->ref_cache_lock); | ||
71 | root->fs_info->total_ref_cache_size -= size; | ||
72 | spin_unlock(&root->fs_info->ref_cache_lock); | ||
73 | } | ||
74 | } | ||
75 | |||
76 | static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, | 26 | static struct rb_node *tree_insert(struct rb_root *root, u64 bytenr, |
77 | struct rb_node *node) | 27 | struct rb_node *node) |
78 | { | 28 | { |
@@ -116,117 +66,3 @@ static struct rb_node *tree_search(struct rb_root *root, u64 bytenr) | |||
116 | } | 66 | } |
117 | return NULL; | 67 | return NULL; |
118 | } | 68 | } |
119 | |||
120 | int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, | ||
121 | int shared) | ||
122 | { | ||
123 | struct btrfs_leaf_ref *ref = NULL; | ||
124 | struct btrfs_leaf_ref_tree *tree = root->ref_tree; | ||
125 | |||
126 | if (shared) | ||
127 | tree = &root->fs_info->shared_ref_tree; | ||
128 | if (!tree) | ||
129 | return 0; | ||
130 | |||
131 | spin_lock(&tree->lock); | ||
132 | while (!list_empty(&tree->list)) { | ||
133 | ref = list_entry(tree->list.next, struct btrfs_leaf_ref, list); | ||
134 | BUG_ON(ref->tree != tree); | ||
135 | if (ref->root_gen > max_root_gen) | ||
136 | break; | ||
137 | if (!xchg(&ref->in_tree, 0)) { | ||
138 | cond_resched_lock(&tree->lock); | ||
139 | continue; | ||
140 | } | ||
141 | |||
142 | rb_erase(&ref->rb_node, &tree->root); | ||
143 | list_del_init(&ref->list); | ||
144 | |||
145 | spin_unlock(&tree->lock); | ||
146 | btrfs_free_leaf_ref(root, ref); | ||
147 | cond_resched(); | ||
148 | spin_lock(&tree->lock); | ||
149 | } | ||
150 | spin_unlock(&tree->lock); | ||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * find the leaf ref for a given extent. This returns the ref struct with | ||
156 | * a usage reference incremented | ||
157 | */ | ||
158 | struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root, | ||
159 | u64 bytenr) | ||
160 | { | ||
161 | struct rb_node *rb; | ||
162 | struct btrfs_leaf_ref *ref = NULL; | ||
163 | struct btrfs_leaf_ref_tree *tree = root->ref_tree; | ||
164 | again: | ||
165 | if (tree) { | ||
166 | spin_lock(&tree->lock); | ||
167 | rb = tree_search(&tree->root, bytenr); | ||
168 | if (rb) | ||
169 | ref = rb_entry(rb, struct btrfs_leaf_ref, rb_node); | ||
170 | if (ref) | ||
171 | atomic_inc(&ref->usage); | ||
172 | spin_unlock(&tree->lock); | ||
173 | if (ref) | ||
174 | return ref; | ||
175 | } | ||
176 | if (tree != &root->fs_info->shared_ref_tree) { | ||
177 | tree = &root->fs_info->shared_ref_tree; | ||
178 | goto again; | ||
179 | } | ||
180 | return NULL; | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * add a fully filled in leaf ref struct | ||
185 | * remove all the refs older than a given root generation | ||
186 | */ | ||
187 | int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref, | ||
188 | int shared) | ||
189 | { | ||
190 | int ret = 0; | ||
191 | struct rb_node *rb; | ||
192 | struct btrfs_leaf_ref_tree *tree = root->ref_tree; | ||
193 | |||
194 | if (shared) | ||
195 | tree = &root->fs_info->shared_ref_tree; | ||
196 | |||
197 | spin_lock(&tree->lock); | ||
198 | rb = tree_insert(&tree->root, ref->bytenr, &ref->rb_node); | ||
199 | if (rb) { | ||
200 | ret = -EEXIST; | ||
201 | } else { | ||
202 | atomic_inc(&ref->usage); | ||
203 | ref->tree = tree; | ||
204 | ref->in_tree = 1; | ||
205 | list_add_tail(&ref->list, &tree->list); | ||
206 | } | ||
207 | spin_unlock(&tree->lock); | ||
208 | return ret; | ||
209 | } | ||
210 | |||
211 | /* | ||
212 | * remove a single leaf ref from the tree. This drops the ref held by the tree | ||
213 | * only | ||
214 | */ | ||
215 | int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref) | ||
216 | { | ||
217 | struct btrfs_leaf_ref_tree *tree; | ||
218 | |||
219 | if (!xchg(&ref->in_tree, 0)) | ||
220 | return 0; | ||
221 | |||
222 | tree = ref->tree; | ||
223 | spin_lock(&tree->lock); | ||
224 | |||
225 | rb_erase(&ref->rb_node, &tree->root); | ||
226 | list_del_init(&ref->list); | ||
227 | |||
228 | spin_unlock(&tree->lock); | ||
229 | |||
230 | btrfs_free_leaf_ref(root, ref); | ||
231 | return 0; | ||
232 | } | ||
diff --git a/fs/btrfs/ref-cache.h b/fs/btrfs/ref-cache.h index e2a55cb2072b..24f7001f6387 100644 --- a/fs/btrfs/ref-cache.h +++ b/fs/btrfs/ref-cache.h | |||
@@ -49,28 +49,4 @@ static inline size_t btrfs_leaf_ref_size(int nr_extents) | |||
49 | return sizeof(struct btrfs_leaf_ref) + | 49 | return sizeof(struct btrfs_leaf_ref) + |
50 | sizeof(struct btrfs_extent_info) * nr_extents; | 50 | sizeof(struct btrfs_extent_info) * nr_extents; |
51 | } | 51 | } |
52 | |||
53 | static inline void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree) | ||
54 | { | ||
55 | tree->root = RB_ROOT; | ||
56 | INIT_LIST_HEAD(&tree->list); | ||
57 | spin_lock_init(&tree->lock); | ||
58 | } | ||
59 | |||
60 | static inline int btrfs_leaf_ref_tree_empty(struct btrfs_leaf_ref_tree *tree) | ||
61 | { | ||
62 | return RB_EMPTY_ROOT(&tree->root); | ||
63 | } | ||
64 | |||
65 | void btrfs_leaf_ref_tree_init(struct btrfs_leaf_ref_tree *tree); | ||
66 | struct btrfs_leaf_ref *btrfs_alloc_leaf_ref(struct btrfs_root *root, | ||
67 | int nr_extents); | ||
68 | void btrfs_free_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref); | ||
69 | struct btrfs_leaf_ref *btrfs_lookup_leaf_ref(struct btrfs_root *root, | ||
70 | u64 bytenr); | ||
71 | int btrfs_add_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref, | ||
72 | int shared); | ||
73 | int btrfs_remove_leaf_refs(struct btrfs_root *root, u64 max_root_gen, | ||
74 | int shared); | ||
75 | int btrfs_remove_leaf_ref(struct btrfs_root *root, struct btrfs_leaf_ref *ref); | ||
76 | #endif | 52 | #endif |
diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index f340f7c99d09..ca38eca70af0 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c | |||
@@ -30,6 +30,7 @@ | |||
30 | #include "btrfs_inode.h" | 30 | #include "btrfs_inode.h" |
31 | #include "async-thread.h" | 31 | #include "async-thread.h" |
32 | #include "free-space-cache.h" | 32 | #include "free-space-cache.h" |
33 | #include "inode-map.h" | ||
33 | 34 | ||
34 | /* | 35 | /* |
35 | * backref_node, mapping_node and tree_block start with this | 36 | * backref_node, mapping_node and tree_block start with this |
@@ -507,6 +508,7 @@ static int update_backref_cache(struct btrfs_trans_handle *trans, | |||
507 | return 1; | 508 | return 1; |
508 | } | 509 | } |
509 | 510 | ||
511 | |||
510 | static int should_ignore_root(struct btrfs_root *root) | 512 | static int should_ignore_root(struct btrfs_root *root) |
511 | { | 513 | { |
512 | struct btrfs_root *reloc_root; | 514 | struct btrfs_root *reloc_root; |
@@ -529,7 +531,6 @@ static int should_ignore_root(struct btrfs_root *root) | |||
529 | */ | 531 | */ |
530 | return 1; | 532 | return 1; |
531 | } | 533 | } |
532 | |||
533 | /* | 534 | /* |
534 | * find reloc tree by address of tree root | 535 | * find reloc tree by address of tree root |
535 | */ | 536 | */ |
@@ -961,7 +962,7 @@ again: | |||
961 | lower = upper; | 962 | lower = upper; |
962 | upper = NULL; | 963 | upper = NULL; |
963 | } | 964 | } |
964 | btrfs_release_path(root, path2); | 965 | btrfs_release_path(path2); |
965 | next: | 966 | next: |
966 | if (ptr < end) { | 967 | if (ptr < end) { |
967 | ptr += btrfs_extent_inline_ref_size(key.type); | 968 | ptr += btrfs_extent_inline_ref_size(key.type); |
@@ -974,7 +975,7 @@ next: | |||
974 | if (ptr >= end) | 975 | if (ptr >= end) |
975 | path1->slots[0]++; | 976 | path1->slots[0]++; |
976 | } | 977 | } |
977 | btrfs_release_path(rc->extent_root, path1); | 978 | btrfs_release_path(path1); |
978 | 979 | ||
979 | cur->checked = 1; | 980 | cur->checked = 1; |
980 | WARN_ON(exist); | 981 | WARN_ON(exist); |
@@ -1409,9 +1410,9 @@ again: | |||
1409 | prev = node; | 1410 | prev = node; |
1410 | entry = rb_entry(node, struct btrfs_inode, rb_node); | 1411 | entry = rb_entry(node, struct btrfs_inode, rb_node); |
1411 | 1412 | ||
1412 | if (objectid < entry->vfs_inode.i_ino) | 1413 | if (objectid < btrfs_ino(&entry->vfs_inode)) |
1413 | node = node->rb_left; | 1414 | node = node->rb_left; |
1414 | else if (objectid > entry->vfs_inode.i_ino) | 1415 | else if (objectid > btrfs_ino(&entry->vfs_inode)) |
1415 | node = node->rb_right; | 1416 | node = node->rb_right; |
1416 | else | 1417 | else |
1417 | break; | 1418 | break; |
@@ -1419,7 +1420,7 @@ again: | |||
1419 | if (!node) { | 1420 | if (!node) { |
1420 | while (prev) { | 1421 | while (prev) { |
1421 | entry = rb_entry(prev, struct btrfs_inode, rb_node); | 1422 | entry = rb_entry(prev, struct btrfs_inode, rb_node); |
1422 | if (objectid <= entry->vfs_inode.i_ino) { | 1423 | if (objectid <= btrfs_ino(&entry->vfs_inode)) { |
1423 | node = prev; | 1424 | node = prev; |
1424 | break; | 1425 | break; |
1425 | } | 1426 | } |
@@ -1434,7 +1435,7 @@ again: | |||
1434 | return inode; | 1435 | return inode; |
1435 | } | 1436 | } |
1436 | 1437 | ||
1437 | objectid = entry->vfs_inode.i_ino + 1; | 1438 | objectid = btrfs_ino(&entry->vfs_inode) + 1; |
1438 | if (cond_resched_lock(&root->inode_lock)) | 1439 | if (cond_resched_lock(&root->inode_lock)) |
1439 | goto again; | 1440 | goto again; |
1440 | 1441 | ||
@@ -1470,7 +1471,7 @@ static int get_new_location(struct inode *reloc_inode, u64 *new_bytenr, | |||
1470 | return -ENOMEM; | 1471 | return -ENOMEM; |
1471 | 1472 | ||
1472 | bytenr -= BTRFS_I(reloc_inode)->index_cnt; | 1473 | bytenr -= BTRFS_I(reloc_inode)->index_cnt; |
1473 | ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino, | 1474 | ret = btrfs_lookup_file_extent(NULL, root, path, btrfs_ino(reloc_inode), |
1474 | bytenr, 0); | 1475 | bytenr, 0); |
1475 | if (ret < 0) | 1476 | if (ret < 0) |
1476 | goto out; | 1477 | goto out; |
@@ -1558,11 +1559,11 @@ int replace_file_extents(struct btrfs_trans_handle *trans, | |||
1558 | if (first) { | 1559 | if (first) { |
1559 | inode = find_next_inode(root, key.objectid); | 1560 | inode = find_next_inode(root, key.objectid); |
1560 | first = 0; | 1561 | first = 0; |
1561 | } else if (inode && inode->i_ino < key.objectid) { | 1562 | } else if (inode && btrfs_ino(inode) < key.objectid) { |
1562 | btrfs_add_delayed_iput(inode); | 1563 | btrfs_add_delayed_iput(inode); |
1563 | inode = find_next_inode(root, key.objectid); | 1564 | inode = find_next_inode(root, key.objectid); |
1564 | } | 1565 | } |
1565 | if (inode && inode->i_ino == key.objectid) { | 1566 | if (inode && btrfs_ino(inode) == key.objectid) { |
1566 | end = key.offset + | 1567 | end = key.offset + |
1567 | btrfs_file_extent_num_bytes(leaf, fi); | 1568 | btrfs_file_extent_num_bytes(leaf, fi); |
1568 | WARN_ON(!IS_ALIGNED(key.offset, | 1569 | WARN_ON(!IS_ALIGNED(key.offset, |
@@ -1749,7 +1750,7 @@ again: | |||
1749 | 1750 | ||
1750 | btrfs_node_key_to_cpu(path->nodes[level], &key, | 1751 | btrfs_node_key_to_cpu(path->nodes[level], &key, |
1751 | path->slots[level]); | 1752 | path->slots[level]); |
1752 | btrfs_release_path(src, path); | 1753 | btrfs_release_path(path); |
1753 | 1754 | ||
1754 | path->lowest_level = level; | 1755 | path->lowest_level = level; |
1755 | ret = btrfs_search_slot(trans, src, &key, path, 0, 1); | 1756 | ret = btrfs_search_slot(trans, src, &key, path, 0, 1); |
@@ -1893,6 +1894,7 @@ static int invalidate_extent_cache(struct btrfs_root *root, | |||
1893 | struct inode *inode = NULL; | 1894 | struct inode *inode = NULL; |
1894 | u64 objectid; | 1895 | u64 objectid; |
1895 | u64 start, end; | 1896 | u64 start, end; |
1897 | u64 ino; | ||
1896 | 1898 | ||
1897 | objectid = min_key->objectid; | 1899 | objectid = min_key->objectid; |
1898 | while (1) { | 1900 | while (1) { |
@@ -1905,17 +1907,18 @@ static int invalidate_extent_cache(struct btrfs_root *root, | |||
1905 | inode = find_next_inode(root, objectid); | 1907 | inode = find_next_inode(root, objectid); |
1906 | if (!inode) | 1908 | if (!inode) |
1907 | break; | 1909 | break; |
1910 | ino = btrfs_ino(inode); | ||
1908 | 1911 | ||
1909 | if (inode->i_ino > max_key->objectid) { | 1912 | if (ino > max_key->objectid) { |
1910 | iput(inode); | 1913 | iput(inode); |
1911 | break; | 1914 | break; |
1912 | } | 1915 | } |
1913 | 1916 | ||
1914 | objectid = inode->i_ino + 1; | 1917 | objectid = ino + 1; |
1915 | if (!S_ISREG(inode->i_mode)) | 1918 | if (!S_ISREG(inode->i_mode)) |
1916 | continue; | 1919 | continue; |
1917 | 1920 | ||
1918 | if (unlikely(min_key->objectid == inode->i_ino)) { | 1921 | if (unlikely(min_key->objectid == ino)) { |
1919 | if (min_key->type > BTRFS_EXTENT_DATA_KEY) | 1922 | if (min_key->type > BTRFS_EXTENT_DATA_KEY) |
1920 | continue; | 1923 | continue; |
1921 | if (min_key->type < BTRFS_EXTENT_DATA_KEY) | 1924 | if (min_key->type < BTRFS_EXTENT_DATA_KEY) |
@@ -1928,7 +1931,7 @@ static int invalidate_extent_cache(struct btrfs_root *root, | |||
1928 | start = 0; | 1931 | start = 0; |
1929 | } | 1932 | } |
1930 | 1933 | ||
1931 | if (unlikely(max_key->objectid == inode->i_ino)) { | 1934 | if (unlikely(max_key->objectid == ino)) { |
1932 | if (max_key->type < BTRFS_EXTENT_DATA_KEY) | 1935 | if (max_key->type < BTRFS_EXTENT_DATA_KEY) |
1933 | continue; | 1936 | continue; |
1934 | if (max_key->type > BTRFS_EXTENT_DATA_KEY) { | 1937 | if (max_key->type > BTRFS_EXTENT_DATA_KEY) { |
@@ -2496,7 +2499,7 @@ static int do_relocation(struct btrfs_trans_handle *trans, | |||
2496 | path->locks[upper->level] = 0; | 2499 | path->locks[upper->level] = 0; |
2497 | 2500 | ||
2498 | slot = path->slots[upper->level]; | 2501 | slot = path->slots[upper->level]; |
2499 | btrfs_release_path(NULL, path); | 2502 | btrfs_release_path(path); |
2500 | } else { | 2503 | } else { |
2501 | ret = btrfs_bin_search(upper->eb, key, upper->level, | 2504 | ret = btrfs_bin_search(upper->eb, key, upper->level, |
2502 | &slot); | 2505 | &slot); |
@@ -2737,7 +2740,7 @@ static int relocate_tree_block(struct btrfs_trans_handle *trans, | |||
2737 | } else { | 2740 | } else { |
2738 | path->lowest_level = node->level; | 2741 | path->lowest_level = node->level; |
2739 | ret = btrfs_search_slot(trans, root, key, path, 0, 1); | 2742 | ret = btrfs_search_slot(trans, root, key, path, 0, 1); |
2740 | btrfs_release_path(root, path); | 2743 | btrfs_release_path(path); |
2741 | if (ret > 0) | 2744 | if (ret > 0) |
2742 | ret = 0; | 2745 | ret = 0; |
2743 | } | 2746 | } |
@@ -2870,7 +2873,7 @@ int setup_extent_mapping(struct inode *inode, u64 start, u64 end, | |||
2870 | struct extent_map *em; | 2873 | struct extent_map *em; |
2871 | int ret = 0; | 2874 | int ret = 0; |
2872 | 2875 | ||
2873 | em = alloc_extent_map(GFP_NOFS); | 2876 | em = alloc_extent_map(); |
2874 | if (!em) | 2877 | if (!em) |
2875 | return -ENOMEM; | 2878 | return -ENOMEM; |
2876 | 2879 | ||
@@ -3119,7 +3122,7 @@ static int add_tree_block(struct reloc_control *rc, | |||
3119 | #endif | 3122 | #endif |
3120 | } | 3123 | } |
3121 | 3124 | ||
3122 | btrfs_release_path(rc->extent_root, path); | 3125 | btrfs_release_path(path); |
3123 | 3126 | ||
3124 | BUG_ON(level == -1); | 3127 | BUG_ON(level == -1); |
3125 | 3128 | ||
@@ -3220,7 +3223,7 @@ static int delete_block_group_cache(struct btrfs_fs_info *fs_info, | |||
3220 | key.offset = 0; | 3223 | key.offset = 0; |
3221 | 3224 | ||
3222 | inode = btrfs_iget(fs_info->sb, &key, root, NULL); | 3225 | inode = btrfs_iget(fs_info->sb, &key, root, NULL); |
3223 | if (!inode || IS_ERR(inode) || is_bad_inode(inode)) { | 3226 | if (IS_ERR_OR_NULL(inode) || is_bad_inode(inode)) { |
3224 | if (inode && !IS_ERR(inode)) | 3227 | if (inode && !IS_ERR(inode)) |
3225 | iput(inode); | 3228 | iput(inode); |
3226 | return -ENOENT; | 3229 | return -ENOENT; |
@@ -3505,7 +3508,7 @@ int add_data_references(struct reloc_control *rc, | |||
3505 | } | 3508 | } |
3506 | path->slots[0]++; | 3509 | path->slots[0]++; |
3507 | } | 3510 | } |
3508 | btrfs_release_path(rc->extent_root, path); | 3511 | btrfs_release_path(path); |
3509 | if (err) | 3512 | if (err) |
3510 | free_block_list(blocks); | 3513 | free_block_list(blocks); |
3511 | return err; | 3514 | return err; |
@@ -3568,7 +3571,7 @@ next: | |||
3568 | EXTENT_DIRTY); | 3571 | EXTENT_DIRTY); |
3569 | 3572 | ||
3570 | if (ret == 0 && start <= key.objectid) { | 3573 | if (ret == 0 && start <= key.objectid) { |
3571 | btrfs_release_path(rc->extent_root, path); | 3574 | btrfs_release_path(path); |
3572 | rc->search_start = end + 1; | 3575 | rc->search_start = end + 1; |
3573 | } else { | 3576 | } else { |
3574 | rc->search_start = key.objectid + key.offset; | 3577 | rc->search_start = key.objectid + key.offset; |
@@ -3576,7 +3579,7 @@ next: | |||
3576 | return 0; | 3579 | return 0; |
3577 | } | 3580 | } |
3578 | } | 3581 | } |
3579 | btrfs_release_path(rc->extent_root, path); | 3582 | btrfs_release_path(path); |
3580 | return ret; | 3583 | return ret; |
3581 | } | 3584 | } |
3582 | 3585 | ||
@@ -3713,7 +3716,7 @@ restart: | |||
3713 | flags = BTRFS_EXTENT_FLAG_DATA; | 3716 | flags = BTRFS_EXTENT_FLAG_DATA; |
3714 | 3717 | ||
3715 | if (path_change) { | 3718 | if (path_change) { |
3716 | btrfs_release_path(rc->extent_root, path); | 3719 | btrfs_release_path(path); |
3717 | 3720 | ||
3718 | path->search_commit_root = 1; | 3721 | path->search_commit_root = 1; |
3719 | path->skip_locking = 1; | 3722 | path->skip_locking = 1; |
@@ -3736,7 +3739,7 @@ restart: | |||
3736 | (flags & BTRFS_EXTENT_FLAG_DATA)) { | 3739 | (flags & BTRFS_EXTENT_FLAG_DATA)) { |
3737 | ret = add_data_references(rc, &key, path, &blocks); | 3740 | ret = add_data_references(rc, &key, path, &blocks); |
3738 | } else { | 3741 | } else { |
3739 | btrfs_release_path(rc->extent_root, path); | 3742 | btrfs_release_path(path); |
3740 | ret = 0; | 3743 | ret = 0; |
3741 | } | 3744 | } |
3742 | if (ret < 0) { | 3745 | if (ret < 0) { |
@@ -3799,7 +3802,7 @@ restart: | |||
3799 | } | 3802 | } |
3800 | } | 3803 | } |
3801 | 3804 | ||
3802 | btrfs_release_path(rc->extent_root, path); | 3805 | btrfs_release_path(path); |
3803 | clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, | 3806 | clear_extent_bits(&rc->processed_blocks, 0, (u64)-1, EXTENT_DIRTY, |
3804 | GFP_NOFS); | 3807 | GFP_NOFS); |
3805 | 3808 | ||
@@ -3867,7 +3870,7 @@ static int __insert_orphan_inode(struct btrfs_trans_handle *trans, | |||
3867 | btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | | 3870 | btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS | |
3868 | BTRFS_INODE_PREALLOC); | 3871 | BTRFS_INODE_PREALLOC); |
3869 | btrfs_mark_buffer_dirty(leaf); | 3872 | btrfs_mark_buffer_dirty(leaf); |
3870 | btrfs_release_path(root, path); | 3873 | btrfs_release_path(path); |
3871 | out: | 3874 | out: |
3872 | btrfs_free_path(path); | 3875 | btrfs_free_path(path); |
3873 | return ret; | 3876 | return ret; |
@@ -3897,7 +3900,7 @@ struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info, | |||
3897 | if (IS_ERR(trans)) | 3900 | if (IS_ERR(trans)) |
3898 | return ERR_CAST(trans); | 3901 | return ERR_CAST(trans); |
3899 | 3902 | ||
3900 | err = btrfs_find_free_objectid(trans, root, objectid, &objectid); | 3903 | err = btrfs_find_free_objectid(root, &objectid); |
3901 | if (err) | 3904 | if (err) |
3902 | goto out; | 3905 | goto out; |
3903 | 3906 | ||
@@ -3935,7 +3938,7 @@ static struct reloc_control *alloc_reloc_control(void) | |||
3935 | INIT_LIST_HEAD(&rc->reloc_roots); | 3938 | INIT_LIST_HEAD(&rc->reloc_roots); |
3936 | backref_cache_init(&rc->backref_cache); | 3939 | backref_cache_init(&rc->backref_cache); |
3937 | mapping_tree_init(&rc->reloc_root_tree); | 3940 | mapping_tree_init(&rc->reloc_root_tree); |
3938 | extent_io_tree_init(&rc->processed_blocks, NULL, GFP_NOFS); | 3941 | extent_io_tree_init(&rc->processed_blocks, NULL); |
3939 | return rc; | 3942 | return rc; |
3940 | } | 3943 | } |
3941 | 3944 | ||
@@ -4109,7 +4112,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) | |||
4109 | } | 4112 | } |
4110 | leaf = path->nodes[0]; | 4113 | leaf = path->nodes[0]; |
4111 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | 4114 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); |
4112 | btrfs_release_path(root->fs_info->tree_root, path); | 4115 | btrfs_release_path(path); |
4113 | 4116 | ||
4114 | if (key.objectid != BTRFS_TREE_RELOC_OBJECTID || | 4117 | if (key.objectid != BTRFS_TREE_RELOC_OBJECTID || |
4115 | key.type != BTRFS_ROOT_ITEM_KEY) | 4118 | key.type != BTRFS_ROOT_ITEM_KEY) |
@@ -4141,7 +4144,7 @@ int btrfs_recover_relocation(struct btrfs_root *root) | |||
4141 | 4144 | ||
4142 | key.offset--; | 4145 | key.offset--; |
4143 | } | 4146 | } |
4144 | btrfs_release_path(root->fs_info->tree_root, path); | 4147 | btrfs_release_path(path); |
4145 | 4148 | ||
4146 | if (list_empty(&reloc_roots)) | 4149 | if (list_empty(&reloc_roots)) |
4147 | goto out; | 4150 | goto out; |
@@ -4242,7 +4245,7 @@ int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len) | |||
4242 | 4245 | ||
4243 | disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt; | 4246 | disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt; |
4244 | ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr, | 4247 | ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr, |
4245 | disk_bytenr + len - 1, &list); | 4248 | disk_bytenr + len - 1, &list, 0); |
4246 | 4249 | ||
4247 | while (!list_empty(&list)) { | 4250 | while (!list_empty(&list)) { |
4248 | sums = list_entry(list.next, struct btrfs_ordered_sum, list); | 4251 | sums = list_entry(list.next, struct btrfs_ordered_sum, list); |
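The relocation.c hunks above repeatedly turn btrfs_release_path(root, path) into the single-argument btrfs_release_path(path); the root argument is redundant because a path already holds references to every node it has pinned. The following is only a rough userspace model of that idea (struct path, node_put and path_release are invented names for illustration, not the kernel code):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_LEVEL 8

    struct node { int refs; };

    struct path {
        struct node *nodes[MAX_LEVEL];
        int slots[MAX_LEVEL];
    };

    static void node_put(struct node *n)
    {
        if (n && --n->refs == 0)
            free(n);
    }

    /* analogous to the new single-argument call shape: the path knows
     * everything needed to drop its own node references */
    static void path_release(struct path *p)
    {
        for (int i = 0; i < MAX_LEVEL; i++) {
            node_put(p->nodes[i]);
            p->nodes[i] = NULL;
            p->slots[i] = 0;
        }
    }

    int main(void)
    {
        struct path p = { { NULL }, { 0 } };

        p.nodes[0] = calloc(1, sizeof(*p.nodes[0]));
        p.nodes[0]->refs = 1;
        path_release(&p);   /* drops the held reference, resets slots */
        printf("path released\n");
        return 0;
    }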
diff --git a/fs/btrfs/root-tree.c b/fs/btrfs/root-tree.c index 6928bff62daa..ebe45443de06 100644 --- a/fs/btrfs/root-tree.c +++ b/fs/btrfs/root-tree.c | |||
@@ -22,53 +22,6 @@ | |||
22 | #include "print-tree.h" | 22 | #include "print-tree.h" |
23 | 23 | ||
24 | /* | 24 | /* |
25 | * search forward for a root, starting with objectid 'search_start' | ||
26 | * if a root key is found, the objectid we find is filled into 'found_objectid' | ||
27 | * and 0 is returned. < 0 is returned on error, 1 if there is nothing | ||
28 | * left in the tree. | ||
29 | */ | ||
30 | int btrfs_search_root(struct btrfs_root *root, u64 search_start, | ||
31 | u64 *found_objectid) | ||
32 | { | ||
33 | struct btrfs_path *path; | ||
34 | struct btrfs_key search_key; | ||
35 | int ret; | ||
36 | |||
37 | root = root->fs_info->tree_root; | ||
38 | search_key.objectid = search_start; | ||
39 | search_key.type = (u8)-1; | ||
40 | search_key.offset = (u64)-1; | ||
41 | |||
42 | path = btrfs_alloc_path(); | ||
43 | BUG_ON(!path); | ||
44 | again: | ||
45 | ret = btrfs_search_slot(NULL, root, &search_key, path, 0, 0); | ||
46 | if (ret < 0) | ||
47 | goto out; | ||
48 | if (ret == 0) { | ||
49 | ret = 1; | ||
50 | goto out; | ||
51 | } | ||
52 | if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) { | ||
53 | ret = btrfs_next_leaf(root, path); | ||
54 | if (ret) | ||
55 | goto out; | ||
56 | } | ||
57 | btrfs_item_key_to_cpu(path->nodes[0], &search_key, path->slots[0]); | ||
58 | if (search_key.type != BTRFS_ROOT_ITEM_KEY) { | ||
59 | search_key.offset++; | ||
60 | btrfs_release_path(root, path); | ||
61 | goto again; | ||
62 | } | ||
63 | ret = 0; | ||
64 | *found_objectid = search_key.objectid; | ||
65 | |||
66 | out: | ||
67 | btrfs_free_path(path); | ||
68 | return ret; | ||
69 | } | ||
70 | |||
71 | /* | ||
72 | * lookup the root with the highest offset for a given objectid. The key we do | 25 | * lookup the root with the highest offset for a given objectid. The key we do |
73 | * find is copied into 'key'. If we find something return 0, otherwise 1, < 0 | 26 | * find is copied into 'key'. If we find something return 0, otherwise 1, < 0 |
74 | * on error. | 27 | * on error. |
@@ -230,7 +183,7 @@ again: | |||
230 | 183 | ||
231 | memcpy(&found_key, &key, sizeof(key)); | 184 | memcpy(&found_key, &key, sizeof(key)); |
232 | key.offset++; | 185 | key.offset++; |
233 | btrfs_release_path(root, path); | 186 | btrfs_release_path(path); |
234 | dead_root = | 187 | dead_root = |
235 | btrfs_read_fs_root_no_radix(root->fs_info->tree_root, | 188 | btrfs_read_fs_root_no_radix(root->fs_info->tree_root, |
236 | &found_key); | 189 | &found_key); |
@@ -292,7 +245,7 @@ int btrfs_find_orphan_roots(struct btrfs_root *tree_root) | |||
292 | } | 245 | } |
293 | 246 | ||
294 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | 247 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); |
295 | btrfs_release_path(tree_root, path); | 248 | btrfs_release_path(path); |
296 | 249 | ||
297 | if (key.objectid != BTRFS_ORPHAN_OBJECTID || | 250 | if (key.objectid != BTRFS_ORPHAN_OBJECTID || |
298 | key.type != BTRFS_ORPHAN_ITEM_KEY) | 251 | key.type != BTRFS_ORPHAN_ITEM_KEY) |
@@ -385,18 +338,22 @@ again: | |||
385 | *sequence = btrfs_root_ref_sequence(leaf, ref); | 338 | *sequence = btrfs_root_ref_sequence(leaf, ref); |
386 | 339 | ||
387 | ret = btrfs_del_item(trans, tree_root, path); | 340 | ret = btrfs_del_item(trans, tree_root, path); |
388 | BUG_ON(ret); | 341 | if (ret) { |
342 | err = ret; | ||
343 | goto out; | ||
344 | } | ||
389 | } else | 345 | } else |
390 | err = -ENOENT; | 346 | err = -ENOENT; |
391 | 347 | ||
392 | if (key.type == BTRFS_ROOT_BACKREF_KEY) { | 348 | if (key.type == BTRFS_ROOT_BACKREF_KEY) { |
393 | btrfs_release_path(tree_root, path); | 349 | btrfs_release_path(path); |
394 | key.objectid = ref_id; | 350 | key.objectid = ref_id; |
395 | key.type = BTRFS_ROOT_REF_KEY; | 351 | key.type = BTRFS_ROOT_REF_KEY; |
396 | key.offset = root_id; | 352 | key.offset = root_id; |
397 | goto again; | 353 | goto again; |
398 | } | 354 | } |
399 | 355 | ||
356 | out: | ||
400 | btrfs_free_path(path); | 357 | btrfs_free_path(path); |
401 | return err; | 358 | return err; |
402 | } | 359 | } |
@@ -463,7 +420,7 @@ again: | |||
463 | btrfs_mark_buffer_dirty(leaf); | 420 | btrfs_mark_buffer_dirty(leaf); |
464 | 421 | ||
465 | if (key.type == BTRFS_ROOT_BACKREF_KEY) { | 422 | if (key.type == BTRFS_ROOT_BACKREF_KEY) { |
466 | btrfs_release_path(tree_root, path); | 423 | btrfs_release_path(path); |
467 | key.objectid = ref_id; | 424 | key.objectid = ref_id; |
468 | key.type = BTRFS_ROOT_REF_KEY; | 425 | key.type = BTRFS_ROOT_REF_KEY; |
469 | key.offset = root_id; | 426 | key.offset = root_id; |
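The btrfs_del_root_ref hunk above drops a BUG_ON(ret) in favour of recording the error and jumping to a shared cleanup label. A minimal standalone illustration of that error-propagation shape (do_delete and del_ref are made-up names, not the kernel code):

    #include <stdio.h>
    #include <errno.h>

    /* stand-in for the operation that used to be followed by BUG_ON(ret) */
    static int do_delete(int fail)
    {
        return fail ? -EIO : 0;
    }

    static int del_ref(int fail)
    {
        int err = 0;
        int ret;

        ret = do_delete(fail);
        if (ret) {          /* was: BUG_ON(ret); now the error is returned */
            err = ret;
            goto out;
        }
        /* ... further lookups/deletes would go here ... */
    out:
        /* shared cleanup, like btrfs_free_path(path) in the real function */
        return err;
    }

    int main(void)
    {
        printf("ok=%d fail=%d\n", del_ref(0), del_ref(1));
        return 0;
    }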
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c new file mode 100644 index 000000000000..6dfed0c27ac3 --- /dev/null +++ b/fs/btrfs/scrub.c | |||
@@ -0,0 +1,1369 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2011 STRATO. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or | ||
5 | * modify it under the terms of the GNU General Public | ||
6 | * License v2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public | ||
14 | * License along with this program; if not, write to the | ||
15 | * Free Software Foundation, Inc., 59 Temple Place - Suite 330, | ||
16 | * Boston, MA 021110-1307, USA. | ||
17 | */ | ||
18 | |||
19 | #include <linux/sched.h> | ||
20 | #include <linux/pagemap.h> | ||
21 | #include <linux/writeback.h> | ||
22 | #include <linux/blkdev.h> | ||
23 | #include <linux/rbtree.h> | ||
24 | #include <linux/slab.h> | ||
25 | #include <linux/workqueue.h> | ||
26 | #include "ctree.h" | ||
27 | #include "volumes.h" | ||
28 | #include "disk-io.h" | ||
29 | #include "ordered-data.h" | ||
30 | |||
31 | /* | ||
32 | * This is only the first step towards a full-features scrub. It reads all | ||
33 | * extent and super block and verifies the checksums. In case a bad checksum | ||
34 | * is found or the extent cannot be read, good data will be written back if | ||
35 | * any can be found. | ||
36 | * | ||
37 | * Future enhancements: | ||
38 | * - To enhance the performance, better read-ahead strategies for the | ||
39 | * extent-tree can be employed. | ||
40 | * - In case an unrepairable extent is encountered, track which files are | ||
41 | * affected and report them | ||
42 | * - In case of a read error on files with nodatasum, map the file and read | ||
43 | * the extent to trigger a writeback of the good copy | ||
44 | * - track and record media errors, throw out bad devices | ||
45 | * - add a mode to also read unallocated space | ||
46 | * - make the prefetch cancellable | ||
47 | */ | ||
48 | |||
49 | struct scrub_bio; | ||
50 | struct scrub_page; | ||
51 | struct scrub_dev; | ||
52 | static void scrub_bio_end_io(struct bio *bio, int err); | ||
53 | static void scrub_checksum(struct btrfs_work *work); | ||
54 | static int scrub_checksum_data(struct scrub_dev *sdev, | ||
55 | struct scrub_page *spag, void *buffer); | ||
56 | static int scrub_checksum_tree_block(struct scrub_dev *sdev, | ||
57 | struct scrub_page *spag, u64 logical, | ||
58 | void *buffer); | ||
59 | static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer); | ||
60 | static int scrub_fixup_check(struct scrub_bio *sbio, int ix); | ||
61 | static void scrub_fixup_end_io(struct bio *bio, int err); | ||
62 | static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector, | ||
63 | struct page *page); | ||
64 | static void scrub_fixup(struct scrub_bio *sbio, int ix); | ||
65 | |||
66 | #define SCRUB_PAGES_PER_BIO 16 /* 64k per bio */ | ||
67 | #define SCRUB_BIOS_PER_DEV 16 /* 1 MB per device in flight */ | ||
68 | |||
69 | struct scrub_page { | ||
70 | u64 flags; /* extent flags */ | ||
71 | u64 generation; | ||
72 | u64 mirror_num; | ||
73 | int have_csum; | ||
74 | u8 csum[BTRFS_CSUM_SIZE]; | ||
75 | }; | ||
76 | |||
77 | struct scrub_bio { | ||
78 | int index; | ||
79 | struct scrub_dev *sdev; | ||
80 | struct bio *bio; | ||
81 | int err; | ||
82 | u64 logical; | ||
83 | u64 physical; | ||
84 | struct scrub_page spag[SCRUB_PAGES_PER_BIO]; | ||
85 | u64 count; | ||
86 | int next_free; | ||
87 | struct btrfs_work work; | ||
88 | }; | ||
89 | |||
90 | struct scrub_dev { | ||
91 | struct scrub_bio *bios[SCRUB_BIOS_PER_DEV]; | ||
92 | struct btrfs_device *dev; | ||
93 | int first_free; | ||
94 | int curr; | ||
95 | atomic_t in_flight; | ||
96 | spinlock_t list_lock; | ||
97 | wait_queue_head_t list_wait; | ||
98 | u16 csum_size; | ||
99 | struct list_head csum_list; | ||
100 | atomic_t cancel_req; | ||
101 | int readonly; | ||
102 | /* | ||
103 | * statistics | ||
104 | */ | ||
105 | struct btrfs_scrub_progress stat; | ||
106 | spinlock_t stat_lock; | ||
107 | }; | ||
108 | |||
109 | static void scrub_free_csums(struct scrub_dev *sdev) | ||
110 | { | ||
111 | while (!list_empty(&sdev->csum_list)) { | ||
112 | struct btrfs_ordered_sum *sum; | ||
113 | sum = list_first_entry(&sdev->csum_list, | ||
114 | struct btrfs_ordered_sum, list); | ||
115 | list_del(&sum->list); | ||
116 | kfree(sum); | ||
117 | } | ||
118 | } | ||
119 | |||
120 | static noinline_for_stack void scrub_free_dev(struct scrub_dev *sdev) | ||
121 | { | ||
122 | int i; | ||
123 | int j; | ||
124 | struct page *last_page; | ||
125 | |||
126 | if (!sdev) | ||
127 | return; | ||
128 | |||
129 | for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { | ||
130 | struct scrub_bio *sbio = sdev->bios[i]; | ||
131 | struct bio *bio; | ||
132 | |||
133 | if (!sbio) | ||
134 | break; | ||
135 | |||
136 | bio = sbio->bio; | ||
137 | if (bio) { | ||
138 | last_page = NULL; | ||
139 | for (j = 0; j < bio->bi_vcnt; ++j) { | ||
140 | if (bio->bi_io_vec[j].bv_page == last_page) | ||
141 | continue; | ||
142 | last_page = bio->bi_io_vec[j].bv_page; | ||
143 | __free_page(last_page); | ||
144 | } | ||
145 | bio_put(bio); | ||
146 | } | ||
147 | kfree(sbio); | ||
148 | } | ||
149 | |||
150 | scrub_free_csums(sdev); | ||
151 | kfree(sdev); | ||
152 | } | ||
153 | |||
154 | static noinline_for_stack | ||
155 | struct scrub_dev *scrub_setup_dev(struct btrfs_device *dev) | ||
156 | { | ||
157 | struct scrub_dev *sdev; | ||
158 | int i; | ||
159 | int j; | ||
160 | int ret; | ||
161 | struct btrfs_fs_info *fs_info = dev->dev_root->fs_info; | ||
162 | |||
163 | sdev = kzalloc(sizeof(*sdev), GFP_NOFS); | ||
164 | if (!sdev) | ||
165 | goto nomem; | ||
166 | sdev->dev = dev; | ||
167 | for (i = 0; i < SCRUB_BIOS_PER_DEV; ++i) { | ||
168 | struct bio *bio; | ||
169 | struct scrub_bio *sbio; | ||
170 | |||
171 | sbio = kzalloc(sizeof(*sbio), GFP_NOFS); | ||
172 | if (!sbio) | ||
173 | goto nomem; | ||
174 | sdev->bios[i] = sbio; | ||
175 | |||
176 | bio = bio_kmalloc(GFP_NOFS, SCRUB_PAGES_PER_BIO); | ||
177 | if (!bio) | ||
178 | goto nomem; | ||
179 | |||
180 | sbio->index = i; | ||
181 | sbio->sdev = sdev; | ||
182 | sbio->bio = bio; | ||
183 | sbio->count = 0; | ||
184 | sbio->work.func = scrub_checksum; | ||
185 | bio->bi_private = sdev->bios[i]; | ||
186 | bio->bi_end_io = scrub_bio_end_io; | ||
187 | bio->bi_sector = 0; | ||
188 | bio->bi_bdev = dev->bdev; | ||
189 | bio->bi_size = 0; | ||
190 | |||
191 | for (j = 0; j < SCRUB_PAGES_PER_BIO; ++j) { | ||
192 | struct page *page; | ||
193 | page = alloc_page(GFP_NOFS); | ||
194 | if (!page) | ||
195 | goto nomem; | ||
196 | |||
197 | ret = bio_add_page(bio, page, PAGE_SIZE, 0); | ||
198 | if (!ret) | ||
199 | goto nomem; | ||
200 | } | ||
201 | WARN_ON(bio->bi_vcnt != SCRUB_PAGES_PER_BIO); | ||
202 | |||
203 | if (i != SCRUB_BIOS_PER_DEV-1) | ||
204 | sdev->bios[i]->next_free = i + 1; | ||
205 | else | ||
206 | sdev->bios[i]->next_free = -1; | ||
207 | } | ||
208 | sdev->first_free = 0; | ||
209 | sdev->curr = -1; | ||
210 | atomic_set(&sdev->in_flight, 0); | ||
211 | atomic_set(&sdev->cancel_req, 0); | ||
212 | sdev->csum_size = btrfs_super_csum_size(&fs_info->super_copy); | ||
213 | INIT_LIST_HEAD(&sdev->csum_list); | ||
214 | |||
215 | spin_lock_init(&sdev->list_lock); | ||
216 | spin_lock_init(&sdev->stat_lock); | ||
217 | init_waitqueue_head(&sdev->list_wait); | ||
218 | return sdev; | ||
219 | |||
220 | nomem: | ||
221 | scrub_free_dev(sdev); | ||
222 | return ERR_PTR(-ENOMEM); | ||
223 | } | ||
224 | |||
225 | /* | ||
226 | * scrub_recheck_error gets called when either verification of the page | ||
227 | * failed or the bio failed to read, e.g. with EIO. In the latter case, | ||
228 | * recheck_error gets called for every page in the bio, even though only | ||
229 | * one may be bad | ||
230 | */ | ||
231 | static void scrub_recheck_error(struct scrub_bio *sbio, int ix) | ||
232 | { | ||
233 | if (sbio->err) { | ||
234 | if (scrub_fixup_io(READ, sbio->sdev->dev->bdev, | ||
235 | (sbio->physical + ix * PAGE_SIZE) >> 9, | ||
236 | sbio->bio->bi_io_vec[ix].bv_page) == 0) { | ||
237 | if (scrub_fixup_check(sbio, ix) == 0) | ||
238 | return; | ||
239 | } | ||
240 | } | ||
241 | |||
242 | scrub_fixup(sbio, ix); | ||
243 | } | ||
244 | |||
245 | static int scrub_fixup_check(struct scrub_bio *sbio, int ix) | ||
246 | { | ||
247 | int ret = 1; | ||
248 | struct page *page; | ||
249 | void *buffer; | ||
250 | u64 flags = sbio->spag[ix].flags; | ||
251 | |||
252 | page = sbio->bio->bi_io_vec[ix].bv_page; | ||
253 | buffer = kmap_atomic(page, KM_USER0); | ||
254 | if (flags & BTRFS_EXTENT_FLAG_DATA) { | ||
255 | ret = scrub_checksum_data(sbio->sdev, | ||
256 | sbio->spag + ix, buffer); | ||
257 | } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { | ||
258 | ret = scrub_checksum_tree_block(sbio->sdev, | ||
259 | sbio->spag + ix, | ||
260 | sbio->logical + ix * PAGE_SIZE, | ||
261 | buffer); | ||
262 | } else { | ||
263 | WARN_ON(1); | ||
264 | } | ||
265 | kunmap_atomic(buffer, KM_USER0); | ||
266 | |||
267 | return ret; | ||
268 | } | ||
269 | |||
270 | static void scrub_fixup_end_io(struct bio *bio, int err) | ||
271 | { | ||
272 | complete((struct completion *)bio->bi_private); | ||
273 | } | ||
274 | |||
275 | static void scrub_fixup(struct scrub_bio *sbio, int ix) | ||
276 | { | ||
277 | struct scrub_dev *sdev = sbio->sdev; | ||
278 | struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; | ||
279 | struct btrfs_mapping_tree *map_tree = &fs_info->mapping_tree; | ||
280 | struct btrfs_multi_bio *multi = NULL; | ||
281 | u64 logical = sbio->logical + ix * PAGE_SIZE; | ||
282 | u64 length; | ||
283 | int i; | ||
284 | int ret; | ||
285 | DECLARE_COMPLETION_ONSTACK(complete); | ||
286 | |||
287 | if ((sbio->spag[ix].flags & BTRFS_EXTENT_FLAG_DATA) && | ||
288 | (sbio->spag[ix].have_csum == 0)) { | ||
289 | /* | ||
290 | * nodatasum, don't try to fix anything | ||
291 | * FIXME: we can do better, open the inode and trigger a | ||
292 | * writeback | ||
293 | */ | ||
294 | goto uncorrectable; | ||
295 | } | ||
296 | |||
297 | length = PAGE_SIZE; | ||
298 | ret = btrfs_map_block(map_tree, REQ_WRITE, logical, &length, | ||
299 | &multi, 0); | ||
300 | if (ret || !multi || length < PAGE_SIZE) { | ||
301 | printk(KERN_ERR | ||
302 | "scrub_fixup: btrfs_map_block failed us for %llu\n", | ||
303 | (unsigned long long)logical); | ||
304 | WARN_ON(1); | ||
305 | return; | ||
306 | } | ||
307 | |||
308 | if (multi->num_stripes == 1) | ||
309 | /* there aren't any replicas */ | ||
310 | goto uncorrectable; | ||
311 | |||
312 | /* | ||
313 | * first find a good copy | ||
314 | */ | ||
315 | for (i = 0; i < multi->num_stripes; ++i) { | ||
316 | if (i == sbio->spag[ix].mirror_num) | ||
317 | continue; | ||
318 | |||
319 | if (scrub_fixup_io(READ, multi->stripes[i].dev->bdev, | ||
320 | multi->stripes[i].physical >> 9, | ||
321 | sbio->bio->bi_io_vec[ix].bv_page)) { | ||
322 | /* I/O-error, this is not a good copy */ | ||
323 | continue; | ||
324 | } | ||
325 | |||
326 | if (scrub_fixup_check(sbio, ix) == 0) | ||
327 | break; | ||
328 | } | ||
329 | if (i == multi->num_stripes) | ||
330 | goto uncorrectable; | ||
331 | |||
332 | if (!sdev->readonly) { | ||
333 | /* | ||
334 | * bi_io_vec[ix].bv_page now contains good data, write it back | ||
335 | */ | ||
336 | if (scrub_fixup_io(WRITE, sdev->dev->bdev, | ||
337 | (sbio->physical + ix * PAGE_SIZE) >> 9, | ||
338 | sbio->bio->bi_io_vec[ix].bv_page)) { | ||
339 | /* I/O-error, writeback failed, give up */ | ||
340 | goto uncorrectable; | ||
341 | } | ||
342 | } | ||
343 | |||
344 | kfree(multi); | ||
345 | spin_lock(&sdev->stat_lock); | ||
346 | ++sdev->stat.corrected_errors; | ||
347 | spin_unlock(&sdev->stat_lock); | ||
348 | |||
349 | if (printk_ratelimit()) | ||
350 | printk(KERN_ERR "btrfs: fixed up at %llu\n", | ||
351 | (unsigned long long)logical); | ||
352 | return; | ||
353 | |||
354 | uncorrectable: | ||
355 | kfree(multi); | ||
356 | spin_lock(&sdev->stat_lock); | ||
357 | ++sdev->stat.uncorrectable_errors; | ||
358 | spin_unlock(&sdev->stat_lock); | ||
359 | |||
360 | if (printk_ratelimit()) | ||
361 | printk(KERN_ERR "btrfs: unable to fixup at %llu\n", | ||
362 | (unsigned long long)logical); | ||
363 | } | ||
364 | |||
365 | static int scrub_fixup_io(int rw, struct block_device *bdev, sector_t sector, | ||
366 | struct page *page) | ||
367 | { | ||
368 | struct bio *bio = NULL; | ||
369 | int ret; | ||
370 | DECLARE_COMPLETION_ONSTACK(complete); | ||
371 | |||
372 | /* we are going to wait on this IO */ | ||
373 | rw |= REQ_SYNC; | ||
374 | |||
375 | bio = bio_alloc(GFP_NOFS, 1); | ||
376 | bio->bi_bdev = bdev; | ||
377 | bio->bi_sector = sector; | ||
378 | bio_add_page(bio, page, PAGE_SIZE, 0); | ||
379 | bio->bi_end_io = scrub_fixup_end_io; | ||
380 | bio->bi_private = &complete; | ||
381 | submit_bio(rw, bio); | ||
382 | |||
383 | wait_for_completion(&complete); | ||
384 | |||
385 | ret = !test_bit(BIO_UPTODATE, &bio->bi_flags); | ||
386 | bio_put(bio); | ||
387 | return ret; | ||
388 | } | ||
389 | |||
390 | static void scrub_bio_end_io(struct bio *bio, int err) | ||
391 | { | ||
392 | struct scrub_bio *sbio = bio->bi_private; | ||
393 | struct scrub_dev *sdev = sbio->sdev; | ||
394 | struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; | ||
395 | |||
396 | sbio->err = err; | ||
397 | |||
398 | btrfs_queue_worker(&fs_info->scrub_workers, &sbio->work); | ||
399 | } | ||
400 | |||
401 | static void scrub_checksum(struct btrfs_work *work) | ||
402 | { | ||
403 | struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); | ||
404 | struct scrub_dev *sdev = sbio->sdev; | ||
405 | struct page *page; | ||
406 | void *buffer; | ||
407 | int i; | ||
408 | u64 flags; | ||
409 | u64 logical; | ||
410 | int ret; | ||
411 | |||
412 | if (sbio->err) { | ||
413 | for (i = 0; i < sbio->count; ++i) | ||
414 | scrub_recheck_error(sbio, i); | ||
415 | |||
416 | sbio->bio->bi_flags &= ~(BIO_POOL_MASK - 1); | ||
417 | sbio->bio->bi_flags |= 1 << BIO_UPTODATE; | ||
418 | sbio->bio->bi_phys_segments = 0; | ||
419 | sbio->bio->bi_idx = 0; | ||
420 | |||
421 | for (i = 0; i < sbio->count; i++) { | ||
422 | struct bio_vec *bi; | ||
423 | bi = &sbio->bio->bi_io_vec[i]; | ||
424 | bi->bv_offset = 0; | ||
425 | bi->bv_len = PAGE_SIZE; | ||
426 | } | ||
427 | |||
428 | spin_lock(&sdev->stat_lock); | ||
429 | ++sdev->stat.read_errors; | ||
430 | spin_unlock(&sdev->stat_lock); | ||
431 | goto out; | ||
432 | } | ||
433 | for (i = 0; i < sbio->count; ++i) { | ||
434 | page = sbio->bio->bi_io_vec[i].bv_page; | ||
435 | buffer = kmap_atomic(page, KM_USER0); | ||
436 | flags = sbio->spag[i].flags; | ||
437 | logical = sbio->logical + i * PAGE_SIZE; | ||
438 | ret = 0; | ||
439 | if (flags & BTRFS_EXTENT_FLAG_DATA) { | ||
440 | ret = scrub_checksum_data(sdev, sbio->spag + i, buffer); | ||
441 | } else if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { | ||
442 | ret = scrub_checksum_tree_block(sdev, sbio->spag + i, | ||
443 | logical, buffer); | ||
444 | } else if (flags & BTRFS_EXTENT_FLAG_SUPER) { | ||
445 | BUG_ON(i); | ||
446 | (void)scrub_checksum_super(sbio, buffer); | ||
447 | } else { | ||
448 | WARN_ON(1); | ||
449 | } | ||
450 | kunmap_atomic(buffer, KM_USER0); | ||
451 | if (ret) | ||
452 | scrub_recheck_error(sbio, i); | ||
453 | } | ||
454 | |||
455 | out: | ||
456 | spin_lock(&sdev->list_lock); | ||
457 | sbio->next_free = sdev->first_free; | ||
458 | sdev->first_free = sbio->index; | ||
459 | spin_unlock(&sdev->list_lock); | ||
460 | atomic_dec(&sdev->in_flight); | ||
461 | wake_up(&sdev->list_wait); | ||
462 | } | ||
463 | |||
464 | static int scrub_checksum_data(struct scrub_dev *sdev, | ||
465 | struct scrub_page *spag, void *buffer) | ||
466 | { | ||
467 | u8 csum[BTRFS_CSUM_SIZE]; | ||
468 | u32 crc = ~(u32)0; | ||
469 | int fail = 0; | ||
470 | struct btrfs_root *root = sdev->dev->dev_root; | ||
471 | |||
472 | if (!spag->have_csum) | ||
473 | return 0; | ||
474 | |||
475 | crc = btrfs_csum_data(root, buffer, crc, PAGE_SIZE); | ||
476 | btrfs_csum_final(crc, csum); | ||
477 | if (memcmp(csum, spag->csum, sdev->csum_size)) | ||
478 | fail = 1; | ||
479 | |||
480 | spin_lock(&sdev->stat_lock); | ||
481 | ++sdev->stat.data_extents_scrubbed; | ||
482 | sdev->stat.data_bytes_scrubbed += PAGE_SIZE; | ||
483 | if (fail) | ||
484 | ++sdev->stat.csum_errors; | ||
485 | spin_unlock(&sdev->stat_lock); | ||
486 | |||
487 | return fail; | ||
488 | } | ||
489 | |||
490 | static int scrub_checksum_tree_block(struct scrub_dev *sdev, | ||
491 | struct scrub_page *spag, u64 logical, | ||
492 | void *buffer) | ||
493 | { | ||
494 | struct btrfs_header *h; | ||
495 | struct btrfs_root *root = sdev->dev->dev_root; | ||
496 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
497 | u8 csum[BTRFS_CSUM_SIZE]; | ||
498 | u32 crc = ~(u32)0; | ||
499 | int fail = 0; | ||
500 | int crc_fail = 0; | ||
501 | |||
502 | /* | ||
503 | * we don't use the getter functions here, as we | ||
504 | * a) don't have an extent buffer and | ||
505 | * b) the page is already kmapped | ||
506 | */ | ||
507 | h = (struct btrfs_header *)buffer; | ||
508 | |||
509 | if (logical != le64_to_cpu(h->bytenr)) | ||
510 | ++fail; | ||
511 | |||
512 | if (spag->generation != le64_to_cpu(h->generation)) | ||
513 | ++fail; | ||
514 | |||
515 | if (memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE)) | ||
516 | ++fail; | ||
517 | |||
518 | if (memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, | ||
519 | BTRFS_UUID_SIZE)) | ||
520 | ++fail; | ||
521 | |||
522 | crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc, | ||
523 | PAGE_SIZE - BTRFS_CSUM_SIZE); | ||
524 | btrfs_csum_final(crc, csum); | ||
525 | if (memcmp(csum, h->csum, sdev->csum_size)) | ||
526 | ++crc_fail; | ||
527 | |||
528 | spin_lock(&sdev->stat_lock); | ||
529 | ++sdev->stat.tree_extents_scrubbed; | ||
530 | sdev->stat.tree_bytes_scrubbed += PAGE_SIZE; | ||
531 | if (crc_fail) | ||
532 | ++sdev->stat.csum_errors; | ||
533 | if (fail) | ||
534 | ++sdev->stat.verify_errors; | ||
535 | spin_unlock(&sdev->stat_lock); | ||
536 | |||
537 | return fail || crc_fail; | ||
538 | } | ||
539 | |||
540 | static int scrub_checksum_super(struct scrub_bio *sbio, void *buffer) | ||
541 | { | ||
542 | struct btrfs_super_block *s; | ||
543 | u64 logical; | ||
544 | struct scrub_dev *sdev = sbio->sdev; | ||
545 | struct btrfs_root *root = sdev->dev->dev_root; | ||
546 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
547 | u8 csum[BTRFS_CSUM_SIZE]; | ||
548 | u32 crc = ~(u32)0; | ||
549 | int fail = 0; | ||
550 | |||
551 | s = (struct btrfs_super_block *)buffer; | ||
552 | logical = sbio->logical; | ||
553 | |||
554 | if (logical != le64_to_cpu(s->bytenr)) | ||
555 | ++fail; | ||
556 | |||
557 | if (sbio->spag[0].generation != le64_to_cpu(s->generation)) | ||
558 | ++fail; | ||
559 | |||
560 | if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE)) | ||
561 | ++fail; | ||
562 | |||
563 | crc = btrfs_csum_data(root, buffer + BTRFS_CSUM_SIZE, crc, | ||
564 | PAGE_SIZE - BTRFS_CSUM_SIZE); | ||
565 | btrfs_csum_final(crc, csum); | ||
566 | if (memcmp(csum, s->csum, sbio->sdev->csum_size)) | ||
567 | ++fail; | ||
568 | |||
569 | if (fail) { | ||
570 | /* | ||
571 | * if we find an error in a super block, we just report it. | ||
572 | * They will get written with the next transaction commit | ||
573 | * anyway | ||
574 | */ | ||
575 | spin_lock(&sdev->stat_lock); | ||
576 | ++sdev->stat.super_errors; | ||
577 | spin_unlock(&sdev->stat_lock); | ||
578 | } | ||
579 | |||
580 | return fail; | ||
581 | } | ||
582 | |||
583 | static int scrub_submit(struct scrub_dev *sdev) | ||
584 | { | ||
585 | struct scrub_bio *sbio; | ||
586 | |||
587 | if (sdev->curr == -1) | ||
588 | return 0; | ||
589 | |||
590 | sbio = sdev->bios[sdev->curr]; | ||
591 | |||
592 | sbio->bio->bi_sector = sbio->physical >> 9; | ||
593 | sbio->bio->bi_size = sbio->count * PAGE_SIZE; | ||
594 | sbio->bio->bi_next = NULL; | ||
595 | sbio->bio->bi_flags |= 1 << BIO_UPTODATE; | ||
596 | sbio->bio->bi_comp_cpu = -1; | ||
597 | sbio->bio->bi_bdev = sdev->dev->bdev; | ||
598 | sbio->err = 0; | ||
599 | sdev->curr = -1; | ||
600 | atomic_inc(&sdev->in_flight); | ||
601 | |||
602 | submit_bio(0, sbio->bio); | ||
603 | |||
604 | return 0; | ||
605 | } | ||
606 | |||
607 | static int scrub_page(struct scrub_dev *sdev, u64 logical, u64 len, | ||
608 | u64 physical, u64 flags, u64 gen, u64 mirror_num, | ||
609 | u8 *csum, int force) | ||
610 | { | ||
611 | struct scrub_bio *sbio; | ||
612 | |||
613 | again: | ||
614 | /* | ||
615 | * grab a fresh bio or wait for one to become available | ||
616 | */ | ||
617 | while (sdev->curr == -1) { | ||
618 | spin_lock(&sdev->list_lock); | ||
619 | sdev->curr = sdev->first_free; | ||
620 | if (sdev->curr != -1) { | ||
621 | sdev->first_free = sdev->bios[sdev->curr]->next_free; | ||
622 | sdev->bios[sdev->curr]->next_free = -1; | ||
623 | sdev->bios[sdev->curr]->count = 0; | ||
624 | spin_unlock(&sdev->list_lock); | ||
625 | } else { | ||
626 | spin_unlock(&sdev->list_lock); | ||
627 | wait_event(sdev->list_wait, sdev->first_free != -1); | ||
628 | } | ||
629 | } | ||
630 | sbio = sdev->bios[sdev->curr]; | ||
631 | if (sbio->count == 0) { | ||
632 | sbio->physical = physical; | ||
633 | sbio->logical = logical; | ||
634 | } else if (sbio->physical + sbio->count * PAGE_SIZE != physical || | ||
635 | sbio->logical + sbio->count * PAGE_SIZE != logical) { | ||
636 | scrub_submit(sdev); | ||
637 | goto again; | ||
638 | } | ||
639 | sbio->spag[sbio->count].flags = flags; | ||
640 | sbio->spag[sbio->count].generation = gen; | ||
641 | sbio->spag[sbio->count].have_csum = 0; | ||
642 | sbio->spag[sbio->count].mirror_num = mirror_num; | ||
643 | if (csum) { | ||
644 | sbio->spag[sbio->count].have_csum = 1; | ||
645 | memcpy(sbio->spag[sbio->count].csum, csum, sdev->csum_size); | ||
646 | } | ||
647 | ++sbio->count; | ||
648 | if (sbio->count == SCRUB_PAGES_PER_BIO || force) | ||
649 | scrub_submit(sdev); | ||
650 | |||
651 | return 0; | ||
652 | } | ||
653 | |||
654 | static int scrub_find_csum(struct scrub_dev *sdev, u64 logical, u64 len, | ||
655 | u8 *csum) | ||
656 | { | ||
657 | struct btrfs_ordered_sum *sum = NULL; | ||
658 | int ret = 0; | ||
659 | unsigned long i; | ||
660 | unsigned long num_sectors; | ||
661 | u32 sectorsize = sdev->dev->dev_root->sectorsize; | ||
662 | |||
663 | while (!list_empty(&sdev->csum_list)) { | ||
664 | sum = list_first_entry(&sdev->csum_list, | ||
665 | struct btrfs_ordered_sum, list); | ||
666 | if (sum->bytenr > logical) | ||
667 | return 0; | ||
668 | if (sum->bytenr + sum->len > logical) | ||
669 | break; | ||
670 | |||
671 | ++sdev->stat.csum_discards; | ||
672 | list_del(&sum->list); | ||
673 | kfree(sum); | ||
674 | sum = NULL; | ||
675 | } | ||
676 | if (!sum) | ||
677 | return 0; | ||
678 | |||
679 | num_sectors = sum->len / sectorsize; | ||
680 | for (i = 0; i < num_sectors; ++i) { | ||
681 | if (sum->sums[i].bytenr == logical) { | ||
682 | memcpy(csum, &sum->sums[i].sum, sdev->csum_size); | ||
683 | ret = 1; | ||
684 | break; | ||
685 | } | ||
686 | } | ||
687 | if (ret && i == num_sectors - 1) { | ||
688 | list_del(&sum->list); | ||
689 | kfree(sum); | ||
690 | } | ||
691 | return ret; | ||
692 | } | ||
693 | |||
694 | /* scrub extent tries to collect up to 64 kB for each bio */ | ||
695 | static int scrub_extent(struct scrub_dev *sdev, u64 logical, u64 len, | ||
696 | u64 physical, u64 flags, u64 gen, u64 mirror_num) | ||
697 | { | ||
698 | int ret; | ||
699 | u8 csum[BTRFS_CSUM_SIZE]; | ||
700 | |||
701 | while (len) { | ||
702 | u64 l = min_t(u64, len, PAGE_SIZE); | ||
703 | int have_csum = 0; | ||
704 | |||
705 | if (flags & BTRFS_EXTENT_FLAG_DATA) { | ||
706 | /* push csums to sbio */ | ||
707 | have_csum = scrub_find_csum(sdev, logical, l, csum); | ||
708 | if (have_csum == 0) | ||
709 | ++sdev->stat.no_csum; | ||
710 | } | ||
711 | ret = scrub_page(sdev, logical, l, physical, flags, gen, | ||
712 | mirror_num, have_csum ? csum : NULL, 0); | ||
713 | if (ret) | ||
714 | return ret; | ||
715 | len -= l; | ||
716 | logical += l; | ||
717 | physical += l; | ||
718 | } | ||
719 | return 0; | ||
720 | } | ||
721 | |||
722 | static noinline_for_stack int scrub_stripe(struct scrub_dev *sdev, | ||
723 | struct map_lookup *map, int num, u64 base, u64 length) | ||
724 | { | ||
725 | struct btrfs_path *path; | ||
726 | struct btrfs_fs_info *fs_info = sdev->dev->dev_root->fs_info; | ||
727 | struct btrfs_root *root = fs_info->extent_root; | ||
728 | struct btrfs_root *csum_root = fs_info->csum_root; | ||
729 | struct btrfs_extent_item *extent; | ||
730 | u64 flags; | ||
731 | int ret; | ||
732 | int slot; | ||
733 | int i; | ||
734 | u64 nstripes; | ||
735 | int start_stripe; | ||
736 | struct extent_buffer *l; | ||
737 | struct btrfs_key key; | ||
738 | u64 physical; | ||
739 | u64 logical; | ||
740 | u64 generation; | ||
741 | u64 mirror_num; | ||
742 | |||
743 | u64 increment = map->stripe_len; | ||
744 | u64 offset; | ||
745 | |||
746 | nstripes = length; | ||
747 | offset = 0; | ||
748 | do_div(nstripes, map->stripe_len); | ||
749 | if (map->type & BTRFS_BLOCK_GROUP_RAID0) { | ||
750 | offset = map->stripe_len * num; | ||
751 | increment = map->stripe_len * map->num_stripes; | ||
752 | mirror_num = 0; | ||
753 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID10) { | ||
754 | int factor = map->num_stripes / map->sub_stripes; | ||
755 | offset = map->stripe_len * (num / map->sub_stripes); | ||
756 | increment = map->stripe_len * factor; | ||
757 | mirror_num = num % map->sub_stripes; | ||
758 | } else if (map->type & BTRFS_BLOCK_GROUP_RAID1) { | ||
759 | increment = map->stripe_len; | ||
760 | mirror_num = num % map->num_stripes; | ||
761 | } else if (map->type & BTRFS_BLOCK_GROUP_DUP) { | ||
762 | increment = map->stripe_len; | ||
763 | mirror_num = num % map->num_stripes; | ||
764 | } else { | ||
765 | increment = map->stripe_len; | ||
766 | mirror_num = 0; | ||
767 | } | ||
768 | |||
769 | path = btrfs_alloc_path(); | ||
770 | if (!path) | ||
771 | return -ENOMEM; | ||
772 | |||
773 | path->reada = 2; | ||
774 | path->search_commit_root = 1; | ||
775 | path->skip_locking = 1; | ||
776 | |||
777 | /* | ||
778 | * find all extents for each stripe and just read them to get | ||
779 | * them into the page cache | ||
780 | * FIXME: we can do better. build a more intelligent prefetching | ||
781 | */ | ||
782 | logical = base + offset; | ||
783 | physical = map->stripes[num].physical; | ||
784 | ret = 0; | ||
785 | for (i = 0; i < nstripes; ++i) { | ||
786 | key.objectid = logical; | ||
787 | key.type = BTRFS_EXTENT_ITEM_KEY; | ||
788 | key.offset = (u64)0; | ||
789 | |||
790 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
791 | if (ret < 0) | ||
792 | goto out; | ||
793 | |||
794 | l = path->nodes[0]; | ||
795 | slot = path->slots[0]; | ||
796 | btrfs_item_key_to_cpu(l, &key, slot); | ||
797 | if (key.objectid != logical) { | ||
798 | ret = btrfs_previous_item(root, path, 0, | ||
799 | BTRFS_EXTENT_ITEM_KEY); | ||
800 | if (ret < 0) | ||
801 | goto out; | ||
802 | } | ||
803 | |||
804 | while (1) { | ||
805 | l = path->nodes[0]; | ||
806 | slot = path->slots[0]; | ||
807 | if (slot >= btrfs_header_nritems(l)) { | ||
808 | ret = btrfs_next_leaf(root, path); | ||
809 | if (ret == 0) | ||
810 | continue; | ||
811 | if (ret < 0) | ||
812 | goto out; | ||
813 | |||
814 | break; | ||
815 | } | ||
816 | btrfs_item_key_to_cpu(l, &key, slot); | ||
817 | |||
818 | if (key.objectid >= logical + map->stripe_len) | ||
819 | break; | ||
820 | |||
821 | path->slots[0]++; | ||
822 | } | ||
823 | btrfs_release_path(path); | ||
824 | logical += increment; | ||
825 | physical += map->stripe_len; | ||
826 | cond_resched(); | ||
827 | } | ||
828 | |||
829 | /* | ||
830 | * collect all data csums for the stripe to avoid seeking during | ||
831 | * the scrub. This might currently (crc32) end up being about 1MB | ||
832 | */ | ||
833 | start_stripe = 0; | ||
834 | again: | ||
835 | logical = base + offset + start_stripe * increment; | ||
836 | for (i = start_stripe; i < nstripes; ++i) { | ||
837 | ret = btrfs_lookup_csums_range(csum_root, logical, | ||
838 | logical + map->stripe_len - 1, | ||
839 | &sdev->csum_list, 1); | ||
840 | if (ret) | ||
841 | goto out; | ||
842 | |||
843 | logical += increment; | ||
844 | cond_resched(); | ||
845 | } | ||
846 | /* | ||
847 | * now find all extents for each stripe and scrub them | ||
848 | */ | ||
849 | logical = base + offset + start_stripe * increment; | ||
850 | physical = map->stripes[num].physical + start_stripe * map->stripe_len; | ||
851 | ret = 0; | ||
852 | for (i = start_stripe; i < nstripes; ++i) { | ||
853 | /* | ||
854 | * canceled? | ||
855 | */ | ||
856 | if (atomic_read(&fs_info->scrub_cancel_req) || | ||
857 | atomic_read(&sdev->cancel_req)) { | ||
858 | ret = -ECANCELED; | ||
859 | goto out; | ||
860 | } | ||
861 | /* | ||
862 | * check to see if we have to pause | ||
863 | */ | ||
864 | if (atomic_read(&fs_info->scrub_pause_req)) { | ||
865 | /* push queued extents */ | ||
866 | scrub_submit(sdev); | ||
867 | wait_event(sdev->list_wait, | ||
868 | atomic_read(&sdev->in_flight) == 0); | ||
869 | atomic_inc(&fs_info->scrubs_paused); | ||
870 | wake_up(&fs_info->scrub_pause_wait); | ||
871 | mutex_lock(&fs_info->scrub_lock); | ||
872 | while (atomic_read(&fs_info->scrub_pause_req)) { | ||
873 | mutex_unlock(&fs_info->scrub_lock); | ||
874 | wait_event(fs_info->scrub_pause_wait, | ||
875 | atomic_read(&fs_info->scrub_pause_req) == 0); | ||
876 | mutex_lock(&fs_info->scrub_lock); | ||
877 | } | ||
878 | atomic_dec(&fs_info->scrubs_paused); | ||
879 | mutex_unlock(&fs_info->scrub_lock); | ||
880 | wake_up(&fs_info->scrub_pause_wait); | ||
881 | scrub_free_csums(sdev); | ||
882 | start_stripe = i; | ||
883 | goto again; | ||
884 | } | ||
885 | |||
886 | key.objectid = logical; | ||
887 | key.type = BTRFS_EXTENT_ITEM_KEY; | ||
888 | key.offset = (u64)0; | ||
889 | |||
890 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
891 | if (ret < 0) | ||
892 | goto out; | ||
893 | |||
894 | l = path->nodes[0]; | ||
895 | slot = path->slots[0]; | ||
896 | btrfs_item_key_to_cpu(l, &key, slot); | ||
897 | if (key.objectid != logical) { | ||
898 | ret = btrfs_previous_item(root, path, 0, | ||
899 | BTRFS_EXTENT_ITEM_KEY); | ||
900 | if (ret < 0) | ||
901 | goto out; | ||
902 | } | ||
903 | |||
904 | while (1) { | ||
905 | l = path->nodes[0]; | ||
906 | slot = path->slots[0]; | ||
907 | if (slot >= btrfs_header_nritems(l)) { | ||
908 | ret = btrfs_next_leaf(root, path); | ||
909 | if (ret == 0) | ||
910 | continue; | ||
911 | if (ret < 0) | ||
912 | goto out; | ||
913 | |||
914 | break; | ||
915 | } | ||
916 | btrfs_item_key_to_cpu(l, &key, slot); | ||
917 | |||
918 | if (key.objectid + key.offset <= logical) | ||
919 | goto next; | ||
920 | |||
921 | if (key.objectid >= logical + map->stripe_len) | ||
922 | break; | ||
923 | |||
924 | if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY) | ||
925 | goto next; | ||
926 | |||
927 | extent = btrfs_item_ptr(l, slot, | ||
928 | struct btrfs_extent_item); | ||
929 | flags = btrfs_extent_flags(l, extent); | ||
930 | generation = btrfs_extent_generation(l, extent); | ||
931 | |||
932 | if (key.objectid < logical && | ||
933 | (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK)) { | ||
934 | printk(KERN_ERR | ||
935 | "btrfs scrub: tree block %llu spanning " | ||
936 | "stripes, ignored. logical=%llu\n", | ||
937 | (unsigned long long)key.objectid, | ||
938 | (unsigned long long)logical); | ||
939 | goto next; | ||
940 | } | ||
941 | |||
942 | /* | ||
943 | * trim extent to this stripe | ||
944 | */ | ||
945 | if (key.objectid < logical) { | ||
946 | key.offset -= logical - key.objectid; | ||
947 | key.objectid = logical; | ||
948 | } | ||
949 | if (key.objectid + key.offset > | ||
950 | logical + map->stripe_len) { | ||
951 | key.offset = logical + map->stripe_len - | ||
952 | key.objectid; | ||
953 | } | ||
954 | |||
955 | ret = scrub_extent(sdev, key.objectid, key.offset, | ||
956 | key.objectid - logical + physical, | ||
957 | flags, generation, mirror_num); | ||
958 | if (ret) | ||
959 | goto out; | ||
960 | |||
961 | next: | ||
962 | path->slots[0]++; | ||
963 | } | ||
964 | btrfs_release_path(path); | ||
965 | logical += increment; | ||
966 | physical += map->stripe_len; | ||
967 | spin_lock(&sdev->stat_lock); | ||
968 | sdev->stat.last_physical = physical; | ||
969 | spin_unlock(&sdev->stat_lock); | ||
970 | } | ||
971 | /* push queued extents */ | ||
972 | scrub_submit(sdev); | ||
973 | |||
974 | out: | ||
975 | btrfs_free_path(path); | ||
976 | return ret < 0 ? ret : 0; | ||
977 | } | ||
978 | |||
979 | static noinline_for_stack int scrub_chunk(struct scrub_dev *sdev, | ||
980 | u64 chunk_tree, u64 chunk_objectid, u64 chunk_offset, u64 length) | ||
981 | { | ||
982 | struct btrfs_mapping_tree *map_tree = | ||
983 | &sdev->dev->dev_root->fs_info->mapping_tree; | ||
984 | struct map_lookup *map; | ||
985 | struct extent_map *em; | ||
986 | int i; | ||
987 | int ret = -EINVAL; | ||
988 | |||
989 | read_lock(&map_tree->map_tree.lock); | ||
990 | em = lookup_extent_mapping(&map_tree->map_tree, chunk_offset, 1); | ||
991 | read_unlock(&map_tree->map_tree.lock); | ||
992 | |||
993 | if (!em) | ||
994 | return -EINVAL; | ||
995 | |||
996 | map = (struct map_lookup *)em->bdev; | ||
997 | if (em->start != chunk_offset) | ||
998 | goto out; | ||
999 | |||
1000 | if (em->len < length) | ||
1001 | goto out; | ||
1002 | |||
1003 | for (i = 0; i < map->num_stripes; ++i) { | ||
1004 | if (map->stripes[i].dev == sdev->dev) { | ||
1005 | ret = scrub_stripe(sdev, map, i, chunk_offset, length); | ||
1006 | if (ret) | ||
1007 | goto out; | ||
1008 | } | ||
1009 | } | ||
1010 | out: | ||
1011 | free_extent_map(em); | ||
1012 | |||
1013 | return ret; | ||
1014 | } | ||
1015 | |||
1016 | static noinline_for_stack | ||
1017 | int scrub_enumerate_chunks(struct scrub_dev *sdev, u64 start, u64 end) | ||
1018 | { | ||
1019 | struct btrfs_dev_extent *dev_extent = NULL; | ||
1020 | struct btrfs_path *path; | ||
1021 | struct btrfs_root *root = sdev->dev->dev_root; | ||
1022 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
1023 | u64 length; | ||
1024 | u64 chunk_tree; | ||
1025 | u64 chunk_objectid; | ||
1026 | u64 chunk_offset; | ||
1027 | int ret; | ||
1028 | int slot; | ||
1029 | struct extent_buffer *l; | ||
1030 | struct btrfs_key key; | ||
1031 | struct btrfs_key found_key; | ||
1032 | struct btrfs_block_group_cache *cache; | ||
1033 | |||
1034 | path = btrfs_alloc_path(); | ||
1035 | if (!path) | ||
1036 | return -ENOMEM; | ||
1037 | |||
1038 | path->reada = 2; | ||
1039 | path->search_commit_root = 1; | ||
1040 | path->skip_locking = 1; | ||
1041 | |||
1042 | key.objectid = sdev->dev->devid; | ||
1043 | key.offset = 0ull; | ||
1044 | key.type = BTRFS_DEV_EXTENT_KEY; | ||
1045 | |||
1046 | |||
1047 | while (1) { | ||
1048 | ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); | ||
1049 | if (ret < 0) | ||
1050 | goto out; | ||
1051 | ret = 0; | ||
1052 | |||
1053 | l = path->nodes[0]; | ||
1054 | slot = path->slots[0]; | ||
1055 | |||
1056 | btrfs_item_key_to_cpu(l, &found_key, slot); | ||
1057 | |||
1058 | if (found_key.objectid != sdev->dev->devid) | ||
1059 | break; | ||
1060 | |||
1061 | if (btrfs_key_type(&key) != BTRFS_DEV_EXTENT_KEY) | ||
1062 | break; | ||
1063 | |||
1064 | if (found_key.offset >= end) | ||
1065 | break; | ||
1066 | |||
1067 | if (found_key.offset < key.offset) | ||
1068 | break; | ||
1069 | |||
1070 | dev_extent = btrfs_item_ptr(l, slot, struct btrfs_dev_extent); | ||
1071 | length = btrfs_dev_extent_length(l, dev_extent); | ||
1072 | |||
1073 | if (found_key.offset + length <= start) { | ||
1074 | key.offset = found_key.offset + length; | ||
1075 | btrfs_release_path(path); | ||
1076 | continue; | ||
1077 | } | ||
1078 | |||
1079 | chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); | ||
1080 | chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); | ||
1081 | chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); | ||
1082 | |||
1083 | /* | ||
1084 | * get a reference on the corresponding block group to prevent | ||
1085 | * the chunk from going away while we scrub it | ||
1086 | */ | ||
1087 | cache = btrfs_lookup_block_group(fs_info, chunk_offset); | ||
1088 | if (!cache) { | ||
1089 | ret = -ENOENT; | ||
1090 | goto out; | ||
1091 | } | ||
1092 | ret = scrub_chunk(sdev, chunk_tree, chunk_objectid, | ||
1093 | chunk_offset, length); | ||
1094 | btrfs_put_block_group(cache); | ||
1095 | if (ret) | ||
1096 | break; | ||
1097 | |||
1098 | key.offset = found_key.offset + length; | ||
1099 | btrfs_release_path(path); | ||
1100 | } | ||
1101 | |||
1102 | out: | ||
1103 | btrfs_free_path(path); | ||
1104 | return ret; | ||
1105 | } | ||
1106 | |||
1107 | static noinline_for_stack int scrub_supers(struct scrub_dev *sdev) | ||
1108 | { | ||
1109 | int i; | ||
1110 | u64 bytenr; | ||
1111 | u64 gen; | ||
1112 | int ret; | ||
1113 | struct btrfs_device *device = sdev->dev; | ||
1114 | struct btrfs_root *root = device->dev_root; | ||
1115 | |||
1116 | gen = root->fs_info->last_trans_committed; | ||
1117 | |||
1118 | for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) { | ||
1119 | bytenr = btrfs_sb_offset(i); | ||
1120 | if (bytenr + BTRFS_SUPER_INFO_SIZE >= device->total_bytes) | ||
1121 | break; | ||
1122 | |||
1123 | ret = scrub_page(sdev, bytenr, PAGE_SIZE, bytenr, | ||
1124 | BTRFS_EXTENT_FLAG_SUPER, gen, i, NULL, 1); | ||
1125 | if (ret) | ||
1126 | return ret; | ||
1127 | } | ||
1128 | wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0); | ||
1129 | |||
1130 | return 0; | ||
1131 | } | ||
1132 | |||
1133 | /* | ||
1134 | * get a reference count on fs_info->scrub_workers. start worker if necessary | ||
1135 | */ | ||
1136 | static noinline_for_stack int scrub_workers_get(struct btrfs_root *root) | ||
1137 | { | ||
1138 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
1139 | |||
1140 | mutex_lock(&fs_info->scrub_lock); | ||
1141 | if (fs_info->scrub_workers_refcnt == 0) | ||
1142 | btrfs_start_workers(&fs_info->scrub_workers, 1); | ||
1143 | ++fs_info->scrub_workers_refcnt; | ||
1144 | mutex_unlock(&fs_info->scrub_lock); | ||
1145 | |||
1146 | return 0; | ||
1147 | } | ||
1148 | |||
1149 | static noinline_for_stack void scrub_workers_put(struct btrfs_root *root) | ||
1150 | { | ||
1151 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
1152 | |||
1153 | mutex_lock(&fs_info->scrub_lock); | ||
1154 | if (--fs_info->scrub_workers_refcnt == 0) | ||
1155 | btrfs_stop_workers(&fs_info->scrub_workers); | ||
1156 | WARN_ON(fs_info->scrub_workers_refcnt < 0); | ||
1157 | mutex_unlock(&fs_info->scrub_lock); | ||
1158 | } | ||
1159 | |||
1160 | |||
1161 | int btrfs_scrub_dev(struct btrfs_root *root, u64 devid, u64 start, u64 end, | ||
1162 | struct btrfs_scrub_progress *progress, int readonly) | ||
1163 | { | ||
1164 | struct scrub_dev *sdev; | ||
1165 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
1166 | int ret; | ||
1167 | struct btrfs_device *dev; | ||
1168 | |||
1169 | if (root->fs_info->closing) | ||
1170 | return -EINVAL; | ||
1171 | |||
1172 | /* | ||
1173 | * check some assumptions | ||
1174 | */ | ||
1175 | if (root->sectorsize != PAGE_SIZE || | ||
1176 | root->sectorsize != root->leafsize || | ||
1177 | root->sectorsize != root->nodesize) { | ||
1178 | printk(KERN_ERR "btrfs_scrub: size assumptions fail\n"); | ||
1179 | return -EINVAL; | ||
1180 | } | ||
1181 | |||
1182 | ret = scrub_workers_get(root); | ||
1183 | if (ret) | ||
1184 | return ret; | ||
1185 | |||
1186 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | ||
1187 | dev = btrfs_find_device(root, devid, NULL, NULL); | ||
1188 | if (!dev || dev->missing) { | ||
1189 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | ||
1190 | scrub_workers_put(root); | ||
1191 | return -ENODEV; | ||
1192 | } | ||
1193 | mutex_lock(&fs_info->scrub_lock); | ||
1194 | |||
1195 | if (!dev->in_fs_metadata) { | ||
1196 | mutex_unlock(&fs_info->scrub_lock); | ||
1197 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | ||
1198 | scrub_workers_put(root); | ||
1199 | return -ENODEV; | ||
1200 | } | ||
1201 | |||
1202 | if (dev->scrub_device) { | ||
1203 | mutex_unlock(&fs_info->scrub_lock); | ||
1204 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | ||
1205 | scrub_workers_put(root); | ||
1206 | return -EINPROGRESS; | ||
1207 | } | ||
1208 | sdev = scrub_setup_dev(dev); | ||
1209 | if (IS_ERR(sdev)) { | ||
1210 | mutex_unlock(&fs_info->scrub_lock); | ||
1211 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | ||
1212 | scrub_workers_put(root); | ||
1213 | return PTR_ERR(sdev); | ||
1214 | } | ||
1215 | sdev->readonly = readonly; | ||
1216 | dev->scrub_device = sdev; | ||
1217 | |||
1218 | atomic_inc(&fs_info->scrubs_running); | ||
1219 | mutex_unlock(&fs_info->scrub_lock); | ||
1220 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | ||
1221 | |||
1222 | down_read(&fs_info->scrub_super_lock); | ||
1223 | ret = scrub_supers(sdev); | ||
1224 | up_read(&fs_info->scrub_super_lock); | ||
1225 | |||
1226 | if (!ret) | ||
1227 | ret = scrub_enumerate_chunks(sdev, start, end); | ||
1228 | |||
1229 | wait_event(sdev->list_wait, atomic_read(&sdev->in_flight) == 0); | ||
1230 | |||
1231 | atomic_dec(&fs_info->scrubs_running); | ||
1232 | wake_up(&fs_info->scrub_pause_wait); | ||
1233 | |||
1234 | if (progress) | ||
1235 | memcpy(progress, &sdev->stat, sizeof(*progress)); | ||
1236 | |||
1237 | mutex_lock(&fs_info->scrub_lock); | ||
1238 | dev->scrub_device = NULL; | ||
1239 | mutex_unlock(&fs_info->scrub_lock); | ||
1240 | |||
1241 | scrub_free_dev(sdev); | ||
1242 | scrub_workers_put(root); | ||
1243 | |||
1244 | return ret; | ||
1245 | } | ||
1246 | |||
1247 | int btrfs_scrub_pause(struct btrfs_root *root) | ||
1248 | { | ||
1249 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
1250 | |||
1251 | mutex_lock(&fs_info->scrub_lock); | ||
1252 | atomic_inc(&fs_info->scrub_pause_req); | ||
1253 | while (atomic_read(&fs_info->scrubs_paused) != | ||
1254 | atomic_read(&fs_info->scrubs_running)) { | ||
1255 | mutex_unlock(&fs_info->scrub_lock); | ||
1256 | wait_event(fs_info->scrub_pause_wait, | ||
1257 | atomic_read(&fs_info->scrubs_paused) == | ||
1258 | atomic_read(&fs_info->scrubs_running)); | ||
1259 | mutex_lock(&fs_info->scrub_lock); | ||
1260 | } | ||
1261 | mutex_unlock(&fs_info->scrub_lock); | ||
1262 | |||
1263 | return 0; | ||
1264 | } | ||
1265 | |||
1266 | int btrfs_scrub_continue(struct btrfs_root *root) | ||
1267 | { | ||
1268 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
1269 | |||
1270 | atomic_dec(&fs_info->scrub_pause_req); | ||
1271 | wake_up(&fs_info->scrub_pause_wait); | ||
1272 | return 0; | ||
1273 | } | ||
1274 | |||
1275 | int btrfs_scrub_pause_super(struct btrfs_root *root) | ||
1276 | { | ||
1277 | down_write(&root->fs_info->scrub_super_lock); | ||
1278 | return 0; | ||
1279 | } | ||
1280 | |||
1281 | int btrfs_scrub_continue_super(struct btrfs_root *root) | ||
1282 | { | ||
1283 | up_write(&root->fs_info->scrub_super_lock); | ||
1284 | return 0; | ||
1285 | } | ||
1286 | |||
1287 | int btrfs_scrub_cancel(struct btrfs_root *root) | ||
1288 | { | ||
1289 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
1290 | |||
1291 | mutex_lock(&fs_info->scrub_lock); | ||
1292 | if (!atomic_read(&fs_info->scrubs_running)) { | ||
1293 | mutex_unlock(&fs_info->scrub_lock); | ||
1294 | return -ENOTCONN; | ||
1295 | } | ||
1296 | |||
1297 | atomic_inc(&fs_info->scrub_cancel_req); | ||
1298 | while (atomic_read(&fs_info->scrubs_running)) { | ||
1299 | mutex_unlock(&fs_info->scrub_lock); | ||
1300 | wait_event(fs_info->scrub_pause_wait, | ||
1301 | atomic_read(&fs_info->scrubs_running) == 0); | ||
1302 | mutex_lock(&fs_info->scrub_lock); | ||
1303 | } | ||
1304 | atomic_dec(&fs_info->scrub_cancel_req); | ||
1305 | mutex_unlock(&fs_info->scrub_lock); | ||
1306 | |||
1307 | return 0; | ||
1308 | } | ||
1309 | |||
1310 | int btrfs_scrub_cancel_dev(struct btrfs_root *root, struct btrfs_device *dev) | ||
1311 | { | ||
1312 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
1313 | struct scrub_dev *sdev; | ||
1314 | |||
1315 | mutex_lock(&fs_info->scrub_lock); | ||
1316 | sdev = dev->scrub_device; | ||
1317 | if (!sdev) { | ||
1318 | mutex_unlock(&fs_info->scrub_lock); | ||
1319 | return -ENOTCONN; | ||
1320 | } | ||
1321 | atomic_inc(&sdev->cancel_req); | ||
1322 | while (dev->scrub_device) { | ||
1323 | mutex_unlock(&fs_info->scrub_lock); | ||
1324 | wait_event(fs_info->scrub_pause_wait, | ||
1325 | dev->scrub_device == NULL); | ||
1326 | mutex_lock(&fs_info->scrub_lock); | ||
1327 | } | ||
1328 | mutex_unlock(&fs_info->scrub_lock); | ||
1329 | |||
1330 | return 0; | ||
1331 | } | ||
1332 | int btrfs_scrub_cancel_devid(struct btrfs_root *root, u64 devid) | ||
1333 | { | ||
1334 | struct btrfs_fs_info *fs_info = root->fs_info; | ||
1335 | struct btrfs_device *dev; | ||
1336 | int ret; | ||
1337 | |||
1338 | /* | ||
1339 | * we have to hold the device_list_mutex here so the device | ||
1340 | * does not go away in cancel_dev. FIXME: find a better solution | ||
1341 | */ | ||
1342 | mutex_lock(&fs_info->fs_devices->device_list_mutex); | ||
1343 | dev = btrfs_find_device(root, devid, NULL, NULL); | ||
1344 | if (!dev) { | ||
1345 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | ||
1346 | return -ENODEV; | ||
1347 | } | ||
1348 | ret = btrfs_scrub_cancel_dev(root, dev); | ||
1349 | mutex_unlock(&fs_info->fs_devices->device_list_mutex); | ||
1350 | |||
1351 | return ret; | ||
1352 | } | ||
1353 | |||
1354 | int btrfs_scrub_progress(struct btrfs_root *root, u64 devid, | ||
1355 | struct btrfs_scrub_progress *progress) | ||
1356 | { | ||
1357 | struct btrfs_device *dev; | ||
1358 | struct scrub_dev *sdev = NULL; | ||
1359 | |||
1360 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | ||
1361 | dev = btrfs_find_device(root, devid, NULL, NULL); | ||
1362 | if (dev) | ||
1363 | sdev = dev->scrub_device; | ||
1364 | if (sdev) | ||
1365 | memcpy(progress, &sdev->stat, sizeof(*progress)); | ||
1366 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | ||
1367 | |||
1368 | return dev ? (sdev ? 0 : -ENOTCONN) : -ENODEV; | ||
1369 | } | ||
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c index 0ac712efcdf2..9b2e7e5bc3ef 100644 --- a/fs/btrfs/super.c +++ b/fs/btrfs/super.c | |||
@@ -39,7 +39,9 @@ | |||
39 | #include <linux/miscdevice.h> | 39 | #include <linux/miscdevice.h> |
40 | #include <linux/magic.h> | 40 | #include <linux/magic.h> |
41 | #include <linux/slab.h> | 41 | #include <linux/slab.h> |
42 | #include <linux/cleancache.h> | ||
42 | #include "compat.h" | 43 | #include "compat.h" |
44 | #include "delayed-inode.h" | ||
43 | #include "ctree.h" | 45 | #include "ctree.h" |
44 | #include "disk-io.h" | 46 | #include "disk-io.h" |
45 | #include "transaction.h" | 47 | #include "transaction.h" |
@@ -159,7 +161,7 @@ enum { | |||
159 | Opt_compress_type, Opt_compress_force, Opt_compress_force_type, | 161 | Opt_compress_type, Opt_compress_force, Opt_compress_force_type, |
160 | Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, | 162 | Opt_notreelog, Opt_ratio, Opt_flushoncommit, Opt_discard, |
161 | Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, | 163 | Opt_space_cache, Opt_clear_cache, Opt_user_subvol_rm_allowed, |
162 | Opt_enospc_debug, Opt_subvolrootid, Opt_err, | 164 | Opt_enospc_debug, Opt_subvolrootid, Opt_defrag, Opt_err, |
163 | }; | 165 | }; |
164 | 166 | ||
165 | static match_table_t tokens = { | 167 | static match_table_t tokens = { |
@@ -190,6 +192,7 @@ static match_table_t tokens = { | |||
190 | {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, | 192 | {Opt_user_subvol_rm_allowed, "user_subvol_rm_allowed"}, |
191 | {Opt_enospc_debug, "enospc_debug"}, | 193 | {Opt_enospc_debug, "enospc_debug"}, |
192 | {Opt_subvolrootid, "subvolrootid=%d"}, | 194 | {Opt_subvolrootid, "subvolrootid=%d"}, |
195 | {Opt_defrag, "autodefrag"}, | ||
193 | {Opt_err, NULL}, | 196 | {Opt_err, NULL}, |
194 | }; | 197 | }; |
195 | 198 | ||
@@ -368,6 +371,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options) | |||
368 | case Opt_enospc_debug: | 371 | case Opt_enospc_debug: |
369 | btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); | 372 | btrfs_set_opt(info->mount_opt, ENOSPC_DEBUG); |
370 | break; | 373 | break; |
374 | case Opt_defrag: | ||
375 | printk(KERN_INFO "btrfs: enabling auto defrag"); | ||
376 | btrfs_set_opt(info->mount_opt, AUTO_DEFRAG); | ||
377 | break; | ||
371 | case Opt_err: | 378 | case Opt_err: |
372 | printk(KERN_INFO "btrfs: unrecognized mount option " | 379 | printk(KERN_INFO "btrfs: unrecognized mount option " |
373 | "'%s'\n", p); | 380 | "'%s'\n", p); |
@@ -506,8 +513,10 @@ static struct dentry *get_default_root(struct super_block *sb, | |||
506 | */ | 513 | */ |
507 | dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); | 514 | dir_id = btrfs_super_root_dir(&root->fs_info->super_copy); |
508 | di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); | 515 | di = btrfs_lookup_dir_item(NULL, root, path, dir_id, "default", 7, 0); |
509 | if (IS_ERR(di)) | 516 | if (IS_ERR(di)) { |
517 | btrfs_free_path(path); | ||
510 | return ERR_CAST(di); | 518 | return ERR_CAST(di); |
519 | } | ||
511 | if (!di) { | 520 | if (!di) { |
512 | /* | 521 | /* |
513 | * Ok the default dir item isn't there. This is weird since | 522 | * Ok the default dir item isn't there. This is weird since |
@@ -624,6 +633,7 @@ static int btrfs_fill_super(struct super_block *sb, | |||
624 | sb->s_root = root_dentry; | 633 | sb->s_root = root_dentry; |
625 | 634 | ||
626 | save_mount_options(sb, data); | 635 | save_mount_options(sb, data); |
636 | cleancache_init_fs(sb); | ||
627 | return 0; | 637 | return 0; |
628 | 638 | ||
629 | fail_close: | 639 | fail_close: |
@@ -739,7 +749,7 @@ static int btrfs_set_super(struct super_block *s, void *data) | |||
739 | * for multiple device setup. Make sure to keep it in sync. | 749 | * for multiple device setup. Make sure to keep it in sync. |
740 | */ | 750 | */ |
741 | static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, | 751 | static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, |
742 | const char *dev_name, void *data) | 752 | const char *device_name, void *data) |
743 | { | 753 | { |
744 | struct block_device *bdev = NULL; | 754 | struct block_device *bdev = NULL; |
745 | struct super_block *s; | 755 | struct super_block *s; |
@@ -762,7 +772,7 @@ static struct dentry *btrfs_mount(struct file_system_type *fs_type, int flags, | |||
762 | if (error) | 772 | if (error) |
763 | return ERR_PTR(error); | 773 | return ERR_PTR(error); |
764 | 774 | ||
765 | error = btrfs_scan_one_device(dev_name, mode, fs_type, &fs_devices); | 775 | error = btrfs_scan_one_device(device_name, mode, fs_type, &fs_devices); |
766 | if (error) | 776 | if (error) |
767 | goto error_free_subvol_name; | 777 | goto error_free_subvol_name; |
768 | 778 | ||
@@ -913,6 +923,32 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data) | |||
913 | return 0; | 923 | return 0; |
914 | } | 924 | } |
915 | 925 | ||
926 | /* Used to sort the devices by max_avail (descending sort) */ | ||
927 | static int btrfs_cmp_device_free_bytes(const void *dev_info1, | ||
928 | const void *dev_info2) | ||
929 | { | ||
930 | if (((struct btrfs_device_info *)dev_info1)->max_avail > | ||
931 | ((struct btrfs_device_info *)dev_info2)->max_avail) | ||
932 | return -1; | ||
933 | else if (((struct btrfs_device_info *)dev_info1)->max_avail < | ||
934 | ((struct btrfs_device_info *)dev_info2)->max_avail) | ||
935 | return 1; | ||
936 | else | ||
937 | return 0; | ||
938 | } | ||
939 | |||
940 | /* | ||
941 | * sort the devices by max_avail, which stores the max free extent size of | ||
942 | * each device (descending sort). | ||
943 | */ | ||
944 | static inline void btrfs_descending_sort_devices( | ||
945 | struct btrfs_device_info *devices, | ||
946 | size_t nr_devices) | ||
947 | { | ||
948 | sort(devices, nr_devices, sizeof(struct btrfs_device_info), | ||
949 | btrfs_cmp_device_free_bytes, NULL); | ||
950 | } | ||
951 | |||
916 | /* | 952 | /* |
917 | * The helper to calc the free space on the devices that can be used to store | 953 | * The helper to calc the free space on the devices that can be used to store |
918 | * file data. | 954 | * file data. |
@@ -1206,10 +1242,14 @@ static int __init init_btrfs_fs(void) | |||
1206 | if (err) | 1242 | if (err) |
1207 | goto free_extent_io; | 1243 | goto free_extent_io; |
1208 | 1244 | ||
1209 | err = btrfs_interface_init(); | 1245 | err = btrfs_delayed_inode_init(); |
1210 | if (err) | 1246 | if (err) |
1211 | goto free_extent_map; | 1247 | goto free_extent_map; |
1212 | 1248 | ||
1249 | err = btrfs_interface_init(); | ||
1250 | if (err) | ||
1251 | goto free_delayed_inode; | ||
1252 | |||
1213 | err = register_filesystem(&btrfs_fs_type); | 1253 | err = register_filesystem(&btrfs_fs_type); |
1214 | if (err) | 1254 | if (err) |
1215 | goto unregister_ioctl; | 1255 | goto unregister_ioctl; |
@@ -1219,6 +1259,8 @@ static int __init init_btrfs_fs(void) | |||
1219 | 1259 | ||
1220 | unregister_ioctl: | 1260 | unregister_ioctl: |
1221 | btrfs_interface_exit(); | 1261 | btrfs_interface_exit(); |
1262 | free_delayed_inode: | ||
1263 | btrfs_delayed_inode_exit(); | ||
1222 | free_extent_map: | 1264 | free_extent_map: |
1223 | extent_map_exit(); | 1265 | extent_map_exit(); |
1224 | free_extent_io: | 1266 | free_extent_io: |
@@ -1235,6 +1277,7 @@ free_sysfs: | |||
1235 | static void __exit exit_btrfs_fs(void) | 1277 | static void __exit exit_btrfs_fs(void) |
1236 | { | 1278 | { |
1237 | btrfs_destroy_cachep(); | 1279 | btrfs_destroy_cachep(); |
1280 | btrfs_delayed_inode_exit(); | ||
1238 | extent_map_exit(); | 1281 | extent_map_exit(); |
1239 | extent_io_exit(); | 1282 | extent_io_exit(); |
1240 | btrfs_interface_exit(); | 1283 | btrfs_interface_exit(); |
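btrfs_cmp_device_free_bytes() added above orders btrfs_device_info entries by max_avail, largest first, for the free-space helper that follows it. A stand-alone user-space analogue of the same descending comparator using qsort(); dev_info and the sample values are illustrative:

#include <stdio.h>
#include <stdlib.h>

struct dev_info {
	unsigned long long devid;
	unsigned long long max_avail;	/* largest free extent on the device */
};

/* Descending by max_avail: return <0 when a should sort before b. */
static int cmp_device_free_bytes(const void *a, const void *b)
{
	const struct dev_info *d1 = a;
	const struct dev_info *d2 = b;

	if (d1->max_avail > d2->max_avail)
		return -1;
	if (d1->max_avail < d2->max_avail)
		return 1;
	return 0;
}

int main(void)
{
	struct dev_info devs[] = { { 1, 10 }, { 2, 40 }, { 3, 25 } };
	size_t n = sizeof(devs) / sizeof(devs[0]);

	qsort(devs, n, sizeof(devs[0]), cmp_device_free_bytes);

	for (size_t i = 0; i < n; i++)
		printf("devid %llu: max_avail %llu\n",
		       devs[i].devid, devs[i].max_avail);
	return 0;
}

Comparing instead of subtracting avoids wrap-around with 64-bit unsigned values; the helper in the diff takes the same approach.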
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index 4ce16ef702a3..c3c223ae6691 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c | |||
@@ -174,86 +174,9 @@ static const struct sysfs_ops btrfs_root_attr_ops = { | |||
174 | .store = btrfs_root_attr_store, | 174 | .store = btrfs_root_attr_store, |
175 | }; | 175 | }; |
176 | 176 | ||
177 | static struct kobj_type btrfs_root_ktype = { | ||
178 | .default_attrs = btrfs_root_attrs, | ||
179 | .sysfs_ops = &btrfs_root_attr_ops, | ||
180 | .release = btrfs_root_release, | ||
181 | }; | ||
182 | |||
183 | static struct kobj_type btrfs_super_ktype = { | ||
184 | .default_attrs = btrfs_super_attrs, | ||
185 | .sysfs_ops = &btrfs_super_attr_ops, | ||
186 | .release = btrfs_super_release, | ||
187 | }; | ||
188 | |||
189 | /* /sys/fs/btrfs/ entry */ | 177 | /* /sys/fs/btrfs/ entry */ |
190 | static struct kset *btrfs_kset; | 178 | static struct kset *btrfs_kset; |
191 | 179 | ||
192 | int btrfs_sysfs_add_super(struct btrfs_fs_info *fs) | ||
193 | { | ||
194 | int error; | ||
195 | char *name; | ||
196 | char c; | ||
197 | int len = strlen(fs->sb->s_id) + 1; | ||
198 | int i; | ||
199 | |||
200 | name = kmalloc(len, GFP_NOFS); | ||
201 | if (!name) { | ||
202 | error = -ENOMEM; | ||
203 | goto fail; | ||
204 | } | ||
205 | |||
206 | for (i = 0; i < len; i++) { | ||
207 | c = fs->sb->s_id[i]; | ||
208 | if (c == '/' || c == '\\') | ||
209 | c = '!'; | ||
210 | name[i] = c; | ||
211 | } | ||
212 | name[len] = '\0'; | ||
213 | |||
214 | fs->super_kobj.kset = btrfs_kset; | ||
215 | error = kobject_init_and_add(&fs->super_kobj, &btrfs_super_ktype, | ||
216 | NULL, "%s", name); | ||
217 | kfree(name); | ||
218 | if (error) | ||
219 | goto fail; | ||
220 | |||
221 | return 0; | ||
222 | |||
223 | fail: | ||
224 | printk(KERN_ERR "btrfs: sysfs creation for super failed\n"); | ||
225 | return error; | ||
226 | } | ||
227 | |||
228 | int btrfs_sysfs_add_root(struct btrfs_root *root) | ||
229 | { | ||
230 | int error; | ||
231 | |||
232 | error = kobject_init_and_add(&root->root_kobj, &btrfs_root_ktype, | ||
233 | &root->fs_info->super_kobj, | ||
234 | "%s", root->name); | ||
235 | if (error) | ||
236 | goto fail; | ||
237 | |||
238 | return 0; | ||
239 | |||
240 | fail: | ||
241 | printk(KERN_ERR "btrfs: sysfs creation for root failed\n"); | ||
242 | return error; | ||
243 | } | ||
244 | |||
245 | void btrfs_sysfs_del_root(struct btrfs_root *root) | ||
246 | { | ||
247 | kobject_put(&root->root_kobj); | ||
248 | wait_for_completion(&root->kobj_unregister); | ||
249 | } | ||
250 | |||
251 | void btrfs_sysfs_del_super(struct btrfs_fs_info *fs) | ||
252 | { | ||
253 | kobject_put(&fs->super_kobj); | ||
254 | wait_for_completion(&fs->kobj_unregister); | ||
255 | } | ||
256 | |||
257 | int btrfs_init_sysfs(void) | 180 | int btrfs_init_sysfs(void) |
258 | { | 181 | { |
259 | btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj); | 182 | btrfs_kset = kset_create_and_add("btrfs", NULL, fs_kobj); |
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index c571734d5e5a..dc80f7156923 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include "transaction.h" | 27 | #include "transaction.h" |
28 | #include "locking.h" | 28 | #include "locking.h" |
29 | #include "tree-log.h" | 29 | #include "tree-log.h" |
30 | #include "inode-map.h" | ||
30 | 31 | ||
31 | #define BTRFS_ROOT_TRANS_TAG 0 | 32 | #define BTRFS_ROOT_TRANS_TAG 0 |
32 | 33 | ||
@@ -80,8 +81,7 @@ static noinline int join_transaction(struct btrfs_root *root) | |||
80 | INIT_LIST_HEAD(&cur_trans->pending_snapshots); | 81 | INIT_LIST_HEAD(&cur_trans->pending_snapshots); |
81 | list_add_tail(&cur_trans->list, &root->fs_info->trans_list); | 82 | list_add_tail(&cur_trans->list, &root->fs_info->trans_list); |
82 | extent_io_tree_init(&cur_trans->dirty_pages, | 83 | extent_io_tree_init(&cur_trans->dirty_pages, |
83 | root->fs_info->btree_inode->i_mapping, | 84 | root->fs_info->btree_inode->i_mapping); |
84 | GFP_NOFS); | ||
85 | spin_lock(&root->fs_info->new_trans_lock); | 85 | spin_lock(&root->fs_info->new_trans_lock); |
86 | root->fs_info->running_transaction = cur_trans; | 86 | root->fs_info->running_transaction = cur_trans; |
87 | spin_unlock(&root->fs_info->new_trans_lock); | 87 | spin_unlock(&root->fs_info->new_trans_lock); |
@@ -347,49 +347,6 @@ out_unlock: | |||
347 | return ret; | 347 | return ret; |
348 | } | 348 | } |
349 | 349 | ||
350 | #if 0 | ||
351 | /* | ||
352 | * rate limit against the drop_snapshot code. This helps to slow down new | ||
353 | * operations if the drop_snapshot code isn't able to keep up. | ||
354 | */ | ||
355 | static void throttle_on_drops(struct btrfs_root *root) | ||
356 | { | ||
357 | struct btrfs_fs_info *info = root->fs_info; | ||
358 | int harder_count = 0; | ||
359 | |||
360 | harder: | ||
361 | if (atomic_read(&info->throttles)) { | ||
362 | DEFINE_WAIT(wait); | ||
363 | int thr; | ||
364 | thr = atomic_read(&info->throttle_gen); | ||
365 | |||
366 | do { | ||
367 | prepare_to_wait(&info->transaction_throttle, | ||
368 | &wait, TASK_UNINTERRUPTIBLE); | ||
369 | if (!atomic_read(&info->throttles)) { | ||
370 | finish_wait(&info->transaction_throttle, &wait); | ||
371 | break; | ||
372 | } | ||
373 | schedule(); | ||
374 | finish_wait(&info->transaction_throttle, &wait); | ||
375 | } while (thr == atomic_read(&info->throttle_gen)); | ||
376 | harder_count++; | ||
377 | |||
378 | if (root->fs_info->total_ref_cache_size > 1 * 1024 * 1024 && | ||
379 | harder_count < 2) | ||
380 | goto harder; | ||
381 | |||
382 | if (root->fs_info->total_ref_cache_size > 5 * 1024 * 1024 && | ||
383 | harder_count < 10) | ||
384 | goto harder; | ||
385 | |||
386 | if (root->fs_info->total_ref_cache_size > 10 * 1024 * 1024 && | ||
387 | harder_count < 20) | ||
388 | goto harder; | ||
389 | } | ||
390 | } | ||
391 | #endif | ||
392 | |||
393 | void btrfs_throttle(struct btrfs_root *root) | 350 | void btrfs_throttle(struct btrfs_root *root) |
394 | { | 351 | { |
395 | mutex_lock(&root->fs_info->trans_mutex); | 352 | mutex_lock(&root->fs_info->trans_mutex); |
@@ -487,19 +444,40 @@ static int __btrfs_end_transaction(struct btrfs_trans_handle *trans, | |||
487 | int btrfs_end_transaction(struct btrfs_trans_handle *trans, | 444 | int btrfs_end_transaction(struct btrfs_trans_handle *trans, |
488 | struct btrfs_root *root) | 445 | struct btrfs_root *root) |
489 | { | 446 | { |
490 | return __btrfs_end_transaction(trans, root, 0, 1); | 447 | int ret; |
448 | |||
449 | ret = __btrfs_end_transaction(trans, root, 0, 1); | ||
450 | if (ret) | ||
451 | return ret; | ||
452 | return 0; | ||
491 | } | 453 | } |
492 | 454 | ||
493 | int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, | 455 | int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, |
494 | struct btrfs_root *root) | 456 | struct btrfs_root *root) |
495 | { | 457 | { |
496 | return __btrfs_end_transaction(trans, root, 1, 1); | 458 | int ret; |
459 | |||
460 | ret = __btrfs_end_transaction(trans, root, 1, 1); | ||
461 | if (ret) | ||
462 | return ret; | ||
463 | return 0; | ||
497 | } | 464 | } |
498 | 465 | ||
499 | int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, | 466 | int btrfs_end_transaction_nolock(struct btrfs_trans_handle *trans, |
500 | struct btrfs_root *root) | 467 | struct btrfs_root *root) |
501 | { | 468 | { |
502 | return __btrfs_end_transaction(trans, root, 0, 0); | 469 | int ret; |
470 | |||
471 | ret = __btrfs_end_transaction(trans, root, 0, 0); | ||
472 | if (ret) | ||
473 | return ret; | ||
474 | return 0; | ||
475 | } | ||
476 | |||
477 | int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans, | ||
478 | struct btrfs_root *root) | ||
479 | { | ||
480 | return __btrfs_end_transaction(trans, root, 1, 1); | ||
503 | } | 481 | } |
504 | 482 | ||
505 | /* | 483 | /* |
@@ -760,8 +738,14 @@ static noinline int commit_fs_roots(struct btrfs_trans_handle *trans, | |||
760 | btrfs_update_reloc_root(trans, root); | 738 | btrfs_update_reloc_root(trans, root); |
761 | btrfs_orphan_commit_root(trans, root); | 739 | btrfs_orphan_commit_root(trans, root); |
762 | 740 | ||
741 | btrfs_save_ino_cache(root, trans); | ||
742 | |||
763 | if (root->commit_root != root->node) { | 743 | if (root->commit_root != root->node) { |
744 | mutex_lock(&root->fs_commit_mutex); | ||
764 | switch_commit_root(root); | 745 | switch_commit_root(root); |
746 | btrfs_unpin_free_ino(root); | ||
747 | mutex_unlock(&root->fs_commit_mutex); | ||
748 | |||
765 | btrfs_set_root_node(&root->root_item, | 749 | btrfs_set_root_node(&root->root_item, |
766 | root->node); | 750 | root->node); |
767 | } | 751 | } |
@@ -809,97 +793,6 @@ int btrfs_defrag_root(struct btrfs_root *root, int cacheonly) | |||
809 | return ret; | 793 | return ret; |
810 | } | 794 | } |
811 | 795 | ||
812 | #if 0 | ||
813 | /* | ||
814 | * when dropping snapshots, we generate a ton of delayed refs, and it makes | ||
815 | * sense not to join the transaction while it is trying to flush the current | ||
816 | * queue of delayed refs out. | ||
817 | * | ||
818 | * This is used by the drop snapshot code only | ||
819 | */ | ||
820 | static noinline int wait_transaction_pre_flush(struct btrfs_fs_info *info) | ||
821 | { | ||
822 | DEFINE_WAIT(wait); | ||
823 | |||
824 | mutex_lock(&info->trans_mutex); | ||
825 | while (info->running_transaction && | ||
826 | info->running_transaction->delayed_refs.flushing) { | ||
827 | prepare_to_wait(&info->transaction_wait, &wait, | ||
828 | TASK_UNINTERRUPTIBLE); | ||
829 | mutex_unlock(&info->trans_mutex); | ||
830 | |||
831 | schedule(); | ||
832 | |||
833 | mutex_lock(&info->trans_mutex); | ||
834 | finish_wait(&info->transaction_wait, &wait); | ||
835 | } | ||
836 | mutex_unlock(&info->trans_mutex); | ||
837 | return 0; | ||
838 | } | ||
839 | |||
840 | /* | ||
841 | * Given a list of roots that need to be deleted, call btrfs_drop_snapshot on | ||
842 | * all of them | ||
843 | */ | ||
844 | int btrfs_drop_dead_root(struct btrfs_root *root) | ||
845 | { | ||
846 | struct btrfs_trans_handle *trans; | ||
847 | struct btrfs_root *tree_root = root->fs_info->tree_root; | ||
848 | unsigned long nr; | ||
849 | int ret; | ||
850 | |||
851 | while (1) { | ||
852 | /* | ||
853 | * we don't want to jump in and create a bunch of | ||
854 | * delayed refs if the transaction is starting to close | ||
855 | */ | ||
856 | wait_transaction_pre_flush(tree_root->fs_info); | ||
857 | trans = btrfs_start_transaction(tree_root, 1); | ||
858 | |||
859 | /* | ||
860 | * we've joined a transaction, make sure it isn't | ||
861 | * closing right now | ||
862 | */ | ||
863 | if (trans->transaction->delayed_refs.flushing) { | ||
864 | btrfs_end_transaction(trans, tree_root); | ||
865 | continue; | ||
866 | } | ||
867 | |||
868 | ret = btrfs_drop_snapshot(trans, root); | ||
869 | if (ret != -EAGAIN) | ||
870 | break; | ||
871 | |||
872 | ret = btrfs_update_root(trans, tree_root, | ||
873 | &root->root_key, | ||
874 | &root->root_item); | ||
875 | if (ret) | ||
876 | break; | ||
877 | |||
878 | nr = trans->blocks_used; | ||
879 | ret = btrfs_end_transaction(trans, tree_root); | ||
880 | BUG_ON(ret); | ||
881 | |||
882 | btrfs_btree_balance_dirty(tree_root, nr); | ||
883 | cond_resched(); | ||
884 | } | ||
885 | BUG_ON(ret); | ||
886 | |||
887 | ret = btrfs_del_root(trans, tree_root, &root->root_key); | ||
888 | BUG_ON(ret); | ||
889 | |||
890 | nr = trans->blocks_used; | ||
891 | ret = btrfs_end_transaction(trans, tree_root); | ||
892 | BUG_ON(ret); | ||
893 | |||
894 | free_extent_buffer(root->node); | ||
895 | free_extent_buffer(root->commit_root); | ||
896 | kfree(root); | ||
897 | |||
898 | btrfs_btree_balance_dirty(tree_root, nr); | ||
899 | return ret; | ||
900 | } | ||
901 | #endif | ||
902 | |||
903 | /* | 796 | /* |
904 | * new snapshots need to be created at a very specific time in the | 797 | * new snapshots need to be created at a very specific time in the |
905 | * transaction commit. This does the actual creation | 798 | * transaction commit. This does the actual creation |
@@ -930,7 +823,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
930 | goto fail; | 823 | goto fail; |
931 | } | 824 | } |
932 | 825 | ||
933 | ret = btrfs_find_free_objectid(trans, tree_root, 0, &objectid); | 826 | ret = btrfs_find_free_objectid(tree_root, &objectid); |
934 | if (ret) { | 827 | if (ret) { |
935 | pending->error = ret; | 828 | pending->error = ret; |
936 | goto fail; | 829 | goto fail; |
@@ -967,7 +860,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
967 | BUG_ON(ret); | 860 | BUG_ON(ret); |
968 | ret = btrfs_insert_dir_item(trans, parent_root, | 861 | ret = btrfs_insert_dir_item(trans, parent_root, |
969 | dentry->d_name.name, dentry->d_name.len, | 862 | dentry->d_name.name, dentry->d_name.len, |
970 | parent_inode->i_ino, &key, | 863 | parent_inode, &key, |
971 | BTRFS_FT_DIR, index); | 864 | BTRFS_FT_DIR, index); |
972 | BUG_ON(ret); | 865 | BUG_ON(ret); |
973 | 866 | ||
@@ -1009,7 +902,7 @@ static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans, | |||
1009 | */ | 902 | */ |
1010 | ret = btrfs_add_root_ref(trans, tree_root, objectid, | 903 | ret = btrfs_add_root_ref(trans, tree_root, objectid, |
1011 | parent_root->root_key.objectid, | 904 | parent_root->root_key.objectid, |
1012 | parent_inode->i_ino, index, | 905 | btrfs_ino(parent_inode), index, |
1013 | dentry->d_name.name, dentry->d_name.len); | 906 | dentry->d_name.name, dentry->d_name.len); |
1014 | BUG_ON(ret); | 907 | BUG_ON(ret); |
1015 | dput(parent); | 908 | dput(parent); |
@@ -1037,6 +930,14 @@ static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans, | |||
1037 | int ret; | 930 | int ret; |
1038 | 931 | ||
1039 | list_for_each_entry(pending, head, list) { | 932 | list_for_each_entry(pending, head, list) { |
933 | /* | ||
934 | * We must deal with the delayed items before creating | ||
935 | * snapshots, or we will create a snapshot with inconsistent | ||
936 | * information. | ||
937 | */ | ||
938 | ret = btrfs_run_delayed_items(trans, fs_info->fs_root); | ||
939 | BUG_ON(ret); | ||
940 | |||
1040 | ret = create_pending_snapshot(trans, fs_info, pending); | 941 | ret = create_pending_snapshot(trans, fs_info, pending); |
1041 | BUG_ON(ret); | 942 | BUG_ON(ret); |
1042 | } | 943 | } |
@@ -1290,6 +1191,9 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
1290 | BUG_ON(ret); | 1191 | BUG_ON(ret); |
1291 | } | 1192 | } |
1292 | 1193 | ||
1194 | ret = btrfs_run_delayed_items(trans, root); | ||
1195 | BUG_ON(ret); | ||
1196 | |||
1293 | /* | 1197 | /* |
1294 | * rename don't use btrfs_join_transaction, so, once we | 1198 | * rename don't use btrfs_join_transaction, so, once we |
1295 | * set the transaction to blocked above, we aren't going | 1199 | * set the transaction to blocked above, we aren't going |
@@ -1316,11 +1220,15 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
1316 | ret = create_pending_snapshots(trans, root->fs_info); | 1220 | ret = create_pending_snapshots(trans, root->fs_info); |
1317 | BUG_ON(ret); | 1221 | BUG_ON(ret); |
1318 | 1222 | ||
1223 | ret = btrfs_run_delayed_items(trans, root); | ||
1224 | BUG_ON(ret); | ||
1225 | |||
1319 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); | 1226 | ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1); |
1320 | BUG_ON(ret); | 1227 | BUG_ON(ret); |
1321 | 1228 | ||
1322 | WARN_ON(cur_trans != trans->transaction); | 1229 | WARN_ON(cur_trans != trans->transaction); |
1323 | 1230 | ||
1231 | btrfs_scrub_pause(root); | ||
1324 | /* btrfs_commit_tree_roots is responsible for getting the | 1232 | /* btrfs_commit_tree_roots is responsible for getting the |
1325 | * various roots consistent with each other. Every pointer | 1233 | * various roots consistent with each other. Every pointer |
1326 | * in the tree of tree roots has to point to the most up to date | 1234 | * in the tree of tree roots has to point to the most up to date |
@@ -1405,6 +1313,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | |||
1405 | 1313 | ||
1406 | mutex_unlock(&root->fs_info->trans_mutex); | 1314 | mutex_unlock(&root->fs_info->trans_mutex); |
1407 | 1315 | ||
1316 | btrfs_scrub_continue(root); | ||
1317 | |||
1408 | if (current->journal_info == trans) | 1318 | if (current->journal_info == trans) |
1409 | current->journal_info = NULL; | 1319 | current->journal_info = NULL; |
1410 | 1320 | ||
@@ -1432,6 +1342,8 @@ int btrfs_clean_old_snapshots(struct btrfs_root *root) | |||
1432 | root = list_entry(list.next, struct btrfs_root, root_list); | 1342 | root = list_entry(list.next, struct btrfs_root, root_list); |
1433 | list_del(&root->root_list); | 1343 | list_del(&root->root_list); |
1434 | 1344 | ||
1345 | btrfs_kill_all_delayed_nodes(root); | ||
1346 | |||
1435 | if (btrfs_header_backref_rev(root->node) < | 1347 | if (btrfs_header_backref_rev(root->node) < |
1436 | BTRFS_MIXED_BACKREF_REV) | 1348 | BTRFS_MIXED_BACKREF_REV) |
1437 | btrfs_drop_snapshot(root, NULL, 0); | 1349 | btrfs_drop_snapshot(root, NULL, 0); |
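create_pending_snapshots() above now runs btrfs_run_delayed_items() before each create_pending_snapshot() call so that queued metadata updates reach the btree before the snapshot is taken. A toy user-space analogue of that flush-before-copy ordering; state, pending_ops and take_snapshot are made-up names used only to illustrate the invariant:

#include <stdio.h>

#define MAX_OPS 8

/* "Live" state plus a queue of updates that have not been applied yet. */
struct state {
	int value;
	int pending_ops[MAX_OPS];
	int nr_pending;
};

/* Apply every queued update to the live state (analogue of running
 * the delayed items before a snapshot is created). */
static void flush_pending(struct state *s)
{
	for (int i = 0; i < s->nr_pending; i++)
		s->value += s->pending_ops[i];
	s->nr_pending = 0;
}

/* A snapshot only sees what has been applied; flushing first keeps it
 * consistent with everything queued so far. */
static int take_snapshot(struct state *s)
{
	flush_pending(s);
	return s->value;
}

int main(void)
{
	struct state s = { .value = 10, .pending_ops = { 5, -2 }, .nr_pending = 2 };

	printf("snapshot sees %d\n", take_snapshot(&s));	/* 13, not 10 */
	return 0;
}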
diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index e441acc6c584..804c88639e5d 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h | |||
@@ -101,11 +101,8 @@ struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *r, | |||
101 | int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); | 101 | int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid); |
102 | int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, | 102 | int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans, |
103 | struct btrfs_root *root); | 103 | struct btrfs_root *root); |
104 | int btrfs_commit_tree_roots(struct btrfs_trans_handle *trans, | ||
105 | struct btrfs_root *root); | ||
106 | 104 | ||
107 | int btrfs_add_dead_root(struct btrfs_root *root); | 105 | int btrfs_add_dead_root(struct btrfs_root *root); |
108 | int btrfs_drop_dead_root(struct btrfs_root *root); | ||
109 | int btrfs_defrag_root(struct btrfs_root *root, int cacheonly); | 106 | int btrfs_defrag_root(struct btrfs_root *root, int cacheonly); |
110 | int btrfs_clean_old_snapshots(struct btrfs_root *root); | 107 | int btrfs_clean_old_snapshots(struct btrfs_root *root); |
111 | int btrfs_commit_transaction(struct btrfs_trans_handle *trans, | 108 | int btrfs_commit_transaction(struct btrfs_trans_handle *trans, |
@@ -115,6 +112,8 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans, | |||
115 | int wait_for_unblock); | 112 | int wait_for_unblock); |
116 | int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, | 113 | int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans, |
117 | struct btrfs_root *root); | 114 | struct btrfs_root *root); |
115 | int btrfs_end_transaction_dmeta(struct btrfs_trans_handle *trans, | ||
116 | struct btrfs_root *root); | ||
118 | int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, | 117 | int btrfs_should_end_transaction(struct btrfs_trans_handle *trans, |
119 | struct btrfs_root *root); | 118 | struct btrfs_root *root); |
120 | void btrfs_throttle(struct btrfs_root *root); | 119 | void btrfs_throttle(struct btrfs_root *root); |
diff --git a/fs/btrfs/tree-defrag.c b/fs/btrfs/tree-defrag.c index 992ab425599d..3b580ee8ab1d 100644 --- a/fs/btrfs/tree-defrag.c +++ b/fs/btrfs/tree-defrag.c | |||
@@ -97,7 +97,7 @@ int btrfs_defrag_leaves(struct btrfs_trans_handle *trans, | |||
97 | ret = 0; | 97 | ret = 0; |
98 | goto out; | 98 | goto out; |
99 | } | 99 | } |
100 | btrfs_release_path(root, path); | 100 | btrfs_release_path(path); |
101 | wret = btrfs_search_slot(trans, root, &key, path, 0, 1); | 101 | wret = btrfs_search_slot(trans, root, &key, path, 0, 1); |
102 | 102 | ||
103 | if (wret < 0) { | 103 | if (wret < 0) { |
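The tree-log.c hunks below replace open-coded checks of the form "!di || IS_ERR(di)" with IS_ERR_OR_NULL(di). For readers unfamiliar with the idiom, here is a compact user-space re-implementation of the error-pointer helpers, for illustration only (the real definitions live in linux/err.h):

#include <errno.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* Encode a small negative errno as a pointer in the top of the address range. */
static inline void *ERR_PTR(long error)
{
	return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
	return (long)ptr;
}

static inline int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

static inline int IS_ERR_OR_NULL(const void *ptr)
{
	return !ptr || IS_ERR(ptr);
}

/* A lookup that can return a valid pointer, NULL ("not found"),
 * or an encoded error such as -ENOMEM. */
static void *lookup(int what)
{
	static int item = 42;

	if (what == 0)
		return NULL;
	if (what < 0)
		return ERR_PTR(-ENOMEM);
	return &item;
}

int main(void)
{
	void *p = lookup(-1);

	if (IS_ERR_OR_NULL(p))	/* replaces: if (!p || IS_ERR(p)) */
		printf("lookup failed: %ld\n", IS_ERR(p) ? PTR_ERR(p) : 0L);
	return 0;
}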
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index f997ec0c1ba4..592396c6dc47 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c | |||
@@ -333,13 +333,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, | |||
333 | goto insert; | 333 | goto insert; |
334 | 334 | ||
335 | if (item_size == 0) { | 335 | if (item_size == 0) { |
336 | btrfs_release_path(root, path); | 336 | btrfs_release_path(path); |
337 | return 0; | 337 | return 0; |
338 | } | 338 | } |
339 | dst_copy = kmalloc(item_size, GFP_NOFS); | 339 | dst_copy = kmalloc(item_size, GFP_NOFS); |
340 | src_copy = kmalloc(item_size, GFP_NOFS); | 340 | src_copy = kmalloc(item_size, GFP_NOFS); |
341 | if (!dst_copy || !src_copy) { | 341 | if (!dst_copy || !src_copy) { |
342 | btrfs_release_path(root, path); | 342 | btrfs_release_path(path); |
343 | kfree(dst_copy); | 343 | kfree(dst_copy); |
344 | kfree(src_copy); | 344 | kfree(src_copy); |
345 | return -ENOMEM; | 345 | return -ENOMEM; |
@@ -361,13 +361,13 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, | |||
361 | * sync | 361 | * sync |
362 | */ | 362 | */ |
363 | if (ret == 0) { | 363 | if (ret == 0) { |
364 | btrfs_release_path(root, path); | 364 | btrfs_release_path(path); |
365 | return 0; | 365 | return 0; |
366 | } | 366 | } |
367 | 367 | ||
368 | } | 368 | } |
369 | insert: | 369 | insert: |
370 | btrfs_release_path(root, path); | 370 | btrfs_release_path(path); |
371 | /* try to insert the key into the destination tree */ | 371 | /* try to insert the key into the destination tree */ |
372 | ret = btrfs_insert_empty_item(trans, root, path, | 372 | ret = btrfs_insert_empty_item(trans, root, path, |
373 | key, item_size); | 373 | key, item_size); |
@@ -382,7 +382,6 @@ insert: | |||
382 | } else if (found_size < item_size) { | 382 | } else if (found_size < item_size) { |
383 | ret = btrfs_extend_item(trans, root, path, | 383 | ret = btrfs_extend_item(trans, root, path, |
384 | item_size - found_size); | 384 | item_size - found_size); |
385 | BUG_ON(ret); | ||
386 | } | 385 | } |
387 | } else if (ret) { | 386 | } else if (ret) { |
388 | return ret; | 387 | return ret; |
@@ -438,7 +437,7 @@ insert: | |||
438 | } | 437 | } |
439 | no_copy: | 438 | no_copy: |
440 | btrfs_mark_buffer_dirty(path->nodes[0]); | 439 | btrfs_mark_buffer_dirty(path->nodes[0]); |
441 | btrfs_release_path(root, path); | 440 | btrfs_release_path(path); |
442 | return 0; | 441 | return 0; |
443 | } | 442 | } |
444 | 443 | ||
@@ -519,7 +518,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |||
519 | * file. This must be done before the btrfs_drop_extents run | 518 | * file. This must be done before the btrfs_drop_extents run |
520 | * so we don't try to drop this extent. | 519 | * so we don't try to drop this extent. |
521 | */ | 520 | */ |
522 | ret = btrfs_lookup_file_extent(trans, root, path, inode->i_ino, | 521 | ret = btrfs_lookup_file_extent(trans, root, path, btrfs_ino(inode), |
523 | start, 0); | 522 | start, 0); |
524 | 523 | ||
525 | if (ret == 0 && | 524 | if (ret == 0 && |
@@ -544,11 +543,11 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |||
544 | * we don't have to do anything | 543 | * we don't have to do anything |
545 | */ | 544 | */ |
546 | if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) { | 545 | if (memcmp(&cmp1, &cmp2, sizeof(cmp1)) == 0) { |
547 | btrfs_release_path(root, path); | 546 | btrfs_release_path(path); |
548 | goto out; | 547 | goto out; |
549 | } | 548 | } |
550 | } | 549 | } |
551 | btrfs_release_path(root, path); | 550 | btrfs_release_path(path); |
552 | 551 | ||
553 | saved_nbytes = inode_get_bytes(inode); | 552 | saved_nbytes = inode_get_bytes(inode); |
554 | /* drop any overlapping extents */ | 553 | /* drop any overlapping extents */ |
@@ -590,6 +589,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |||
590 | ins.objectid, ins.offset, | 589 | ins.objectid, ins.offset, |
591 | 0, root->root_key.objectid, | 590 | 0, root->root_key.objectid, |
592 | key->objectid, offset); | 591 | key->objectid, offset); |
592 | BUG_ON(ret); | ||
593 | } else { | 593 | } else { |
594 | /* | 594 | /* |
595 | * insert the extent pointer in the extent | 595 | * insert the extent pointer in the extent |
@@ -600,7 +600,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |||
600 | key->objectid, offset, &ins); | 600 | key->objectid, offset, &ins); |
601 | BUG_ON(ret); | 601 | BUG_ON(ret); |
602 | } | 602 | } |
603 | btrfs_release_path(root, path); | 603 | btrfs_release_path(path); |
604 | 604 | ||
605 | if (btrfs_file_extent_compression(eb, item)) { | 605 | if (btrfs_file_extent_compression(eb, item)) { |
606 | csum_start = ins.objectid; | 606 | csum_start = ins.objectid; |
@@ -614,7 +614,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |||
614 | 614 | ||
615 | ret = btrfs_lookup_csums_range(root->log_root, | 615 | ret = btrfs_lookup_csums_range(root->log_root, |
616 | csum_start, csum_end - 1, | 616 | csum_start, csum_end - 1, |
617 | &ordered_sums); | 617 | &ordered_sums, 0); |
618 | BUG_ON(ret); | 618 | BUG_ON(ret); |
619 | while (!list_empty(&ordered_sums)) { | 619 | while (!list_empty(&ordered_sums)) { |
620 | struct btrfs_ordered_sum *sums; | 620 | struct btrfs_ordered_sum *sums; |
@@ -629,7 +629,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, | |||
629 | kfree(sums); | 629 | kfree(sums); |
630 | } | 630 | } |
631 | } else { | 631 | } else { |
632 | btrfs_release_path(root, path); | 632 | btrfs_release_path(path); |
633 | } | 633 | } |
634 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { | 634 | } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { |
635 | /* inline extents are easy, we just overwrite them */ | 635 | /* inline extents are easy, we just overwrite them */ |
@@ -675,10 +675,13 @@ static noinline int drop_one_dir_item(struct btrfs_trans_handle *trans, | |||
675 | return -ENOMEM; | 675 | return -ENOMEM; |
676 | 676 | ||
677 | read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); | 677 | read_extent_buffer(leaf, name, (unsigned long)(di + 1), name_len); |
678 | btrfs_release_path(root, path); | 678 | btrfs_release_path(path); |
679 | 679 | ||
680 | inode = read_one_inode(root, location.objectid); | 680 | inode = read_one_inode(root, location.objectid); |
681 | BUG_ON(!inode); | 681 | if (!inode) { |
682 | kfree(name); | ||
683 | return -EIO; | ||
684 | } | ||
682 | 685 | ||
683 | ret = link_to_fixup_dir(trans, root, path, location.objectid); | 686 | ret = link_to_fixup_dir(trans, root, path, location.objectid); |
684 | BUG_ON(ret); | 687 | BUG_ON(ret); |
@@ -713,7 +716,7 @@ static noinline int inode_in_dir(struct btrfs_root *root, | |||
713 | goto out; | 716 | goto out; |
714 | } else | 717 | } else |
715 | goto out; | 718 | goto out; |
716 | btrfs_release_path(root, path); | 719 | btrfs_release_path(path); |
717 | 720 | ||
718 | di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); | 721 | di = btrfs_lookup_dir_item(NULL, root, path, dirid, name, name_len, 0); |
719 | if (di && !IS_ERR(di)) { | 722 | if (di && !IS_ERR(di)) { |
@@ -724,7 +727,7 @@ static noinline int inode_in_dir(struct btrfs_root *root, | |||
724 | goto out; | 727 | goto out; |
725 | match = 1; | 728 | match = 1; |
726 | out: | 729 | out: |
727 | btrfs_release_path(root, path); | 730 | btrfs_release_path(path); |
728 | return match; | 731 | return match; |
729 | } | 732 | } |
730 | 733 | ||
@@ -817,7 +820,10 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans, | |||
817 | return -ENOENT; | 820 | return -ENOENT; |
818 | 821 | ||
819 | inode = read_one_inode(root, key->objectid); | 822 | inode = read_one_inode(root, key->objectid); |
820 | BUG_ON(!inode); | 823 | if (!inode) { |
824 | iput(dir); | ||
825 | return -EIO; | ||
826 | } | ||
821 | 827 | ||
822 | ref_ptr = btrfs_item_ptr_offset(eb, slot); | 828 | ref_ptr = btrfs_item_ptr_offset(eb, slot); |
823 | ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); | 829 | ref_end = ref_ptr + btrfs_item_size_nr(eb, slot); |
@@ -832,7 +838,7 @@ again: | |||
832 | read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen); | 838 | read_extent_buffer(eb, name, (unsigned long)(ref + 1), namelen); |
833 | 839 | ||
834 | /* if we already have a perfect match, we're done */ | 840 | /* if we already have a perfect match, we're done */ |
835 | if (inode_in_dir(root, path, dir->i_ino, inode->i_ino, | 841 | if (inode_in_dir(root, path, btrfs_ino(dir), btrfs_ino(inode), |
836 | btrfs_inode_ref_index(eb, ref), | 842 | btrfs_inode_ref_index(eb, ref), |
837 | name, namelen)) { | 843 | name, namelen)) { |
838 | goto out; | 844 | goto out; |
@@ -884,7 +890,7 @@ again: | |||
884 | if (!backref_in_log(log, key, victim_name, | 890 | if (!backref_in_log(log, key, victim_name, |
885 | victim_name_len)) { | 891 | victim_name_len)) { |
886 | btrfs_inc_nlink(inode); | 892 | btrfs_inc_nlink(inode); |
887 | btrfs_release_path(root, path); | 893 | btrfs_release_path(path); |
888 | 894 | ||
889 | ret = btrfs_unlink_inode(trans, root, dir, | 895 | ret = btrfs_unlink_inode(trans, root, dir, |
890 | inode, victim_name, | 896 | inode, victim_name, |
@@ -901,7 +907,7 @@ again: | |||
901 | */ | 907 | */ |
902 | search_done = 1; | 908 | search_done = 1; |
903 | } | 909 | } |
904 | btrfs_release_path(root, path); | 910 | btrfs_release_path(path); |
905 | 911 | ||
906 | insert: | 912 | insert: |
907 | /* insert our name */ | 913 | /* insert our name */ |
@@ -922,7 +928,7 @@ out: | |||
922 | BUG_ON(ret); | 928 | BUG_ON(ret); |
923 | 929 | ||
924 | out_nowrite: | 930 | out_nowrite: |
925 | btrfs_release_path(root, path); | 931 | btrfs_release_path(path); |
926 | iput(dir); | 932 | iput(dir); |
927 | iput(inode); | 933 | iput(inode); |
928 | return 0; | 934 | return 0; |
@@ -960,8 +966,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, | |||
960 | unsigned long ptr; | 966 | unsigned long ptr; |
961 | unsigned long ptr_end; | 967 | unsigned long ptr_end; |
962 | int name_len; | 968 | int name_len; |
969 | u64 ino = btrfs_ino(inode); | ||
963 | 970 | ||
964 | key.objectid = inode->i_ino; | 971 | key.objectid = ino; |
965 | key.type = BTRFS_INODE_REF_KEY; | 972 | key.type = BTRFS_INODE_REF_KEY; |
966 | key.offset = (u64)-1; | 973 | key.offset = (u64)-1; |
967 | 974 | ||
@@ -980,7 +987,7 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, | |||
980 | } | 987 | } |
981 | btrfs_item_key_to_cpu(path->nodes[0], &key, | 988 | btrfs_item_key_to_cpu(path->nodes[0], &key, |
982 | path->slots[0]); | 989 | path->slots[0]); |
983 | if (key.objectid != inode->i_ino || | 990 | if (key.objectid != ino || |
984 | key.type != BTRFS_INODE_REF_KEY) | 991 | key.type != BTRFS_INODE_REF_KEY) |
985 | break; | 992 | break; |
986 | ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); | 993 | ptr = btrfs_item_ptr_offset(path->nodes[0], path->slots[0]); |
@@ -999,9 +1006,9 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, | |||
999 | if (key.offset == 0) | 1006 | if (key.offset == 0) |
1000 | break; | 1007 | break; |
1001 | key.offset--; | 1008 | key.offset--; |
1002 | btrfs_release_path(root, path); | 1009 | btrfs_release_path(path); |
1003 | } | 1010 | } |
1004 | btrfs_release_path(root, path); | 1011 | btrfs_release_path(path); |
1005 | if (nlink != inode->i_nlink) { | 1012 | if (nlink != inode->i_nlink) { |
1006 | inode->i_nlink = nlink; | 1013 | inode->i_nlink = nlink; |
1007 | btrfs_update_inode(trans, root, inode); | 1014 | btrfs_update_inode(trans, root, inode); |
@@ -1011,10 +1018,10 @@ static noinline int fixup_inode_link_count(struct btrfs_trans_handle *trans, | |||
1011 | if (inode->i_nlink == 0) { | 1018 | if (inode->i_nlink == 0) { |
1012 | if (S_ISDIR(inode->i_mode)) { | 1019 | if (S_ISDIR(inode->i_mode)) { |
1013 | ret = replay_dir_deletes(trans, root, NULL, path, | 1020 | ret = replay_dir_deletes(trans, root, NULL, path, |
1014 | inode->i_ino, 1); | 1021 | ino, 1); |
1015 | BUG_ON(ret); | 1022 | BUG_ON(ret); |
1016 | } | 1023 | } |
1017 | ret = insert_orphan_item(trans, root, inode->i_ino); | 1024 | ret = insert_orphan_item(trans, root, ino); |
1018 | BUG_ON(ret); | 1025 | BUG_ON(ret); |
1019 | } | 1026 | } |
1020 | btrfs_free_path(path); | 1027 | btrfs_free_path(path); |
@@ -1050,11 +1057,13 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, | |||
1050 | break; | 1057 | break; |
1051 | 1058 | ||
1052 | ret = btrfs_del_item(trans, root, path); | 1059 | ret = btrfs_del_item(trans, root, path); |
1053 | BUG_ON(ret); | 1060 | if (ret) |
1061 | goto out; | ||
1054 | 1062 | ||
1055 | btrfs_release_path(root, path); | 1063 | btrfs_release_path(path); |
1056 | inode = read_one_inode(root, key.offset); | 1064 | inode = read_one_inode(root, key.offset); |
1057 | BUG_ON(!inode); | 1065 | if (!inode) |
1066 | return -EIO; | ||
1058 | 1067 | ||
1059 | ret = fixup_inode_link_count(trans, root, inode); | 1068 | ret = fixup_inode_link_count(trans, root, inode); |
1060 | BUG_ON(ret); | 1069 | BUG_ON(ret); |
@@ -1068,8 +1077,10 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans, | |||
1068 | */ | 1077 | */ |
1069 | key.offset = (u64)-1; | 1078 | key.offset = (u64)-1; |
1070 | } | 1079 | } |
1071 | btrfs_release_path(root, path); | 1080 | ret = 0; |
1072 | return 0; | 1081 | out: |
1082 | btrfs_release_path(path); | ||
1083 | return ret; | ||
1073 | } | 1084 | } |
1074 | 1085 | ||
1075 | 1086 | ||
@@ -1088,7 +1099,8 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, | |||
1088 | struct inode *inode; | 1099 | struct inode *inode; |
1089 | 1100 | ||
1090 | inode = read_one_inode(root, objectid); | 1101 | inode = read_one_inode(root, objectid); |
1091 | BUG_ON(!inode); | 1102 | if (!inode) |
1103 | return -EIO; | ||
1092 | 1104 | ||
1093 | key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; | 1105 | key.objectid = BTRFS_TREE_LOG_FIXUP_OBJECTID; |
1094 | btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); | 1106 | btrfs_set_key_type(&key, BTRFS_ORPHAN_ITEM_KEY); |
@@ -1096,7 +1108,7 @@ static noinline int link_to_fixup_dir(struct btrfs_trans_handle *trans, | |||
1096 | 1108 | ||
1097 | ret = btrfs_insert_empty_item(trans, root, path, &key, 0); | 1109 | ret = btrfs_insert_empty_item(trans, root, path, &key, 0); |
1098 | 1110 | ||
1099 | btrfs_release_path(root, path); | 1111 | btrfs_release_path(path); |
1100 | if (ret == 0) { | 1112 | if (ret == 0) { |
1101 | btrfs_inc_nlink(inode); | 1113 | btrfs_inc_nlink(inode); |
1102 | btrfs_update_inode(trans, root, inode); | 1114 | btrfs_update_inode(trans, root, inode); |
@@ -1175,7 +1187,8 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, | |||
1175 | int ret; | 1187 | int ret; |
1176 | 1188 | ||
1177 | dir = read_one_inode(root, key->objectid); | 1189 | dir = read_one_inode(root, key->objectid); |
1178 | BUG_ON(!dir); | 1190 | if (!dir) |
1191 | return -EIO; | ||
1179 | 1192 | ||
1180 | name_len = btrfs_dir_name_len(eb, di); | 1193 | name_len = btrfs_dir_name_len(eb, di); |
1181 | name = kmalloc(name_len, GFP_NOFS); | 1194 | name = kmalloc(name_len, GFP_NOFS); |
@@ -1192,7 +1205,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, | |||
1192 | exists = 1; | 1205 | exists = 1; |
1193 | else | 1206 | else |
1194 | exists = 0; | 1207 | exists = 0; |
1195 | btrfs_release_path(root, path); | 1208 | btrfs_release_path(path); |
1196 | 1209 | ||
1197 | if (key->type == BTRFS_DIR_ITEM_KEY) { | 1210 | if (key->type == BTRFS_DIR_ITEM_KEY) { |
1198 | dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, | 1211 | dst_di = btrfs_lookup_dir_item(trans, root, path, key->objectid, |
@@ -1205,7 +1218,7 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, | |||
1205 | } else { | 1218 | } else { |
1206 | BUG(); | 1219 | BUG(); |
1207 | } | 1220 | } |
1208 | if (!dst_di || IS_ERR(dst_di)) { | 1221 | if (IS_ERR_OR_NULL(dst_di)) { |
1209 | /* we need a sequence number to insert, so we only | 1222 | /* we need a sequence number to insert, so we only |
1210 | * do inserts for the BTRFS_DIR_INDEX_KEY types | 1223 | * do inserts for the BTRFS_DIR_INDEX_KEY types |
1211 | */ | 1224 | */ |
@@ -1236,13 +1249,13 @@ static noinline int replay_one_name(struct btrfs_trans_handle *trans, | |||
1236 | if (key->type == BTRFS_DIR_INDEX_KEY) | 1249 | if (key->type == BTRFS_DIR_INDEX_KEY) |
1237 | goto insert; | 1250 | goto insert; |
1238 | out: | 1251 | out: |
1239 | btrfs_release_path(root, path); | 1252 | btrfs_release_path(path); |
1240 | kfree(name); | 1253 | kfree(name); |
1241 | iput(dir); | 1254 | iput(dir); |
1242 | return 0; | 1255 | return 0; |
1243 | 1256 | ||
1244 | insert: | 1257 | insert: |
1245 | btrfs_release_path(root, path); | 1258 | btrfs_release_path(path); |
1246 | ret = insert_one_name(trans, root, path, key->objectid, key->offset, | 1259 | ret = insert_one_name(trans, root, path, key->objectid, key->offset, |
1247 | name, name_len, log_type, &log_key); | 1260 | name, name_len, log_type, &log_key); |
1248 | 1261 | ||
@@ -1363,7 +1376,7 @@ next: | |||
1363 | *end_ret = found_end; | 1376 | *end_ret = found_end; |
1364 | ret = 0; | 1377 | ret = 0; |
1365 | out: | 1378 | out: |
1366 | btrfs_release_path(root, path); | 1379 | btrfs_release_path(path); |
1367 | return ret; | 1380 | return ret; |
1368 | } | 1381 | } |
1369 | 1382 | ||
@@ -1426,12 +1439,15 @@ again: | |||
1426 | dir_key->offset, | 1439 | dir_key->offset, |
1427 | name, name_len, 0); | 1440 | name, name_len, 0); |
1428 | } | 1441 | } |
1429 | if (!log_di || IS_ERR(log_di)) { | 1442 | if (IS_ERR_OR_NULL(log_di)) { |
1430 | btrfs_dir_item_key_to_cpu(eb, di, &location); | 1443 | btrfs_dir_item_key_to_cpu(eb, di, &location); |
1431 | btrfs_release_path(root, path); | 1444 | btrfs_release_path(path); |
1432 | btrfs_release_path(log, log_path); | 1445 | btrfs_release_path(log_path); |
1433 | inode = read_one_inode(root, location.objectid); | 1446 | inode = read_one_inode(root, location.objectid); |
1434 | BUG_ON(!inode); | 1447 | if (!inode) { |
1448 | kfree(name); | ||
1449 | return -EIO; | ||
1450 | } | ||
1435 | 1451 | ||
1436 | ret = link_to_fixup_dir(trans, root, | 1452 | ret = link_to_fixup_dir(trans, root, |
1437 | path, location.objectid); | 1453 | path, location.objectid); |
@@ -1453,7 +1469,7 @@ again: | |||
1453 | ret = 0; | 1469 | ret = 0; |
1454 | goto out; | 1470 | goto out; |
1455 | } | 1471 | } |
1456 | btrfs_release_path(log, log_path); | 1472 | btrfs_release_path(log_path); |
1457 | kfree(name); | 1473 | kfree(name); |
1458 | 1474 | ||
1459 | ptr = (unsigned long)(di + 1); | 1475 | ptr = (unsigned long)(di + 1); |
@@ -1461,8 +1477,8 @@ again: | |||
1461 | } | 1477 | } |
1462 | ret = 0; | 1478 | ret = 0; |
1463 | out: | 1479 | out: |
1464 | btrfs_release_path(root, path); | 1480 | btrfs_release_path(path); |
1465 | btrfs_release_path(log, log_path); | 1481 | btrfs_release_path(log_path); |
1466 | return ret; | 1482 | return ret; |
1467 | } | 1483 | } |
1468 | 1484 | ||
@@ -1550,7 +1566,7 @@ again: | |||
1550 | break; | 1566 | break; |
1551 | dir_key.offset = found_key.offset + 1; | 1567 | dir_key.offset = found_key.offset + 1; |
1552 | } | 1568 | } |
1553 | btrfs_release_path(root, path); | 1569 | btrfs_release_path(path); |
1554 | if (range_end == (u64)-1) | 1570 | if (range_end == (u64)-1) |
1555 | break; | 1571 | break; |
1556 | range_start = range_end + 1; | 1572 | range_start = range_end + 1; |
@@ -1561,11 +1577,11 @@ next_type: | |||
1561 | if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { | 1577 | if (key_type == BTRFS_DIR_LOG_ITEM_KEY) { |
1562 | key_type = BTRFS_DIR_LOG_INDEX_KEY; | 1578 | key_type = BTRFS_DIR_LOG_INDEX_KEY; |
1563 | dir_key.type = BTRFS_DIR_INDEX_KEY; | 1579 | dir_key.type = BTRFS_DIR_INDEX_KEY; |
1564 | btrfs_release_path(root, path); | 1580 | btrfs_release_path(path); |
1565 | goto again; | 1581 | goto again; |
1566 | } | 1582 | } |
1567 | out: | 1583 | out: |
1568 | btrfs_release_path(root, path); | 1584 | btrfs_release_path(path); |
1569 | btrfs_free_path(log_path); | 1585 | btrfs_free_path(log_path); |
1570 | iput(dir); | 1586 | iput(dir); |
1571 | return ret; | 1587 | return ret; |
@@ -2093,7 +2109,9 @@ int btrfs_sync_log(struct btrfs_trans_handle *trans, | |||
2093 | * the running transaction open, so a full commit can't hop | 2109 | * the running transaction open, so a full commit can't hop |
2094 | * in and cause problems either. | 2110 | * in and cause problems either. |
2095 | */ | 2111 | */ |
2112 | btrfs_scrub_pause_super(root); | ||
2096 | write_ctree_super(trans, root->fs_info->tree_root, 1); | 2113 | write_ctree_super(trans, root->fs_info->tree_root, 1); |
2114 | btrfs_scrub_continue_super(root); | ||
2097 | ret = 0; | 2115 | ret = 0; |
2098 | 2116 | ||
2099 | mutex_lock(&root->log_mutex); | 2117 | mutex_lock(&root->log_mutex); |
@@ -2197,6 +2215,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, | |||
2197 | int ret; | 2215 | int ret; |
2198 | int err = 0; | 2216 | int err = 0; |
2199 | int bytes_del = 0; | 2217 | int bytes_del = 0; |
2218 | u64 dir_ino = btrfs_ino(dir); | ||
2200 | 2219 | ||
2201 | if (BTRFS_I(dir)->logged_trans < trans->transid) | 2220 | if (BTRFS_I(dir)->logged_trans < trans->transid) |
2202 | return 0; | 2221 | return 0; |
@@ -2214,7 +2233,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, | |||
2214 | goto out_unlock; | 2233 | goto out_unlock; |
2215 | } | 2234 | } |
2216 | 2235 | ||
2217 | di = btrfs_lookup_dir_item(trans, log, path, dir->i_ino, | 2236 | di = btrfs_lookup_dir_item(trans, log, path, dir_ino, |
2218 | name, name_len, -1); | 2237 | name, name_len, -1); |
2219 | if (IS_ERR(di)) { | 2238 | if (IS_ERR(di)) { |
2220 | err = PTR_ERR(di); | 2239 | err = PTR_ERR(di); |
@@ -2225,8 +2244,8 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, | |||
2225 | bytes_del += name_len; | 2244 | bytes_del += name_len; |
2226 | BUG_ON(ret); | 2245 | BUG_ON(ret); |
2227 | } | 2246 | } |
2228 | btrfs_release_path(log, path); | 2247 | btrfs_release_path(path); |
2229 | di = btrfs_lookup_dir_index_item(trans, log, path, dir->i_ino, | 2248 | di = btrfs_lookup_dir_index_item(trans, log, path, dir_ino, |
2230 | index, name, name_len, -1); | 2249 | index, name, name_len, -1); |
2231 | if (IS_ERR(di)) { | 2250 | if (IS_ERR(di)) { |
2232 | err = PTR_ERR(di); | 2251 | err = PTR_ERR(di); |
@@ -2244,10 +2263,10 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, | |||
2244 | if (bytes_del) { | 2263 | if (bytes_del) { |
2245 | struct btrfs_key key; | 2264 | struct btrfs_key key; |
2246 | 2265 | ||
2247 | key.objectid = dir->i_ino; | 2266 | key.objectid = dir_ino; |
2248 | key.offset = 0; | 2267 | key.offset = 0; |
2249 | key.type = BTRFS_INODE_ITEM_KEY; | 2268 | key.type = BTRFS_INODE_ITEM_KEY; |
2250 | btrfs_release_path(log, path); | 2269 | btrfs_release_path(path); |
2251 | 2270 | ||
2252 | ret = btrfs_search_slot(trans, log, &key, path, 0, 1); | 2271 | ret = btrfs_search_slot(trans, log, &key, path, 0, 1); |
2253 | if (ret < 0) { | 2272 | if (ret < 0) { |
@@ -2269,7 +2288,7 @@ int btrfs_del_dir_entries_in_log(struct btrfs_trans_handle *trans, | |||
2269 | btrfs_mark_buffer_dirty(path->nodes[0]); | 2288 | btrfs_mark_buffer_dirty(path->nodes[0]); |
2270 | } else | 2289 | } else |
2271 | ret = 0; | 2290 | ret = 0; |
2272 | btrfs_release_path(log, path); | 2291 | btrfs_release_path(path); |
2273 | } | 2292 | } |
2274 | fail: | 2293 | fail: |
2275 | btrfs_free_path(path); | 2294 | btrfs_free_path(path); |
@@ -2303,7 +2322,7 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, | |||
2303 | log = root->log_root; | 2322 | log = root->log_root; |
2304 | mutex_lock(&BTRFS_I(inode)->log_mutex); | 2323 | mutex_lock(&BTRFS_I(inode)->log_mutex); |
2305 | 2324 | ||
2306 | ret = btrfs_del_inode_ref(trans, log, name, name_len, inode->i_ino, | 2325 | ret = btrfs_del_inode_ref(trans, log, name, name_len, btrfs_ino(inode), |
2307 | dirid, &index); | 2326 | dirid, &index); |
2308 | mutex_unlock(&BTRFS_I(inode)->log_mutex); | 2327 | mutex_unlock(&BTRFS_I(inode)->log_mutex); |
2309 | if (ret == -ENOSPC) { | 2328 | if (ret == -ENOSPC) { |
@@ -2344,7 +2363,7 @@ static noinline int insert_dir_log_key(struct btrfs_trans_handle *trans, | |||
2344 | struct btrfs_dir_log_item); | 2363 | struct btrfs_dir_log_item); |
2345 | btrfs_set_dir_log_end(path->nodes[0], item, last_offset); | 2364 | btrfs_set_dir_log_end(path->nodes[0], item, last_offset); |
2346 | btrfs_mark_buffer_dirty(path->nodes[0]); | 2365 | btrfs_mark_buffer_dirty(path->nodes[0]); |
2347 | btrfs_release_path(log, path); | 2366 | btrfs_release_path(path); |
2348 | return 0; | 2367 | return 0; |
2349 | } | 2368 | } |
2350 | 2369 | ||
@@ -2369,13 +2388,14 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, | |||
2369 | int nritems; | 2388 | int nritems; |
2370 | u64 first_offset = min_offset; | 2389 | u64 first_offset = min_offset; |
2371 | u64 last_offset = (u64)-1; | 2390 | u64 last_offset = (u64)-1; |
2391 | u64 ino = btrfs_ino(inode); | ||
2372 | 2392 | ||
2373 | log = root->log_root; | 2393 | log = root->log_root; |
2374 | max_key.objectid = inode->i_ino; | 2394 | max_key.objectid = ino; |
2375 | max_key.offset = (u64)-1; | 2395 | max_key.offset = (u64)-1; |
2376 | max_key.type = key_type; | 2396 | max_key.type = key_type; |
2377 | 2397 | ||
2378 | min_key.objectid = inode->i_ino; | 2398 | min_key.objectid = ino; |
2379 | min_key.type = key_type; | 2399 | min_key.type = key_type; |
2380 | min_key.offset = min_offset; | 2400 | min_key.offset = min_offset; |
2381 | 2401 | ||
@@ -2388,18 +2408,17 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, | |||
2388 | * we didn't find anything from this transaction, see if there | 2408 | * we didn't find anything from this transaction, see if there |
2389 | * is anything at all | 2409 | * is anything at all |
2390 | */ | 2410 | */ |
2391 | if (ret != 0 || min_key.objectid != inode->i_ino || | 2411 | if (ret != 0 || min_key.objectid != ino || min_key.type != key_type) { |
2392 | min_key.type != key_type) { | 2412 | min_key.objectid = ino; |
2393 | min_key.objectid = inode->i_ino; | ||
2394 | min_key.type = key_type; | 2413 | min_key.type = key_type; |
2395 | min_key.offset = (u64)-1; | 2414 | min_key.offset = (u64)-1; |
2396 | btrfs_release_path(root, path); | 2415 | btrfs_release_path(path); |
2397 | ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); | 2416 | ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); |
2398 | if (ret < 0) { | 2417 | if (ret < 0) { |
2399 | btrfs_release_path(root, path); | 2418 | btrfs_release_path(path); |
2400 | return ret; | 2419 | return ret; |
2401 | } | 2420 | } |
2402 | ret = btrfs_previous_item(root, path, inode->i_ino, key_type); | 2421 | ret = btrfs_previous_item(root, path, ino, key_type); |
2403 | 2422 | ||
2404 | /* if ret == 0 there are items for this type, | 2423 | /* if ret == 0 there are items for this type, |
2405 | * create a range to tell us the last key of this type. | 2424 | * create a range to tell us the last key of this type. |
@@ -2417,7 +2436,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, | |||
2417 | } | 2436 | } |
2418 | 2437 | ||
2419 | /* go backward to find any previous key */ | 2438 | /* go backward to find any previous key */ |
2420 | ret = btrfs_previous_item(root, path, inode->i_ino, key_type); | 2439 | ret = btrfs_previous_item(root, path, ino, key_type); |
2421 | if (ret == 0) { | 2440 | if (ret == 0) { |
2422 | struct btrfs_key tmp; | 2441 | struct btrfs_key tmp; |
2423 | btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); | 2442 | btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); |
@@ -2432,7 +2451,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, | |||
2432 | } | 2451 | } |
2433 | } | 2452 | } |
2434 | } | 2453 | } |
2435 | btrfs_release_path(root, path); | 2454 | btrfs_release_path(path); |
2436 | 2455 | ||
2437 | /* find the first key from this transaction again */ | 2456 | /* find the first key from this transaction again */ |
2438 | ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); | 2457 | ret = btrfs_search_slot(NULL, root, &min_key, path, 0, 0); |
@@ -2452,8 +2471,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, | |||
2452 | for (i = path->slots[0]; i < nritems; i++) { | 2471 | for (i = path->slots[0]; i < nritems; i++) { |
2453 | btrfs_item_key_to_cpu(src, &min_key, i); | 2472 | btrfs_item_key_to_cpu(src, &min_key, i); |
2454 | 2473 | ||
2455 | if (min_key.objectid != inode->i_ino || | 2474 | if (min_key.objectid != ino || min_key.type != key_type) |
2456 | min_key.type != key_type) | ||
2457 | goto done; | 2475 | goto done; |
2458 | ret = overwrite_item(trans, log, dst_path, src, i, | 2476 | ret = overwrite_item(trans, log, dst_path, src, i, |
2459 | &min_key); | 2477 | &min_key); |
@@ -2474,7 +2492,7 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, | |||
2474 | goto done; | 2492 | goto done; |
2475 | } | 2493 | } |
2476 | btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); | 2494 | btrfs_item_key_to_cpu(path->nodes[0], &tmp, path->slots[0]); |
2477 | if (tmp.objectid != inode->i_ino || tmp.type != key_type) { | 2495 | if (tmp.objectid != ino || tmp.type != key_type) { |
2478 | last_offset = (u64)-1; | 2496 | last_offset = (u64)-1; |
2479 | goto done; | 2497 | goto done; |
2480 | } | 2498 | } |
@@ -2490,8 +2508,8 @@ static noinline int log_dir_items(struct btrfs_trans_handle *trans, | |||
2490 | } | 2508 | } |
2491 | } | 2509 | } |
2492 | done: | 2510 | done: |
2493 | btrfs_release_path(root, path); | 2511 | btrfs_release_path(path); |
2494 | btrfs_release_path(log, dst_path); | 2512 | btrfs_release_path(dst_path); |
2495 | 2513 | ||
2496 | if (err == 0) { | 2514 | if (err == 0) { |
2497 | *last_offset_ret = last_offset; | 2515 | *last_offset_ret = last_offset; |
@@ -2500,8 +2518,7 @@ done: | |||
2500 | * is valid | 2518 | * is valid |
2501 | */ | 2519 | */ |
2502 | ret = insert_dir_log_key(trans, log, path, key_type, | 2520 | ret = insert_dir_log_key(trans, log, path, key_type, |
2503 | inode->i_ino, first_offset, | 2521 | ino, first_offset, last_offset); |
2504 | last_offset); | ||
2505 | if (ret) | 2522 | if (ret) |
2506 | err = ret; | 2523 | err = ret; |
2507 | } | 2524 | } |
@@ -2587,10 +2604,11 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans, | |||
2587 | break; | 2604 | break; |
2588 | 2605 | ||
2589 | ret = btrfs_del_item(trans, log, path); | 2606 | ret = btrfs_del_item(trans, log, path); |
2590 | BUG_ON(ret); | 2607 | if (ret) |
2591 | btrfs_release_path(log, path); | 2608 | break; |
2609 | btrfs_release_path(path); | ||
2592 | } | 2610 | } |
2593 | btrfs_release_path(log, path); | 2611 | btrfs_release_path(path); |
2594 | return ret; | 2612 | return ret; |
2595 | } | 2613 | } |
2596 | 2614 | ||
@@ -2665,6 +2683,9 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, | |||
2665 | extent = btrfs_item_ptr(src, start_slot + i, | 2683 | extent = btrfs_item_ptr(src, start_slot + i, |
2666 | struct btrfs_file_extent_item); | 2684 | struct btrfs_file_extent_item); |
2667 | 2685 | ||
2686 | if (btrfs_file_extent_generation(src, extent) < trans->transid) | ||
2687 | continue; | ||
2688 | |||
2668 | found_type = btrfs_file_extent_type(src, extent); | 2689 | found_type = btrfs_file_extent_type(src, extent); |
2669 | if (found_type == BTRFS_FILE_EXTENT_REG || | 2690 | if (found_type == BTRFS_FILE_EXTENT_REG || |
2670 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { | 2691 | found_type == BTRFS_FILE_EXTENT_PREALLOC) { |
@@ -2689,14 +2710,14 @@ static noinline int copy_items(struct btrfs_trans_handle *trans, | |||
2689 | ret = btrfs_lookup_csums_range( | 2710 | ret = btrfs_lookup_csums_range( |
2690 | log->fs_info->csum_root, | 2711 | log->fs_info->csum_root, |
2691 | ds + cs, ds + cs + cl - 1, | 2712 | ds + cs, ds + cs + cl - 1, |
2692 | &ordered_sums); | 2713 | &ordered_sums, 0); |
2693 | BUG_ON(ret); | 2714 | BUG_ON(ret); |
2694 | } | 2715 | } |
2695 | } | 2716 | } |
2696 | } | 2717 | } |
2697 | 2718 | ||
2698 | btrfs_mark_buffer_dirty(dst_path->nodes[0]); | 2719 | btrfs_mark_buffer_dirty(dst_path->nodes[0]); |
2699 | btrfs_release_path(log, dst_path); | 2720 | btrfs_release_path(dst_path); |
2700 | kfree(ins_data); | 2721 | kfree(ins_data); |
2701 | 2722 | ||
2702 | /* | 2723 | /* |
@@ -2745,6 +2766,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
2745 | int nritems; | 2766 | int nritems; |
2746 | int ins_start_slot = 0; | 2767 | int ins_start_slot = 0; |
2747 | int ins_nr; | 2768 | int ins_nr; |
2769 | u64 ino = btrfs_ino(inode); | ||
2748 | 2770 | ||
2749 | log = root->log_root; | 2771 | log = root->log_root; |
2750 | 2772 | ||
@@ -2757,11 +2779,11 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
2757 | return -ENOMEM; | 2779 | return -ENOMEM; |
2758 | } | 2780 | } |
2759 | 2781 | ||
2760 | min_key.objectid = inode->i_ino; | 2782 | min_key.objectid = ino; |
2761 | min_key.type = BTRFS_INODE_ITEM_KEY; | 2783 | min_key.type = BTRFS_INODE_ITEM_KEY; |
2762 | min_key.offset = 0; | 2784 | min_key.offset = 0; |
2763 | 2785 | ||
2764 | max_key.objectid = inode->i_ino; | 2786 | max_key.objectid = ino; |
2765 | 2787 | ||
2766 | /* today the code can only do partial logging of directories */ | 2788 | /* today the code can only do partial logging of directories */ |
2767 | if (!S_ISDIR(inode->i_mode)) | 2789 | if (!S_ISDIR(inode->i_mode)) |
@@ -2773,6 +2795,13 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
2773 | max_key.type = (u8)-1; | 2795 | max_key.type = (u8)-1; |
2774 | max_key.offset = (u64)-1; | 2796 | max_key.offset = (u64)-1; |
2775 | 2797 | ||
2798 | ret = btrfs_commit_inode_delayed_items(trans, inode); | ||
2799 | if (ret) { | ||
2800 | btrfs_free_path(path); | ||
2801 | btrfs_free_path(dst_path); | ||
2802 | return ret; | ||
2803 | } | ||
2804 | |||
2776 | mutex_lock(&BTRFS_I(inode)->log_mutex); | 2805 | mutex_lock(&BTRFS_I(inode)->log_mutex); |
2777 | 2806 | ||
2778 | /* | 2807 | /* |
@@ -2784,8 +2813,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
2784 | 2813 | ||
2785 | if (inode_only == LOG_INODE_EXISTS) | 2814 | if (inode_only == LOG_INODE_EXISTS) |
2786 | max_key_type = BTRFS_XATTR_ITEM_KEY; | 2815 | max_key_type = BTRFS_XATTR_ITEM_KEY; |
2787 | ret = drop_objectid_items(trans, log, path, | 2816 | ret = drop_objectid_items(trans, log, path, ino, max_key_type); |
2788 | inode->i_ino, max_key_type); | ||
2789 | } else { | 2817 | } else { |
2790 | ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0); | 2818 | ret = btrfs_truncate_inode_items(trans, log, inode, 0, 0); |
2791 | } | 2819 | } |
@@ -2803,7 +2831,7 @@ static int btrfs_log_inode(struct btrfs_trans_handle *trans, | |||
2803 | break; | 2831 | break; |
2804 | again: | 2832 | again: |
2805 | /* note, ins_nr might be > 0 here, cleanup outside the loop */ | 2833 | /* note, ins_nr might be > 0 here, cleanup outside the loop */ |
2806 | if (min_key.objectid != inode->i_ino) | 2834 | if (min_key.objectid != ino) |
2807 | break; | 2835 | break; |
2808 | if (min_key.type > max_key.type) | 2836 | if (min_key.type > max_key.type) |
2809 | break; | 2837 | break; |
@@ -2845,7 +2873,7 @@ next_slot: | |||
2845 | } | 2873 | } |
2846 | ins_nr = 0; | 2874 | ins_nr = 0; |
2847 | } | 2875 | } |
2848 | btrfs_release_path(root, path); | 2876 | btrfs_release_path(path); |
2849 | 2877 | ||
2850 | if (min_key.offset < (u64)-1) | 2878 | if (min_key.offset < (u64)-1) |
2851 | min_key.offset++; | 2879 | min_key.offset++; |
@@ -2868,8 +2896,8 @@ next_slot: | |||
2868 | } | 2896 | } |
2869 | WARN_ON(ins_nr); | 2897 | WARN_ON(ins_nr); |
2870 | if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { | 2898 | if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) { |
2871 | btrfs_release_path(root, path); | 2899 | btrfs_release_path(path); |
2872 | btrfs_release_path(log, dst_path); | 2900 | btrfs_release_path(dst_path); |
2873 | ret = log_directory_changes(trans, root, inode, path, dst_path); | 2901 | ret = log_directory_changes(trans, root, inode, path, dst_path); |
2874 | if (ret) { | 2902 | if (ret) { |
2875 | err = ret; | 2903 | err = ret; |
@@ -3136,7 +3164,7 @@ again: | |||
3136 | } | 3164 | } |
3137 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, | 3165 | btrfs_item_key_to_cpu(path->nodes[0], &found_key, |
3138 | path->slots[0]); | 3166 | path->slots[0]); |
3139 | btrfs_release_path(log_root_tree, path); | 3167 | btrfs_release_path(path); |
3140 | if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID) | 3168 | if (found_key.objectid != BTRFS_TREE_LOG_OBJECTID) |
3141 | break; | 3169 | break; |
3142 | 3170 | ||
@@ -3171,7 +3199,7 @@ again: | |||
3171 | if (found_key.offset == 0) | 3199 | if (found_key.offset == 0) |
3172 | break; | 3200 | break; |
3173 | } | 3201 | } |
3174 | btrfs_release_path(log_root_tree, path); | 3202 | btrfs_release_path(path); |
3175 | 3203 | ||
3176 | /* step one is to pin it all, step two is to replay just inodes */ | 3204 | /* step one is to pin it all, step two is to replay just inodes */ |
3177 | if (wc.pin) { | 3205 | if (wc.pin) { |
diff --git a/fs/btrfs/tree-log.h b/fs/btrfs/tree-log.h index 3dfae84c8cc8..2270ac58d746 100644 --- a/fs/btrfs/tree-log.h +++ b/fs/btrfs/tree-log.h | |||
@@ -38,7 +38,6 @@ int btrfs_del_inode_ref_in_log(struct btrfs_trans_handle *trans, | |||
38 | struct btrfs_root *root, | 38 | struct btrfs_root *root, |
39 | const char *name, int name_len, | 39 | const char *name, int name_len, |
40 | struct inode *inode, u64 dirid); | 40 | struct inode *inode, u64 dirid); |
41 | int btrfs_join_running_log_trans(struct btrfs_root *root); | ||
42 | int btrfs_end_log_trans(struct btrfs_root *root); | 41 | int btrfs_end_log_trans(struct btrfs_root *root); |
43 | int btrfs_pin_log_trans(struct btrfs_root *root); | 42 | int btrfs_pin_log_trans(struct btrfs_root *root); |
44 | int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, | 43 | int btrfs_log_inode_parent(struct btrfs_trans_handle *trans, |
diff --git a/fs/btrfs/version.sh b/fs/btrfs/version.sh deleted file mode 100644 index 1ca1952fd917..000000000000 --- a/fs/btrfs/version.sh +++ /dev/null | |||
@@ -1,43 +0,0 @@ | |||
1 | #!/bin/bash | ||
2 | # | ||
3 | # determine-version -- report a useful version for releases | ||
4 | # | ||
5 | # Copyright 2008, Aron Griffis <agriffis@n01se.net> | ||
6 | # Copyright 2008, Oracle | ||
7 | # Released under the GNU GPLv2 | ||
8 | |||
9 | v="v0.16" | ||
10 | |||
11 | which git &> /dev/null | ||
12 | if [ $? == 0 ]; then | ||
13 | git branch >& /dev/null | ||
14 | if [ $? == 0 ]; then | ||
15 | if head=`git rev-parse --verify HEAD 2>/dev/null`; then | ||
16 | if tag=`git describe --tags 2>/dev/null`; then | ||
17 | v="$tag" | ||
18 | fi | ||
19 | |||
20 | # Are there uncommitted changes? | ||
21 | git update-index --refresh --unmerged > /dev/null | ||
22 | if git diff-index --name-only HEAD | \ | ||
23 | grep -v "^scripts/package" \ | ||
24 | | read dummy; then | ||
25 | v="$v"-dirty | ||
26 | fi | ||
27 | fi | ||
28 | fi | ||
29 | fi | ||
30 | |||
31 | echo "#ifndef __BUILD_VERSION" > .build-version.h | ||
32 | echo "#define __BUILD_VERSION" >> .build-version.h | ||
33 | echo "#define BTRFS_BUILD_VERSION \"Btrfs $v\"" >> .build-version.h | ||
34 | echo "#endif" >> .build-version.h | ||
35 | |||
36 | diff -q version.h .build-version.h >& /dev/null | ||
37 | |||
38 | if [ $? == 0 ]; then | ||
39 | rm .build-version.h | ||
40 | exit 0 | ||
41 | fi | ||
42 | |||
43 | mv .build-version.h version.h | ||
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index c7367ae5a3e6..c48214ef5c09 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c | |||
@@ -38,22 +38,9 @@ static int init_first_rw_device(struct btrfs_trans_handle *trans, | |||
38 | struct btrfs_device *device); | 38 | struct btrfs_device *device); |
39 | static int btrfs_relocate_sys_chunks(struct btrfs_root *root); | 39 | static int btrfs_relocate_sys_chunks(struct btrfs_root *root); |
40 | 40 | ||
41 | #define map_lookup_size(n) (sizeof(struct map_lookup) + \ | ||
42 | (sizeof(struct btrfs_bio_stripe) * (n))) | ||
43 | |||
44 | static DEFINE_MUTEX(uuid_mutex); | 41 | static DEFINE_MUTEX(uuid_mutex); |
45 | static LIST_HEAD(fs_uuids); | 42 | static LIST_HEAD(fs_uuids); |
46 | 43 | ||
47 | void btrfs_lock_volumes(void) | ||
48 | { | ||
49 | mutex_lock(&uuid_mutex); | ||
50 | } | ||
51 | |||
52 | void btrfs_unlock_volumes(void) | ||
53 | { | ||
54 | mutex_unlock(&uuid_mutex); | ||
55 | } | ||
56 | |||
57 | static void lock_chunks(struct btrfs_root *root) | 44 | static void lock_chunks(struct btrfs_root *root) |
58 | { | 45 | { |
59 | mutex_lock(&root->fs_info->chunk_mutex); | 46 | mutex_lock(&root->fs_info->chunk_mutex); |
@@ -363,7 +350,7 @@ static noinline int device_list_add(const char *path, | |||
363 | INIT_LIST_HEAD(&device->dev_alloc_list); | 350 | INIT_LIST_HEAD(&device->dev_alloc_list); |
364 | 351 | ||
365 | mutex_lock(&fs_devices->device_list_mutex); | 352 | mutex_lock(&fs_devices->device_list_mutex); |
366 | list_add(&device->dev_list, &fs_devices->devices); | 353 | list_add_rcu(&device->dev_list, &fs_devices->devices); |
367 | mutex_unlock(&fs_devices->device_list_mutex); | 354 | mutex_unlock(&fs_devices->device_list_mutex); |
368 | 355 | ||
369 | device->fs_devices = fs_devices; | 356 | device->fs_devices = fs_devices; |
@@ -406,7 +393,7 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) | |||
406 | fs_devices->latest_trans = orig->latest_trans; | 393 | fs_devices->latest_trans = orig->latest_trans; |
407 | memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid)); | 394 | memcpy(fs_devices->fsid, orig->fsid, sizeof(fs_devices->fsid)); |
408 | 395 | ||
409 | mutex_lock(&orig->device_list_mutex); | 396 | /* We hold the volume lock, so it is safe to get the devices. */ |
410 | list_for_each_entry(orig_dev, &orig->devices, dev_list) { | 397 | list_for_each_entry(orig_dev, &orig->devices, dev_list) { |
411 | device = kzalloc(sizeof(*device), GFP_NOFS); | 398 | device = kzalloc(sizeof(*device), GFP_NOFS); |
412 | if (!device) | 399 | if (!device) |
@@ -429,10 +416,8 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig) | |||
429 | device->fs_devices = fs_devices; | 416 | device->fs_devices = fs_devices; |
430 | fs_devices->num_devices++; | 417 | fs_devices->num_devices++; |
431 | } | 418 | } |
432 | mutex_unlock(&orig->device_list_mutex); | ||
433 | return fs_devices; | 419 | return fs_devices; |
434 | error: | 420 | error: |
435 | mutex_unlock(&orig->device_list_mutex); | ||
436 | free_fs_devices(fs_devices); | 421 | free_fs_devices(fs_devices); |
437 | return ERR_PTR(-ENOMEM); | 422 | return ERR_PTR(-ENOMEM); |
438 | } | 423 | } |
@@ -443,7 +428,7 @@ int btrfs_close_extra_devices(struct btrfs_fs_devices *fs_devices) | |||
443 | 428 | ||
444 | mutex_lock(&uuid_mutex); | 429 | mutex_lock(&uuid_mutex); |
445 | again: | 430 | again: |
446 | mutex_lock(&fs_devices->device_list_mutex); | 431 | /* This is the initialized path, so it is safe to release the devices. */ |
447 | list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { | 432 | list_for_each_entry_safe(device, next, &fs_devices->devices, dev_list) { |
448 | if (device->in_fs_metadata) | 433 | if (device->in_fs_metadata) |
449 | continue; | 434 | continue; |
@@ -463,7 +448,6 @@ again: | |||
463 | kfree(device->name); | 448 | kfree(device->name); |
464 | kfree(device); | 449 | kfree(device); |
465 | } | 450 | } |
466 | mutex_unlock(&fs_devices->device_list_mutex); | ||
467 | 451 | ||
468 | if (fs_devices->seed) { | 452 | if (fs_devices->seed) { |
469 | fs_devices = fs_devices->seed; | 453 | fs_devices = fs_devices->seed; |
@@ -474,6 +458,29 @@ again: | |||
474 | return 0; | 458 | return 0; |
475 | } | 459 | } |
476 | 460 | ||
461 | static void __free_device(struct work_struct *work) | ||
462 | { | ||
463 | struct btrfs_device *device; | ||
464 | |||
465 | device = container_of(work, struct btrfs_device, rcu_work); | ||
466 | |||
467 | if (device->bdev) | ||
468 | blkdev_put(device->bdev, device->mode); | ||
469 | |||
470 | kfree(device->name); | ||
471 | kfree(device); | ||
472 | } | ||
473 | |||
474 | static void free_device(struct rcu_head *head) | ||
475 | { | ||
476 | struct btrfs_device *device; | ||
477 | |||
478 | device = container_of(head, struct btrfs_device, rcu); | ||
479 | |||
480 | INIT_WORK(&device->rcu_work, __free_device); | ||
481 | schedule_work(&device->rcu_work); | ||
482 | } | ||
483 | |||
477 | static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | 484 | static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) |
478 | { | 485 | { |
479 | struct btrfs_device *device; | 486 | struct btrfs_device *device; |
@@ -481,20 +488,32 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices) | |||
481 | if (--fs_devices->opened > 0) | 488 | if (--fs_devices->opened > 0) |
482 | return 0; | 489 | return 0; |
483 | 490 | ||
491 | mutex_lock(&fs_devices->device_list_mutex); | ||
484 | list_for_each_entry(device, &fs_devices->devices, dev_list) { | 492 | list_for_each_entry(device, &fs_devices->devices, dev_list) { |
485 | if (device->bdev) { | 493 | struct btrfs_device *new_device; |
486 | blkdev_put(device->bdev, device->mode); | 494 | |
495 | if (device->bdev) | ||
487 | fs_devices->open_devices--; | 496 | fs_devices->open_devices--; |
488 | } | 497 | |
489 | if (device->writeable) { | 498 | if (device->writeable) { |
490 | list_del_init(&device->dev_alloc_list); | 499 | list_del_init(&device->dev_alloc_list); |
491 | fs_devices->rw_devices--; | 500 | fs_devices->rw_devices--; |
492 | } | 501 | } |
493 | 502 | ||
494 | device->bdev = NULL; | 503 | new_device = kmalloc(sizeof(*new_device), GFP_NOFS); |
495 | device->writeable = 0; | 504 | BUG_ON(!new_device); |
496 | device->in_fs_metadata = 0; | 505 | memcpy(new_device, device, sizeof(*new_device)); |
506 | new_device->name = kstrdup(device->name, GFP_NOFS); | ||
507 | BUG_ON(!new_device->name); | ||
508 | new_device->bdev = NULL; | ||
509 | new_device->writeable = 0; | ||
510 | new_device->in_fs_metadata = 0; | ||
511 | list_replace_rcu(&device->dev_list, &new_device->dev_list); | ||
512 | |||
513 | call_rcu(&device->rcu, free_device); | ||
497 | } | 514 | } |
515 | mutex_unlock(&fs_devices->device_list_mutex); | ||
516 | |||
498 | WARN_ON(fs_devices->open_devices); | 517 | WARN_ON(fs_devices->open_devices); |
499 | WARN_ON(fs_devices->rw_devices); | 518 | WARN_ON(fs_devices->rw_devices); |
500 | fs_devices->opened = 0; | 519 | fs_devices->opened = 0; |
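
The two hunks above switch __btrfs_close_devices() to RCU: each closed device is shallow-copied, the copy takes its place on the list via list_replace_rcu(), and the original is handed to call_rcu(). Because the RCU callback runs in a context that must not sleep, free_device() only queues a work item; the work handler then performs the blkdev_put() and kfree(). A minimal kernel-module sketch of that two-stage deferral, using hypothetical names (demo_dev, demo_release_*) rather than the btrfs structures:

    #include <linux/module.h>
    #include <linux/errno.h>
    #include <linux/slab.h>
    #include <linux/string.h>
    #include <linux/rcupdate.h>
    #include <linux/workqueue.h>

    struct demo_dev {
        char *name;
        struct rcu_head rcu;
        struct work_struct rcu_work;
    };

    /* runs in process context, so sleeping teardown (e.g. blkdev_put) is allowed */
    static void demo_release_work(struct work_struct *work)
    {
        struct demo_dev *d = container_of(work, struct demo_dev, rcu_work);

        kfree(d->name);
        kfree(d);
    }

    /* RCU callback: may not sleep, so punt the real teardown to a workqueue */
    static void demo_release_rcu(struct rcu_head *head)
    {
        struct demo_dev *d = container_of(head, struct demo_dev, rcu);

        INIT_WORK(&d->rcu_work, demo_release_work);
        schedule_work(&d->rcu_work);
    }

    static int __init demo_init(void)
    {
        struct demo_dev *d = kzalloc(sizeof(*d), GFP_KERNEL);

        if (!d)
            return -ENOMEM;
        d->name = kstrdup("demo", GFP_KERNEL);

        /* readers that entered rcu_read_lock() before this still see 'd' */
        call_rcu(&d->rcu, demo_release_rcu);
        return 0;
    }

    static void __exit demo_exit(void)
    {
        rcu_barrier();              /* wait for the RCU callback */
        flush_scheduled_work();     /* and for the deferred work */
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");

The rcu_barrier()/flush_scheduled_work() pair mirrors the ordering the btrfs code relies on: the old structure may only be fully freed once both the grace period and the deferred work have completed, while list_replace_rcu() keeps a consistent entry visible to concurrent readers in the meantime.
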
@@ -597,6 +616,7 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | |||
597 | list_add(&device->dev_alloc_list, | 616 | list_add(&device->dev_alloc_list, |
598 | &fs_devices->alloc_list); | 617 | &fs_devices->alloc_list); |
599 | } | 618 | } |
619 | brelse(bh); | ||
600 | continue; | 620 | continue; |
601 | 621 | ||
602 | error_brelse: | 622 | error_brelse: |
@@ -815,10 +835,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans, | |||
815 | /* we don't want to overwrite the superblock on the drive, | 835 | /* we don't want to overwrite the superblock on the drive, |
816 | * so we make sure to start at an offset of at least 1MB | 836 | * so we make sure to start at an offset of at least 1MB |
817 | */ | 837 | */ |
818 | search_start = 1024 * 1024; | 838 | search_start = max(root->fs_info->alloc_start, 1024ull * 1024); |
819 | |||
820 | if (root->fs_info->alloc_start + num_bytes <= search_end) | ||
821 | search_start = max(root->fs_info->alloc_start, search_start); | ||
822 | 839 | ||
823 | max_hole_start = search_start; | 840 | max_hole_start = search_start; |
824 | max_hole_size = 0; | 841 | max_hole_size = 0; |
@@ -949,14 +966,14 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, | |||
949 | if (ret > 0) { | 966 | if (ret > 0) { |
950 | ret = btrfs_previous_item(root, path, key.objectid, | 967 | ret = btrfs_previous_item(root, path, key.objectid, |
951 | BTRFS_DEV_EXTENT_KEY); | 968 | BTRFS_DEV_EXTENT_KEY); |
952 | BUG_ON(ret); | 969 | if (ret) |
970 | goto out; | ||
953 | leaf = path->nodes[0]; | 971 | leaf = path->nodes[0]; |
954 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); | 972 | btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]); |
955 | extent = btrfs_item_ptr(leaf, path->slots[0], | 973 | extent = btrfs_item_ptr(leaf, path->slots[0], |
956 | struct btrfs_dev_extent); | 974 | struct btrfs_dev_extent); |
957 | BUG_ON(found_key.offset > start || found_key.offset + | 975 | BUG_ON(found_key.offset > start || found_key.offset + |
958 | btrfs_dev_extent_length(leaf, extent) < start); | 976 | btrfs_dev_extent_length(leaf, extent) < start); |
959 | ret = 0; | ||
960 | } else if (ret == 0) { | 977 | } else if (ret == 0) { |
961 | leaf = path->nodes[0]; | 978 | leaf = path->nodes[0]; |
962 | extent = btrfs_item_ptr(leaf, path->slots[0], | 979 | extent = btrfs_item_ptr(leaf, path->slots[0], |
@@ -967,8 +984,8 @@ static int btrfs_free_dev_extent(struct btrfs_trans_handle *trans, | |||
967 | if (device->bytes_used > 0) | 984 | if (device->bytes_used > 0) |
968 | device->bytes_used -= btrfs_dev_extent_length(leaf, extent); | 985 | device->bytes_used -= btrfs_dev_extent_length(leaf, extent); |
969 | ret = btrfs_del_item(trans, root, path); | 986 | ret = btrfs_del_item(trans, root, path); |
970 | BUG_ON(ret); | ||
971 | 987 | ||
988 | out: | ||
972 | btrfs_free_path(path); | 989 | btrfs_free_path(path); |
973 | return ret; | 990 | return ret; |
974 | } | 991 | } |
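
As in many hunks of this series, btrfs_free_dev_extent() above stops crashing on a failed lookup (BUG_ON(ret)) and instead unwinds through a single out: label that frees the path and returns the error to the caller. A small self-contained sketch of that shape, with stand-in functions instead of the real btrfs helpers:

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    /* stand-in for btrfs_previous_item(): 0 on success, <0 on error */
    static int lookup_previous_item(int *slot)
    {
        *slot = 3;
        return 0;
    }

    static int free_dev_extent_demo(void)
    {
        int *path;      /* stand-in for the btrfs path */
        int slot;
        int ret;

        path = malloc(sizeof(*path));
        if (!path)
            return -ENOMEM;

        ret = lookup_previous_item(&slot);
        if (ret)        /* was BUG_ON(ret); now propagate the error */
            goto out;

        printf("deleting dev extent found at slot %d\n", slot);
    out:
        free(path);     /* single exit point always releases the path */
        return ret;
    }

    int main(void)
    {
        return free_dev_extent_demo() ? 1 : 0;
    }

The single exit point keeps the resource release in one place, which is why the conversion can drop the BUG_ON() without leaking the path.
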
@@ -1203,11 +1220,13 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
1203 | struct block_device *bdev; | 1220 | struct block_device *bdev; |
1204 | struct buffer_head *bh = NULL; | 1221 | struct buffer_head *bh = NULL; |
1205 | struct btrfs_super_block *disk_super; | 1222 | struct btrfs_super_block *disk_super; |
1223 | struct btrfs_fs_devices *cur_devices; | ||
1206 | u64 all_avail; | 1224 | u64 all_avail; |
1207 | u64 devid; | 1225 | u64 devid; |
1208 | u64 num_devices; | 1226 | u64 num_devices; |
1209 | u8 *dev_uuid; | 1227 | u8 *dev_uuid; |
1210 | int ret = 0; | 1228 | int ret = 0; |
1229 | bool clear_super = false; | ||
1211 | 1230 | ||
1212 | mutex_lock(&uuid_mutex); | 1231 | mutex_lock(&uuid_mutex); |
1213 | mutex_lock(&root->fs_info->volume_mutex); | 1232 | mutex_lock(&root->fs_info->volume_mutex); |
@@ -1238,14 +1257,16 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
1238 | 1257 | ||
1239 | device = NULL; | 1258 | device = NULL; |
1240 | devices = &root->fs_info->fs_devices->devices; | 1259 | devices = &root->fs_info->fs_devices->devices; |
1241 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | 1260 | /* |
1261 | * It is safe to read the devices since the volume_mutex | ||
1262 | * is held. | ||
1263 | */ | ||
1242 | list_for_each_entry(tmp, devices, dev_list) { | 1264 | list_for_each_entry(tmp, devices, dev_list) { |
1243 | if (tmp->in_fs_metadata && !tmp->bdev) { | 1265 | if (tmp->in_fs_metadata && !tmp->bdev) { |
1244 | device = tmp; | 1266 | device = tmp; |
1245 | break; | 1267 | break; |
1246 | } | 1268 | } |
1247 | } | 1269 | } |
1248 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | ||
1249 | bdev = NULL; | 1270 | bdev = NULL; |
1250 | bh = NULL; | 1271 | bh = NULL; |
1251 | disk_super = NULL; | 1272 | disk_super = NULL; |
@@ -1287,8 +1308,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
1287 | } | 1308 | } |
1288 | 1309 | ||
1289 | if (device->writeable) { | 1310 | if (device->writeable) { |
1311 | lock_chunks(root); | ||
1290 | list_del_init(&device->dev_alloc_list); | 1312 | list_del_init(&device->dev_alloc_list); |
1313 | unlock_chunks(root); | ||
1291 | root->fs_info->fs_devices->rw_devices--; | 1314 | root->fs_info->fs_devices->rw_devices--; |
1315 | clear_super = true; | ||
1292 | } | 1316 | } |
1293 | 1317 | ||
1294 | ret = btrfs_shrink_device(device, 0); | 1318 | ret = btrfs_shrink_device(device, 0); |
@@ -1300,15 +1324,17 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
1300 | goto error_undo; | 1324 | goto error_undo; |
1301 | 1325 | ||
1302 | device->in_fs_metadata = 0; | 1326 | device->in_fs_metadata = 0; |
1327 | btrfs_scrub_cancel_dev(root, device); | ||
1303 | 1328 | ||
1304 | /* | 1329 | /* |
1305 | * the device list mutex makes sure that we don't change | 1330 | * the device list mutex makes sure that we don't change |
1306 | * the device list while someone else is writing out all | 1331 | * the device list while someone else is writing out all |
1307 | * the device supers. | 1332 | * the device supers. |
1308 | */ | 1333 | */ |
1334 | |||
1335 | cur_devices = device->fs_devices; | ||
1309 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | 1336 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); |
1310 | list_del_init(&device->dev_list); | 1337 | list_del_rcu(&device->dev_list); |
1311 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | ||
1312 | 1338 | ||
1313 | device->fs_devices->num_devices--; | 1339 | device->fs_devices->num_devices--; |
1314 | 1340 | ||
@@ -1322,34 +1348,36 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
1322 | if (device->bdev == root->fs_info->fs_devices->latest_bdev) | 1348 | if (device->bdev == root->fs_info->fs_devices->latest_bdev) |
1323 | root->fs_info->fs_devices->latest_bdev = next_device->bdev; | 1349 | root->fs_info->fs_devices->latest_bdev = next_device->bdev; |
1324 | 1350 | ||
1325 | if (device->bdev) { | 1351 | if (device->bdev) |
1326 | blkdev_put(device->bdev, device->mode); | ||
1327 | device->bdev = NULL; | ||
1328 | device->fs_devices->open_devices--; | 1352 | device->fs_devices->open_devices--; |
1329 | } | 1353 | |
1354 | call_rcu(&device->rcu, free_device); | ||
1355 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | ||
1330 | 1356 | ||
1331 | num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1; | 1357 | num_devices = btrfs_super_num_devices(&root->fs_info->super_copy) - 1; |
1332 | btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices); | 1358 | btrfs_set_super_num_devices(&root->fs_info->super_copy, num_devices); |
1333 | 1359 | ||
1334 | if (device->fs_devices->open_devices == 0) { | 1360 | if (cur_devices->open_devices == 0) { |
1335 | struct btrfs_fs_devices *fs_devices; | 1361 | struct btrfs_fs_devices *fs_devices; |
1336 | fs_devices = root->fs_info->fs_devices; | 1362 | fs_devices = root->fs_info->fs_devices; |
1337 | while (fs_devices) { | 1363 | while (fs_devices) { |
1338 | if (fs_devices->seed == device->fs_devices) | 1364 | if (fs_devices->seed == cur_devices) |
1339 | break; | 1365 | break; |
1340 | fs_devices = fs_devices->seed; | 1366 | fs_devices = fs_devices->seed; |
1341 | } | 1367 | } |
1342 | fs_devices->seed = device->fs_devices->seed; | 1368 | fs_devices->seed = cur_devices->seed; |
1343 | device->fs_devices->seed = NULL; | 1369 | cur_devices->seed = NULL; |
1344 | __btrfs_close_devices(device->fs_devices); | 1370 | lock_chunks(root); |
1345 | free_fs_devices(device->fs_devices); | 1371 | __btrfs_close_devices(cur_devices); |
1372 | unlock_chunks(root); | ||
1373 | free_fs_devices(cur_devices); | ||
1346 | } | 1374 | } |
1347 | 1375 | ||
1348 | /* | 1376 | /* |
1349 | * at this point, the device is zero sized. We want to | 1377 | * at this point, the device is zero sized. We want to |
1350 | * remove it from the devices list and zero out the old super | 1378 | * remove it from the devices list and zero out the old super |
1351 | */ | 1379 | */ |
1352 | if (device->writeable) { | 1380 | if (clear_super) { |
1353 | /* make sure this device isn't detected as part of | 1381 | /* make sure this device isn't detected as part of |
1354 | * the FS anymore | 1382 | * the FS anymore |
1355 | */ | 1383 | */ |
@@ -1358,8 +1386,6 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path) | |||
1358 | sync_dirty_buffer(bh); | 1386 | sync_dirty_buffer(bh); |
1359 | } | 1387 | } |
1360 | 1388 | ||
1361 | kfree(device->name); | ||
1362 | kfree(device); | ||
1363 | ret = 0; | 1389 | ret = 0; |
1364 | 1390 | ||
1365 | error_brelse: | 1391 | error_brelse: |
@@ -1373,8 +1399,10 @@ out: | |||
1373 | return ret; | 1399 | return ret; |
1374 | error_undo: | 1400 | error_undo: |
1375 | if (device->writeable) { | 1401 | if (device->writeable) { |
1402 | lock_chunks(root); | ||
1376 | list_add(&device->dev_alloc_list, | 1403 | list_add(&device->dev_alloc_list, |
1377 | &root->fs_info->fs_devices->alloc_list); | 1404 | &root->fs_info->fs_devices->alloc_list); |
1405 | unlock_chunks(root); | ||
1378 | root->fs_info->fs_devices->rw_devices++; | 1406 | root->fs_info->fs_devices->rw_devices++; |
1379 | } | 1407 | } |
1380 | goto error_brelse; | 1408 | goto error_brelse; |
@@ -1414,7 +1442,12 @@ static int btrfs_prepare_sprout(struct btrfs_trans_handle *trans, | |||
1414 | INIT_LIST_HEAD(&seed_devices->devices); | 1442 | INIT_LIST_HEAD(&seed_devices->devices); |
1415 | INIT_LIST_HEAD(&seed_devices->alloc_list); | 1443 | INIT_LIST_HEAD(&seed_devices->alloc_list); |
1416 | mutex_init(&seed_devices->device_list_mutex); | 1444 | mutex_init(&seed_devices->device_list_mutex); |
1417 | list_splice_init(&fs_devices->devices, &seed_devices->devices); | 1445 | |
1446 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | ||
1447 | list_splice_init_rcu(&fs_devices->devices, &seed_devices->devices, | ||
1448 | synchronize_rcu); | ||
1449 | mutex_unlock(&root->fs_info->fs_devices->device_list_mutex); | ||
1450 | |||
1418 | list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); | 1451 | list_splice_init(&fs_devices->alloc_list, &seed_devices->alloc_list); |
1419 | list_for_each_entry(device, &seed_devices->devices, dev_list) { | 1452 | list_for_each_entry(device, &seed_devices->devices, dev_list) { |
1420 | device->fs_devices = seed_devices; | 1453 | device->fs_devices = seed_devices; |
@@ -1475,7 +1508,7 @@ next_slot: | |||
1475 | goto error; | 1508 | goto error; |
1476 | leaf = path->nodes[0]; | 1509 | leaf = path->nodes[0]; |
1477 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); | 1510 | btrfs_item_key_to_cpu(leaf, &key, path->slots[0]); |
1478 | btrfs_release_path(root, path); | 1511 | btrfs_release_path(path); |
1479 | continue; | 1512 | continue; |
1480 | } | 1513 | } |
1481 | 1514 | ||
@@ -1611,7 +1644,7 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path) | |||
1611 | * half setup | 1644 | * half setup |
1612 | */ | 1645 | */ |
1613 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); | 1646 | mutex_lock(&root->fs_info->fs_devices->device_list_mutex); |
1614 | list_add(&device->dev_list, &root->fs_info->fs_devices->devices); | 1647 | list_add_rcu(&device->dev_list, &root->fs_info->fs_devices->devices); |
1615 | list_add(&device->dev_alloc_list, | 1648 | list_add(&device->dev_alloc_list, |
1616 | &root->fs_info->fs_devices->alloc_list); | 1649 | &root->fs_info->fs_devices->alloc_list); |
1617 | root->fs_info->fs_devices->num_devices++; | 1650 | root->fs_info->fs_devices->num_devices++; |
@@ -1769,10 +1802,9 @@ static int btrfs_free_chunk(struct btrfs_trans_handle *trans, | |||
1769 | BUG_ON(ret); | 1802 | BUG_ON(ret); |
1770 | 1803 | ||
1771 | ret = btrfs_del_item(trans, root, path); | 1804 | ret = btrfs_del_item(trans, root, path); |
1772 | BUG_ON(ret); | ||
1773 | 1805 | ||
1774 | btrfs_free_path(path); | 1806 | btrfs_free_path(path); |
1775 | return 0; | 1807 | return ret; |
1776 | } | 1808 | } |
1777 | 1809 | ||
1778 | static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 | 1810 | static int btrfs_del_sys_chunk(struct btrfs_root *root, u64 chunk_objectid, u64 |
@@ -1947,7 +1979,7 @@ again: | |||
1947 | chunk = btrfs_item_ptr(leaf, path->slots[0], | 1979 | chunk = btrfs_item_ptr(leaf, path->slots[0], |
1948 | struct btrfs_chunk); | 1980 | struct btrfs_chunk); |
1949 | chunk_type = btrfs_chunk_type(leaf, chunk); | 1981 | chunk_type = btrfs_chunk_type(leaf, chunk); |
1950 | btrfs_release_path(chunk_root, path); | 1982 | btrfs_release_path(path); |
1951 | 1983 | ||
1952 | if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { | 1984 | if (chunk_type & BTRFS_BLOCK_GROUP_SYSTEM) { |
1953 | ret = btrfs_relocate_chunk(chunk_root, chunk_tree, | 1985 | ret = btrfs_relocate_chunk(chunk_root, chunk_tree, |
@@ -2065,7 +2097,7 @@ int btrfs_balance(struct btrfs_root *dev_root) | |||
2065 | if (found_key.offset == 0) | 2097 | if (found_key.offset == 0) |
2066 | break; | 2098 | break; |
2067 | 2099 | ||
2068 | btrfs_release_path(chunk_root, path); | 2100 | btrfs_release_path(path); |
2069 | ret = btrfs_relocate_chunk(chunk_root, | 2101 | ret = btrfs_relocate_chunk(chunk_root, |
2070 | chunk_root->root_key.objectid, | 2102 | chunk_root->root_key.objectid, |
2071 | found_key.objectid, | 2103 | found_key.objectid, |
@@ -2137,7 +2169,7 @@ again: | |||
2137 | goto done; | 2169 | goto done; |
2138 | if (ret) { | 2170 | if (ret) { |
2139 | ret = 0; | 2171 | ret = 0; |
2140 | btrfs_release_path(root, path); | 2172 | btrfs_release_path(path); |
2141 | break; | 2173 | break; |
2142 | } | 2174 | } |
2143 | 2175 | ||
@@ -2146,7 +2178,7 @@ again: | |||
2146 | btrfs_item_key_to_cpu(l, &key, path->slots[0]); | 2178 | btrfs_item_key_to_cpu(l, &key, path->slots[0]); |
2147 | 2179 | ||
2148 | if (key.objectid != device->devid) { | 2180 | if (key.objectid != device->devid) { |
2149 | btrfs_release_path(root, path); | 2181 | btrfs_release_path(path); |
2150 | break; | 2182 | break; |
2151 | } | 2183 | } |
2152 | 2184 | ||
@@ -2154,14 +2186,14 @@ again: | |||
2154 | length = btrfs_dev_extent_length(l, dev_extent); | 2186 | length = btrfs_dev_extent_length(l, dev_extent); |
2155 | 2187 | ||
2156 | if (key.offset + length <= new_size) { | 2188 | if (key.offset + length <= new_size) { |
2157 | btrfs_release_path(root, path); | 2189 | btrfs_release_path(path); |
2158 | break; | 2190 | break; |
2159 | } | 2191 | } |
2160 | 2192 | ||
2161 | chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); | 2193 | chunk_tree = btrfs_dev_extent_chunk_tree(l, dev_extent); |
2162 | chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); | 2194 | chunk_objectid = btrfs_dev_extent_chunk_objectid(l, dev_extent); |
2163 | chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); | 2195 | chunk_offset = btrfs_dev_extent_chunk_offset(l, dev_extent); |
2164 | btrfs_release_path(root, path); | 2196 | btrfs_release_path(path); |
2165 | 2197 | ||
2166 | ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid, | 2198 | ret = btrfs_relocate_chunk(root, chunk_tree, chunk_objectid, |
2167 | chunk_offset); | 2199 | chunk_offset); |
@@ -2237,275 +2269,204 @@ static int btrfs_add_system_chunk(struct btrfs_trans_handle *trans, | |||
2237 | return 0; | 2269 | return 0; |
2238 | } | 2270 | } |
2239 | 2271 | ||
2240 | static noinline u64 chunk_bytes_by_type(u64 type, u64 calc_size, | 2272 | /* |
2241 | int num_stripes, int sub_stripes) | 2273 | * sort the devices in descending order by max_avail, total_avail |
2274 | */ | ||
2275 | static int btrfs_cmp_device_info(const void *a, const void *b) | ||
2242 | { | 2276 | { |
2243 | if (type & (BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_DUP)) | 2277 | const struct btrfs_device_info *di_a = a; |
2244 | return calc_size; | 2278 | const struct btrfs_device_info *di_b = b; |
2245 | else if (type & BTRFS_BLOCK_GROUP_RAID10) | ||
2246 | return calc_size * (num_stripes / sub_stripes); | ||
2247 | else | ||
2248 | return calc_size * num_stripes; | ||
2249 | } | ||
2250 | 2279 | ||
2251 | /* Used to sort the devices by max_avail(descending sort) */ | 2280 | if (di_a->max_avail > di_b->max_avail) |
2252 | int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2) | ||
2253 | { | ||
2254 | if (((struct btrfs_device_info *)dev_info1)->max_avail > | ||
2255 | ((struct btrfs_device_info *)dev_info2)->max_avail) | ||
2256 | return -1; | 2281 | return -1; |
2257 | else if (((struct btrfs_device_info *)dev_info1)->max_avail < | 2282 | if (di_a->max_avail < di_b->max_avail) |
2258 | ((struct btrfs_device_info *)dev_info2)->max_avail) | ||
2259 | return 1; | 2283 | return 1; |
2260 | else | 2284 | if (di_a->total_avail > di_b->total_avail) |
2261 | return 0; | 2285 | return -1; |
2286 | if (di_a->total_avail < di_b->total_avail) | ||
2287 | return 1; | ||
2288 | return 0; | ||
2262 | } | 2289 | } |
2263 | 2290 | ||
2264 | static int __btrfs_calc_nstripes(struct btrfs_fs_devices *fs_devices, u64 type, | 2291 | static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, |
2265 | int *num_stripes, int *min_stripes, | 2292 | struct btrfs_root *extent_root, |
2266 | int *sub_stripes) | 2293 | struct map_lookup **map_ret, |
2294 | u64 *num_bytes_out, u64 *stripe_size_out, | ||
2295 | u64 start, u64 type) | ||
2267 | { | 2296 | { |
2268 | *num_stripes = 1; | 2297 | struct btrfs_fs_info *info = extent_root->fs_info; |
2269 | *min_stripes = 1; | 2298 | struct btrfs_fs_devices *fs_devices = info->fs_devices; |
2270 | *sub_stripes = 0; | 2299 | struct list_head *cur; |
2300 | struct map_lookup *map = NULL; | ||
2301 | struct extent_map_tree *em_tree; | ||
2302 | struct extent_map *em; | ||
2303 | struct btrfs_device_info *devices_info = NULL; | ||
2304 | u64 total_avail; | ||
2305 | int num_stripes; /* total number of stripes to allocate */ | ||
2306 | int sub_stripes; /* sub_stripes info for map */ | ||
2307 | int dev_stripes; /* stripes per dev */ | ||
2308 | int devs_max; /* max devs to use */ | ||
2309 | int devs_min; /* min devs needed */ | ||
2310 | int devs_increment; /* ndevs has to be a multiple of this */ | ||
2311 | int ncopies; /* how many copies of the data there are */ | ||
2312 | int ret; | ||
2313 | u64 max_stripe_size; | ||
2314 | u64 max_chunk_size; | ||
2315 | u64 stripe_size; | ||
2316 | u64 num_bytes; | ||
2317 | int ndevs; | ||
2318 | int i; | ||
2319 | int j; | ||
2271 | 2320 | ||
2272 | if (type & (BTRFS_BLOCK_GROUP_RAID0)) { | 2321 | if ((type & BTRFS_BLOCK_GROUP_RAID1) && |
2273 | *num_stripes = fs_devices->rw_devices; | 2322 | (type & BTRFS_BLOCK_GROUP_DUP)) { |
2274 | *min_stripes = 2; | 2323 | WARN_ON(1); |
2275 | } | 2324 | type &= ~BTRFS_BLOCK_GROUP_DUP; |
2276 | if (type & (BTRFS_BLOCK_GROUP_DUP)) { | ||
2277 | *num_stripes = 2; | ||
2278 | *min_stripes = 2; | ||
2279 | } | ||
2280 | if (type & (BTRFS_BLOCK_GROUP_RAID1)) { | ||
2281 | if (fs_devices->rw_devices < 2) | ||
2282 | return -ENOSPC; | ||
2283 | *num_stripes = 2; | ||
2284 | *min_stripes = 2; | ||
2285 | } | ||
2286 | if (type & (BTRFS_BLOCK_GROUP_RAID10)) { | ||
2287 | *num_stripes = fs_devices->rw_devices; | ||
2288 | if (*num_stripes < 4) | ||
2289 | return -ENOSPC; | ||
2290 | *num_stripes &= ~(u32)1; | ||
2291 | *sub_stripes = 2; | ||
2292 | *min_stripes = 4; | ||
2293 | } | 2325 | } |
2294 | 2326 | ||
2295 | return 0; | 2327 | if (list_empty(&fs_devices->alloc_list)) |
2296 | } | 2328 | return -ENOSPC; |
2297 | 2329 | ||
2298 | static u64 __btrfs_calc_stripe_size(struct btrfs_fs_devices *fs_devices, | 2330 | sub_stripes = 1; |
2299 | u64 proposed_size, u64 type, | 2331 | dev_stripes = 1; |
2300 | int num_stripes, int small_stripe) | 2332 | devs_increment = 1; |
2301 | { | 2333 | ncopies = 1; |
2302 | int min_stripe_size = 1 * 1024 * 1024; | 2334 | devs_max = 0; /* 0 == as many as possible */ |
2303 | u64 calc_size = proposed_size; | 2335 | devs_min = 1; |
2304 | u64 max_chunk_size = calc_size; | ||
2305 | int ncopies = 1; | ||
2306 | 2336 | ||
2307 | if (type & (BTRFS_BLOCK_GROUP_RAID1 | | 2337 | /* |
2308 | BTRFS_BLOCK_GROUP_DUP | | 2338 | * define the properties of each RAID type. |
2309 | BTRFS_BLOCK_GROUP_RAID10)) | 2339 | * FIXME: move this to a global table and use it in all RAID |
2340 | * calculation code | ||
2341 | */ | ||
2342 | if (type & (BTRFS_BLOCK_GROUP_DUP)) { | ||
2343 | dev_stripes = 2; | ||
2344 | ncopies = 2; | ||
2345 | devs_max = 1; | ||
2346 | } else if (type & (BTRFS_BLOCK_GROUP_RAID0)) { | ||
2347 | devs_min = 2; | ||
2348 | } else if (type & (BTRFS_BLOCK_GROUP_RAID1)) { | ||
2349 | devs_increment = 2; | ||
2310 | ncopies = 2; | 2350 | ncopies = 2; |
2351 | devs_max = 2; | ||
2352 | devs_min = 2; | ||
2353 | } else if (type & (BTRFS_BLOCK_GROUP_RAID10)) { | ||
2354 | sub_stripes = 2; | ||
2355 | devs_increment = 2; | ||
2356 | ncopies = 2; | ||
2357 | devs_min = 4; | ||
2358 | } else { | ||
2359 | devs_max = 1; | ||
2360 | } | ||
2311 | 2361 | ||
2312 | if (type & BTRFS_BLOCK_GROUP_DATA) { | 2362 | if (type & BTRFS_BLOCK_GROUP_DATA) { |
2313 | max_chunk_size = 10 * calc_size; | 2363 | max_stripe_size = 1024 * 1024 * 1024; |
2314 | min_stripe_size = 64 * 1024 * 1024; | 2364 | max_chunk_size = 10 * max_stripe_size; |
2315 | } else if (type & BTRFS_BLOCK_GROUP_METADATA) { | 2365 | } else if (type & BTRFS_BLOCK_GROUP_METADATA) { |
2316 | max_chunk_size = 256 * 1024 * 1024; | 2366 | max_stripe_size = 256 * 1024 * 1024; |
2317 | min_stripe_size = 32 * 1024 * 1024; | 2367 | max_chunk_size = max_stripe_size; |
2318 | } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { | 2368 | } else if (type & BTRFS_BLOCK_GROUP_SYSTEM) { |
2319 | calc_size = 8 * 1024 * 1024; | 2369 | max_stripe_size = 8 * 1024 * 1024; |
2320 | max_chunk_size = calc_size * 2; | 2370 | max_chunk_size = 2 * max_stripe_size; |
2321 | min_stripe_size = 1 * 1024 * 1024; | 2371 | } else { |
2372 | printk(KERN_ERR "btrfs: invalid chunk type 0x%llx requested\n", | ||
2373 | type); | ||
2374 | BUG_ON(1); | ||
2322 | } | 2375 | } |
2323 | 2376 | ||
2324 | /* we don't want a chunk larger than 10% of writeable space */ | 2377 | /* we don't want a chunk larger than 10% of writeable space */ |
2325 | max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), | 2378 | max_chunk_size = min(div_factor(fs_devices->total_rw_bytes, 1), |
2326 | max_chunk_size); | 2379 | max_chunk_size); |
2327 | 2380 | ||
2328 | if (calc_size * num_stripes > max_chunk_size * ncopies) { | 2381 | devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, |
2329 | calc_size = max_chunk_size * ncopies; | 2382 | GFP_NOFS); |
2330 | do_div(calc_size, num_stripes); | 2383 | if (!devices_info) |
2331 | do_div(calc_size, BTRFS_STRIPE_LEN); | 2384 | return -ENOMEM; |
2332 | calc_size *= BTRFS_STRIPE_LEN; | ||
2333 | } | ||
2334 | 2385 | ||
2335 | /* we don't want tiny stripes */ | 2386 | cur = fs_devices->alloc_list.next; |
2336 | if (!small_stripe) | ||
2337 | calc_size = max_t(u64, min_stripe_size, calc_size); | ||
2338 | 2387 | ||
2339 | /* | 2388 | /* |
2340 | * we're about to do_div by the BTRFS_STRIPE_LEN so lets make sure | 2389 | * in the first pass through the devices list, we gather information |
2341 | * we end up with something bigger than a stripe | 2390 | * about the available holes on each device. |
2342 | */ | 2391 | */ |
2343 | calc_size = max_t(u64, calc_size, BTRFS_STRIPE_LEN); | 2392 | ndevs = 0; |
2344 | 2393 | while (cur != &fs_devices->alloc_list) { | |
2345 | do_div(calc_size, BTRFS_STRIPE_LEN); | 2394 | struct btrfs_device *device; |
2346 | calc_size *= BTRFS_STRIPE_LEN; | 2395 | u64 max_avail; |
2347 | 2396 | u64 dev_offset; | |
2348 | return calc_size; | ||
2349 | } | ||
2350 | |||
2351 | static struct map_lookup *__shrink_map_lookup_stripes(struct map_lookup *map, | ||
2352 | int num_stripes) | ||
2353 | { | ||
2354 | struct map_lookup *new; | ||
2355 | size_t len = map_lookup_size(num_stripes); | ||
2356 | |||
2357 | BUG_ON(map->num_stripes < num_stripes); | ||
2358 | |||
2359 | if (map->num_stripes == num_stripes) | ||
2360 | return map; | ||
2361 | |||
2362 | new = kmalloc(len, GFP_NOFS); | ||
2363 | if (!new) { | ||
2364 | /* just change map->num_stripes */ | ||
2365 | map->num_stripes = num_stripes; | ||
2366 | return map; | ||
2367 | } | ||
2368 | |||
2369 | memcpy(new, map, len); | ||
2370 | new->num_stripes = num_stripes; | ||
2371 | kfree(map); | ||
2372 | return new; | ||
2373 | } | ||
2374 | 2397 | ||
2375 | /* | 2398 | device = list_entry(cur, struct btrfs_device, dev_alloc_list); |
2376 | * helper to allocate device space from btrfs_device_info, in which we stored | ||
2377 | * max free space information of every device. It is used when we can not | ||
2378 | * allocate chunks by default size. | ||
2379 | * | ||
2380 | * By this helper, we can allocate a new chunk as larger as possible. | ||
2381 | */ | ||
2382 | static int __btrfs_alloc_tiny_space(struct btrfs_trans_handle *trans, | ||
2383 | struct btrfs_fs_devices *fs_devices, | ||
2384 | struct btrfs_device_info *devices, | ||
2385 | int nr_device, u64 type, | ||
2386 | struct map_lookup **map_lookup, | ||
2387 | int min_stripes, u64 *stripe_size) | ||
2388 | { | ||
2389 | int i, index, sort_again = 0; | ||
2390 | int min_devices = min_stripes; | ||
2391 | u64 max_avail, min_free; | ||
2392 | struct map_lookup *map = *map_lookup; | ||
2393 | int ret; | ||
2394 | 2399 | ||
2395 | if (nr_device < min_stripes) | 2400 | cur = cur->next; |
2396 | return -ENOSPC; | ||
2397 | 2401 | ||
2398 | btrfs_descending_sort_devices(devices, nr_device); | 2402 | if (!device->writeable) { |
2403 | printk(KERN_ERR | ||
2404 | "btrfs: read-only device in alloc_list\n"); | ||
2405 | WARN_ON(1); | ||
2406 | continue; | ||
2407 | } | ||
2399 | 2408 | ||
2400 | max_avail = devices[0].max_avail; | 2409 | if (!device->in_fs_metadata) |
2401 | if (!max_avail) | 2410 | continue; |
2402 | return -ENOSPC; | ||
2403 | 2411 | ||
2404 | for (i = 0; i < nr_device; i++) { | 2412 | if (device->total_bytes > device->bytes_used) |
2405 | /* | 2413 | total_avail = device->total_bytes - device->bytes_used; |
2406 | * if dev_offset = 0, it means the free space of this device | 2414 | else |
2407 | * is less than what we need, and we didn't search max avail | 2415 | total_avail = 0; |
2408 | * extent on this device, so do it now. | 2416 | /* avail is off by max(alloc_start, 1MB), but that is the same |
2417 | * for all devices, so it doesn't hurt the sorting later on | ||
2409 | */ | 2418 | */ |
2410 | if (!devices[i].dev_offset) { | ||
2411 | ret = find_free_dev_extent(trans, devices[i].dev, | ||
2412 | max_avail, | ||
2413 | &devices[i].dev_offset, | ||
2414 | &devices[i].max_avail); | ||
2415 | if (ret != 0 && ret != -ENOSPC) | ||
2416 | return ret; | ||
2417 | sort_again = 1; | ||
2418 | } | ||
2419 | } | ||
2420 | |||
2421 | /* we update the max avail free extent of each devices, sort again */ | ||
2422 | if (sort_again) | ||
2423 | btrfs_descending_sort_devices(devices, nr_device); | ||
2424 | 2419 | ||
2425 | if (type & BTRFS_BLOCK_GROUP_DUP) | 2420 | ret = find_free_dev_extent(trans, device, |
2426 | min_devices = 1; | 2421 | max_stripe_size * dev_stripes, |
2422 | &dev_offset, &max_avail); | ||
2423 | if (ret && ret != -ENOSPC) | ||
2424 | goto error; | ||
2427 | 2425 | ||
2428 | if (!devices[min_devices - 1].max_avail) | 2426 | if (ret == 0) |
2429 | return -ENOSPC; | 2427 | max_avail = max_stripe_size * dev_stripes; |
2430 | 2428 | ||
2431 | max_avail = devices[min_devices - 1].max_avail; | 2429 | if (max_avail < BTRFS_STRIPE_LEN * dev_stripes) |
2432 | if (type & BTRFS_BLOCK_GROUP_DUP) | 2430 | continue; |
2433 | do_div(max_avail, 2); | ||
2434 | 2431 | ||
2435 | max_avail = __btrfs_calc_stripe_size(fs_devices, max_avail, type, | 2432 | devices_info[ndevs].dev_offset = dev_offset; |
2436 | min_stripes, 1); | 2433 | devices_info[ndevs].max_avail = max_avail; |
2437 | if (type & BTRFS_BLOCK_GROUP_DUP) | 2434 | devices_info[ndevs].total_avail = total_avail; |
2438 | min_free = max_avail * 2; | 2435 | devices_info[ndevs].dev = device; |
2439 | else | 2436 | ++ndevs; |
2440 | min_free = max_avail; | 2437 | } |
2441 | 2438 | ||
2442 | if (min_free > devices[min_devices - 1].max_avail) | 2439 | /* |
2443 | return -ENOSPC; | 2440 | * now sort the devices by hole size / available space |
2441 | */ | ||
2442 | sort(devices_info, ndevs, sizeof(struct btrfs_device_info), | ||
2443 | btrfs_cmp_device_info, NULL); | ||
2444 | 2444 | ||
2445 | map = __shrink_map_lookup_stripes(map, min_stripes); | 2445 | /* round down to number of usable stripes */ |
2446 | *stripe_size = max_avail; | 2446 | ndevs -= ndevs % devs_increment; |
2447 | 2447 | ||
2448 | index = 0; | 2448 | if (ndevs < devs_increment * sub_stripes || ndevs < devs_min) { |
2449 | for (i = 0; i < min_stripes; i++) { | 2449 | ret = -ENOSPC; |
2450 | map->stripes[i].dev = devices[index].dev; | 2450 | goto error; |
2451 | map->stripes[i].physical = devices[index].dev_offset; | ||
2452 | if (type & BTRFS_BLOCK_GROUP_DUP) { | ||
2453 | i++; | ||
2454 | map->stripes[i].dev = devices[index].dev; | ||
2455 | map->stripes[i].physical = devices[index].dev_offset + | ||
2456 | max_avail; | ||
2457 | } | ||
2458 | index++; | ||
2459 | } | 2451 | } |
2460 | *map_lookup = map; | ||
2461 | 2452 | ||
2462 | return 0; | 2453 | if (devs_max && ndevs > devs_max) |
2463 | } | 2454 | ndevs = devs_max; |
2464 | 2455 | /* | |
2465 | static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | 2456 | * the primary goal is to maximize the number of stripes, so use as many |
2466 | struct btrfs_root *extent_root, | 2457 | * devices as possible, even if the stripes are not maximum sized. |
2467 | struct map_lookup **map_ret, | 2458 | */ |
2468 | u64 *num_bytes, u64 *stripe_size, | 2459 | stripe_size = devices_info[ndevs-1].max_avail; |
2469 | u64 start, u64 type) | 2460 | num_stripes = ndevs * dev_stripes; |
2470 | { | ||
2471 | struct btrfs_fs_info *info = extent_root->fs_info; | ||
2472 | struct btrfs_device *device = NULL; | ||
2473 | struct btrfs_fs_devices *fs_devices = info->fs_devices; | ||
2474 | struct list_head *cur; | ||
2475 | struct map_lookup *map; | ||
2476 | struct extent_map_tree *em_tree; | ||
2477 | struct extent_map *em; | ||
2478 | struct btrfs_device_info *devices_info; | ||
2479 | struct list_head private_devs; | ||
2480 | u64 calc_size = 1024 * 1024 * 1024; | ||
2481 | u64 min_free; | ||
2482 | u64 avail; | ||
2483 | u64 dev_offset; | ||
2484 | int num_stripes; | ||
2485 | int min_stripes; | ||
2486 | int sub_stripes; | ||
2487 | int min_devices; /* the min number of devices we need */ | ||
2488 | int i; | ||
2489 | int ret; | ||
2490 | int index; | ||
2491 | 2461 | ||
2492 | if ((type & BTRFS_BLOCK_GROUP_RAID1) && | 2462 | if (stripe_size * num_stripes > max_chunk_size * ncopies) { |
2493 | (type & BTRFS_BLOCK_GROUP_DUP)) { | 2463 | stripe_size = max_chunk_size * ncopies; |
2494 | WARN_ON(1); | 2464 | do_div(stripe_size, num_stripes); |
2495 | type &= ~BTRFS_BLOCK_GROUP_DUP; | ||
2496 | } | 2465 | } |
2497 | if (list_empty(&fs_devices->alloc_list)) | ||
2498 | return -ENOSPC; | ||
2499 | |||
2500 | ret = __btrfs_calc_nstripes(fs_devices, type, &num_stripes, | ||
2501 | &min_stripes, &sub_stripes); | ||
2502 | if (ret) | ||
2503 | return ret; | ||
2504 | 2466 | ||
2505 | devices_info = kzalloc(sizeof(*devices_info) * fs_devices->rw_devices, | 2467 | do_div(stripe_size, dev_stripes); |
2506 | GFP_NOFS); | 2468 | do_div(stripe_size, BTRFS_STRIPE_LEN); |
2507 | if (!devices_info) | 2469 | stripe_size *= BTRFS_STRIPE_LEN; |
2508 | return -ENOMEM; | ||
2509 | 2470 | ||
2510 | map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); | 2471 | map = kmalloc(map_lookup_size(num_stripes), GFP_NOFS); |
2511 | if (!map) { | 2472 | if (!map) { |
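
btrfs_cmp_device_info() above orders the candidate devices in descending order of max_avail and, on ties, of total_avail, so the rewritten allocator can simply take the first ndevs entries after sort(). The same ordering rule in a self-contained userspace program (the struct mirrors btrfs_device_info; the device names and sizes are invented, and qsort() stands in for the kernel's sort()):

    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>

    struct dev_info {
        const char *name;       /* illustrative only */
        uint64_t max_avail;     /* largest free hole on the device */
        uint64_t total_avail;   /* total unallocated bytes */
    };

    /* descending by max_avail, then by total_avail -- same rule as the diff */
    static int cmp_device_info(const void *a, const void *b)
    {
        const struct dev_info *di_a = a;
        const struct dev_info *di_b = b;

        if (di_a->max_avail > di_b->max_avail)
            return -1;
        if (di_a->max_avail < di_b->max_avail)
            return 1;
        if (di_a->total_avail > di_b->total_avail)
            return -1;
        if (di_a->total_avail < di_b->total_avail)
            return 1;
        return 0;
    }

    int main(void)
    {
        struct dev_info devs[] = {
            { "sdb", 256ULL << 20, 1ULL << 30 },
            { "sdc", 512ULL << 20, 2ULL << 30 },
            { "sdd", 512ULL << 20, 4ULL << 30 },
        };
        size_t i, n = sizeof(devs) / sizeof(devs[0]);

        qsort(devs, n, sizeof(devs[0]), cmp_device_info);

        for (i = 0; i < n; i++)
            printf("%s max_avail=%llu total_avail=%llu\n", devs[i].name,
                   (unsigned long long)devs[i].max_avail,
                   (unsigned long long)devs[i].total_avail);
        return 0;
    }

With these sample values the program prints sdd, sdc, sdb: the two 512 MiB holes are tie-broken by total_avail, which is exactly why the allocator can stop at the first device whose hole is too small.
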
@@ -2514,85 +2475,12 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |||
2514 | } | 2475 | } |
2515 | map->num_stripes = num_stripes; | 2476 | map->num_stripes = num_stripes; |
2516 | 2477 | ||
2517 | cur = fs_devices->alloc_list.next; | 2478 | for (i = 0; i < ndevs; ++i) { |
2518 | index = 0; | 2479 | for (j = 0; j < dev_stripes; ++j) { |
2519 | i = 0; | 2480 | int s = i * dev_stripes + j; |
2520 | 2481 | map->stripes[s].dev = devices_info[i].dev; | |
2521 | calc_size = __btrfs_calc_stripe_size(fs_devices, calc_size, type, | 2482 | map->stripes[s].physical = devices_info[i].dev_offset + |
2522 | num_stripes, 0); | 2483 | j * stripe_size; |
2523 | |||
2524 | if (type & BTRFS_BLOCK_GROUP_DUP) { | ||
2525 | min_free = calc_size * 2; | ||
2526 | min_devices = 1; | ||
2527 | } else { | ||
2528 | min_free = calc_size; | ||
2529 | min_devices = min_stripes; | ||
2530 | } | ||
2531 | |||
2532 | INIT_LIST_HEAD(&private_devs); | ||
2533 | while (index < num_stripes) { | ||
2534 | device = list_entry(cur, struct btrfs_device, dev_alloc_list); | ||
2535 | BUG_ON(!device->writeable); | ||
2536 | if (device->total_bytes > device->bytes_used) | ||
2537 | avail = device->total_bytes - device->bytes_used; | ||
2538 | else | ||
2539 | avail = 0; | ||
2540 | cur = cur->next; | ||
2541 | |||
2542 | if (device->in_fs_metadata && avail >= min_free) { | ||
2543 | ret = find_free_dev_extent(trans, device, min_free, | ||
2544 | &devices_info[i].dev_offset, | ||
2545 | &devices_info[i].max_avail); | ||
2546 | if (ret == 0) { | ||
2547 | list_move_tail(&device->dev_alloc_list, | ||
2548 | &private_devs); | ||
2549 | map->stripes[index].dev = device; | ||
2550 | map->stripes[index].physical = | ||
2551 | devices_info[i].dev_offset; | ||
2552 | index++; | ||
2553 | if (type & BTRFS_BLOCK_GROUP_DUP) { | ||
2554 | map->stripes[index].dev = device; | ||
2555 | map->stripes[index].physical = | ||
2556 | devices_info[i].dev_offset + | ||
2557 | calc_size; | ||
2558 | index++; | ||
2559 | } | ||
2560 | } else if (ret != -ENOSPC) | ||
2561 | goto error; | ||
2562 | |||
2563 | devices_info[i].dev = device; | ||
2564 | i++; | ||
2565 | } else if (device->in_fs_metadata && | ||
2566 | avail >= BTRFS_STRIPE_LEN) { | ||
2567 | devices_info[i].dev = device; | ||
2568 | devices_info[i].max_avail = avail; | ||
2569 | i++; | ||
2570 | } | ||
2571 | |||
2572 | if (cur == &fs_devices->alloc_list) | ||
2573 | break; | ||
2574 | } | ||
2575 | |||
2576 | list_splice(&private_devs, &fs_devices->alloc_list); | ||
2577 | if (index < num_stripes) { | ||
2578 | if (index >= min_stripes) { | ||
2579 | num_stripes = index; | ||
2580 | if (type & (BTRFS_BLOCK_GROUP_RAID10)) { | ||
2581 | num_stripes /= sub_stripes; | ||
2582 | num_stripes *= sub_stripes; | ||
2583 | } | ||
2584 | |||
2585 | map = __shrink_map_lookup_stripes(map, num_stripes); | ||
2586 | } else if (i >= min_devices) { | ||
2587 | ret = __btrfs_alloc_tiny_space(trans, fs_devices, | ||
2588 | devices_info, i, type, | ||
2589 | &map, min_stripes, | ||
2590 | &calc_size); | ||
2591 | if (ret) | ||
2592 | goto error; | ||
2593 | } else { | ||
2594 | ret = -ENOSPC; | ||
2595 | goto error; | ||
2596 | } | 2484 | } |
2597 | } | 2485 | } |
2598 | map->sector_size = extent_root->sectorsize; | 2486 | map->sector_size = extent_root->sectorsize; |
@@ -2603,20 +2491,21 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |||
2603 | map->sub_stripes = sub_stripes; | 2491 | map->sub_stripes = sub_stripes; |
2604 | 2492 | ||
2605 | *map_ret = map; | 2493 | *map_ret = map; |
2606 | *stripe_size = calc_size; | 2494 | num_bytes = stripe_size * (num_stripes / ncopies); |
2607 | *num_bytes = chunk_bytes_by_type(type, calc_size, | ||
2608 | map->num_stripes, sub_stripes); | ||
2609 | 2495 | ||
2610 | trace_btrfs_chunk_alloc(info->chunk_root, map, start, *num_bytes); | 2496 | *stripe_size_out = stripe_size; |
2497 | *num_bytes_out = num_bytes; | ||
2611 | 2498 | ||
2612 | em = alloc_extent_map(GFP_NOFS); | 2499 | trace_btrfs_chunk_alloc(info->chunk_root, map, start, num_bytes); |
2500 | |||
2501 | em = alloc_extent_map(); | ||
2613 | if (!em) { | 2502 | if (!em) { |
2614 | ret = -ENOMEM; | 2503 | ret = -ENOMEM; |
2615 | goto error; | 2504 | goto error; |
2616 | } | 2505 | } |
2617 | em->bdev = (struct block_device *)map; | 2506 | em->bdev = (struct block_device *)map; |
2618 | em->start = start; | 2507 | em->start = start; |
2619 | em->len = *num_bytes; | 2508 | em->len = num_bytes; |
2620 | em->block_start = 0; | 2509 | em->block_start = 0; |
2621 | em->block_len = em->len; | 2510 | em->block_len = em->len; |
2622 | 2511 | ||
@@ -2629,20 +2518,21 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans, | |||
2629 | 2518 | ||
2630 | ret = btrfs_make_block_group(trans, extent_root, 0, type, | 2519 | ret = btrfs_make_block_group(trans, extent_root, 0, type, |
2631 | BTRFS_FIRST_CHUNK_TREE_OBJECTID, | 2520 | BTRFS_FIRST_CHUNK_TREE_OBJECTID, |
2632 | start, *num_bytes); | 2521 | start, num_bytes); |
2633 | BUG_ON(ret); | 2522 | BUG_ON(ret); |
2634 | 2523 | ||
2635 | index = 0; | 2524 | for (i = 0; i < map->num_stripes; ++i) { |
2636 | while (index < map->num_stripes) { | 2525 | struct btrfs_device *device; |
2637 | device = map->stripes[index].dev; | 2526 | u64 dev_offset; |
2638 | dev_offset = map->stripes[index].physical; | 2527 | |
2528 | device = map->stripes[i].dev; | ||
2529 | dev_offset = map->stripes[i].physical; | ||
2639 | 2530 | ||
2640 | ret = btrfs_alloc_dev_extent(trans, device, | 2531 | ret = btrfs_alloc_dev_extent(trans, device, |
2641 | info->chunk_root->root_key.objectid, | 2532 | info->chunk_root->root_key.objectid, |
2642 | BTRFS_FIRST_CHUNK_TREE_OBJECTID, | 2533 | BTRFS_FIRST_CHUNK_TREE_OBJECTID, |
2643 | start, dev_offset, calc_size); | 2534 | start, dev_offset, stripe_size); |
2644 | BUG_ON(ret); | 2535 | BUG_ON(ret); |
2645 | index++; | ||
2646 | } | 2536 | } |
2647 | 2537 | ||
2648 | kfree(devices_info); | 2538 | kfree(devices_info); |
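
The sizing logic above can be followed with concrete numbers: ndevs is rounded down to a multiple of devs_increment and capped at devs_max, stripe_size starts from the smallest device actually used (devices_info[ndevs - 1].max_avail), is clamped so stripe_size * num_stripes stays within max_chunk_size * ncopies, and is rounded down to a multiple of BTRFS_STRIPE_LEN; the resulting chunk covers stripe_size * (num_stripes / ncopies) bytes. A worked, self-contained example for a hypothetical RAID10 layout over five devices (the sizes are invented, and 64 KiB is assumed for BTRFS_STRIPE_LEN):

    #include <stdio.h>
    #include <stdint.h>

    #define STRIPE_LEN (64ULL * 1024)   /* assumed BTRFS_STRIPE_LEN */

    int main(void)
    {
        /* RAID10 parameters as set up in the diff */
        int sub_stripes = 2, dev_stripes = 1;
        int devs_increment = 2, ncopies = 2, devs_min = 4, devs_max = 0;

        /* hypothetical devices, already sorted descending by max_avail */
        uint64_t max_avail[] = { 900ULL << 20, 800ULL << 20, 700ULL << 20,
                                 650ULL << 20, 600ULL << 20 };
        int ndevs = 5;
        uint64_t max_chunk_size = 10ULL << 30;  /* data: 10 * 1 GiB */
        uint64_t stripe_size, num_bytes;
        int num_stripes;

        ndevs -= ndevs % devs_increment;        /* 5 -> 4 */
        if (ndevs < devs_increment * sub_stripes || ndevs < devs_min)
            return 1;                           /* -ENOSPC in the kernel */
        if (devs_max && ndevs > devs_max)
            ndevs = devs_max;

        stripe_size = max_avail[ndevs - 1];     /* smallest device used */
        num_stripes = ndevs * dev_stripes;      /* 4 */

        if (stripe_size * num_stripes > max_chunk_size * ncopies)
            stripe_size = max_chunk_size * ncopies / num_stripes;

        stripe_size /= dev_stripes;
        stripe_size = stripe_size / STRIPE_LEN * STRIPE_LEN;

        num_bytes = stripe_size * (num_stripes / ncopies);

        printf("ndevs=%d num_stripes=%d stripe_size=%llu chunk_bytes=%llu\n",
               ndevs, num_stripes,
               (unsigned long long)stripe_size,
               (unsigned long long)num_bytes);
        return 0;
    }

For these inputs the program reports ndevs=4, num_stripes=4, a 650 MiB stripe_size and a 1300 MiB chunk, i.e. two copies of the data striped across four devices, which matches the "maximize the number of stripes" goal stated in the new allocator.
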
@@ -2849,7 +2739,7 @@ int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset) | |||
2849 | 2739 | ||
2850 | void btrfs_mapping_init(struct btrfs_mapping_tree *tree) | 2740 | void btrfs_mapping_init(struct btrfs_mapping_tree *tree) |
2851 | { | 2741 | { |
2852 | extent_map_tree_init(&tree->map_tree, GFP_NOFS); | 2742 | extent_map_tree_init(&tree->map_tree); |
2853 | } | 2743 | } |
2854 | 2744 | ||
2855 | void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) | 2745 | void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree) |
@@ -3499,7 +3389,7 @@ static int read_one_chunk(struct btrfs_root *root, struct btrfs_key *key, | |||
3499 | free_extent_map(em); | 3389 | free_extent_map(em); |
3500 | } | 3390 | } |
3501 | 3391 | ||
3502 | em = alloc_extent_map(GFP_NOFS); | 3392 | em = alloc_extent_map(); |
3503 | if (!em) | 3393 | if (!em) |
3504 | return -ENOMEM; | 3394 | return -ENOMEM; |
3505 | num_stripes = btrfs_chunk_num_stripes(leaf, chunk); | 3395 | num_stripes = btrfs_chunk_num_stripes(leaf, chunk); |
@@ -3688,15 +3578,6 @@ static int read_one_dev(struct btrfs_root *root, | |||
3688 | return ret; | 3578 | return ret; |
3689 | } | 3579 | } |
3690 | 3580 | ||
3691 | int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf) | ||
3692 | { | ||
3693 | struct btrfs_dev_item *dev_item; | ||
3694 | |||
3695 | dev_item = (struct btrfs_dev_item *)offsetof(struct btrfs_super_block, | ||
3696 | dev_item); | ||
3697 | return read_one_dev(root, buf, dev_item); | ||
3698 | } | ||
3699 | |||
3700 | int btrfs_read_sys_array(struct btrfs_root *root) | 3581 | int btrfs_read_sys_array(struct btrfs_root *root) |
3701 | { | 3582 | { |
3702 | struct btrfs_super_block *super_copy = &root->fs_info->super_copy; | 3583 | struct btrfs_super_block *super_copy = &root->fs_info->super_copy; |
@@ -3813,7 +3694,7 @@ again: | |||
3813 | } | 3694 | } |
3814 | if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { | 3695 | if (key.objectid == BTRFS_DEV_ITEMS_OBJECTID) { |
3815 | key.objectid = 0; | 3696 | key.objectid = 0; |
3816 | btrfs_release_path(root, path); | 3697 | btrfs_release_path(path); |
3817 | goto again; | 3698 | goto again; |
3818 | } | 3699 | } |
3819 | ret = 0; | 3700 | ret = 0; |
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index cc2eadaf7a27..7c12d61ae7ae 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h | |||
@@ -85,7 +85,12 @@ struct btrfs_device { | |||
85 | /* physical drive uuid (or lvm uuid) */ | 85 | /* physical drive uuid (or lvm uuid) */ |
86 | u8 uuid[BTRFS_UUID_SIZE]; | 86 | u8 uuid[BTRFS_UUID_SIZE]; |
87 | 87 | ||
88 | /* per-device scrub information */ | ||
89 | struct scrub_dev *scrub_device; | ||
90 | |||
88 | struct btrfs_work work; | 91 | struct btrfs_work work; |
92 | struct rcu_head rcu; | ||
93 | struct work_struct rcu_work; | ||
89 | }; | 94 | }; |
90 | 95 | ||
91 | struct btrfs_fs_devices { | 96 | struct btrfs_fs_devices { |
@@ -144,6 +149,7 @@ struct btrfs_device_info { | |||
144 | struct btrfs_device *dev; | 149 | struct btrfs_device *dev; |
145 | u64 dev_offset; | 150 | u64 dev_offset; |
146 | u64 max_avail; | 151 | u64 max_avail; |
152 | u64 total_avail; | ||
147 | }; | 153 | }; |
148 | 154 | ||
149 | struct map_lookup { | 155 | struct map_lookup { |
@@ -157,20 +163,8 @@ struct map_lookup { | |||
157 | struct btrfs_bio_stripe stripes[]; | 163 | struct btrfs_bio_stripe stripes[]; |
158 | }; | 164 | }; |
159 | 165 | ||
160 | /* Used to sort the devices by max_avail(descending sort) */ | 166 | #define map_lookup_size(n) (sizeof(struct map_lookup) + \ |
161 | int btrfs_cmp_device_free_bytes(const void *dev_info1, const void *dev_info2); | 167 | (sizeof(struct btrfs_bio_stripe) * (n))) |
162 | |||
163 | /* | ||
164 | * sort the devices by max_avail, in which max free extent size of each device | ||
165 | * is stored.(Descending Sort) | ||
166 | */ | ||
167 | static inline void btrfs_descending_sort_devices( | ||
168 | struct btrfs_device_info *devices, | ||
169 | size_t nr_devices) | ||
170 | { | ||
171 | sort(devices, nr_devices, sizeof(struct btrfs_device_info), | ||
172 | btrfs_cmp_device_free_bytes, NULL); | ||
173 | } | ||
174 | 168 | ||
175 | int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, | 169 | int btrfs_account_dev_extents_size(struct btrfs_device *device, u64 start, |
176 | u64 end, u64 *length); | 170 | u64 end, u64 *length); |
@@ -196,7 +190,6 @@ void btrfs_mapping_init(struct btrfs_mapping_tree *tree); | |||
196 | void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree); | 190 | void btrfs_mapping_tree_free(struct btrfs_mapping_tree *tree); |
197 | int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, | 191 | int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio, |
198 | int mirror_num, int async_submit); | 192 | int mirror_num, int async_submit); |
199 | int btrfs_read_super_device(struct btrfs_root *root, struct extent_buffer *buf); | ||
200 | int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, | 193 | int btrfs_open_devices(struct btrfs_fs_devices *fs_devices, |
201 | fmode_t flags, void *holder); | 194 | fmode_t flags, void *holder); |
202 | int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, | 195 | int btrfs_scan_one_device(const char *path, fmode_t flags, void *holder, |
@@ -209,8 +202,6 @@ int btrfs_add_device(struct btrfs_trans_handle *trans, | |||
209 | int btrfs_rm_device(struct btrfs_root *root, char *device_path); | 202 | int btrfs_rm_device(struct btrfs_root *root, char *device_path); |
210 | int btrfs_cleanup_fs_uuids(void); | 203 | int btrfs_cleanup_fs_uuids(void); |
211 | int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len); | 204 | int btrfs_num_copies(struct btrfs_mapping_tree *map_tree, u64 logical, u64 len); |
212 | int btrfs_unplug_page(struct btrfs_mapping_tree *map_tree, | ||
213 | u64 logical, struct page *page); | ||
214 | int btrfs_grow_device(struct btrfs_trans_handle *trans, | 205 | int btrfs_grow_device(struct btrfs_trans_handle *trans, |
215 | struct btrfs_device *device, u64 new_size); | 206 | struct btrfs_device *device, u64 new_size); |
216 | struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid, | 207 | struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid, |
@@ -218,8 +209,6 @@ struct btrfs_device *btrfs_find_device(struct btrfs_root *root, u64 devid, | |||
218 | int btrfs_shrink_device(struct btrfs_device *device, u64 new_size); | 209 | int btrfs_shrink_device(struct btrfs_device *device, u64 new_size); |
219 | int btrfs_init_new_device(struct btrfs_root *root, char *path); | 210 | int btrfs_init_new_device(struct btrfs_root *root, char *path); |
220 | int btrfs_balance(struct btrfs_root *dev_root); | 211 | int btrfs_balance(struct btrfs_root *dev_root); |
221 | void btrfs_unlock_volumes(void); | ||
222 | void btrfs_lock_volumes(void); | ||
223 | int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); | 212 | int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); |
224 | int find_free_dev_extent(struct btrfs_trans_handle *trans, | 213 | int find_free_dev_extent(struct btrfs_trans_handle *trans, |
225 | struct btrfs_device *device, u64 num_bytes, | 214 | struct btrfs_device *device, u64 num_bytes, |
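Side note on the volumes.h hunk above: the new map_lookup_size() macro packages the usual flexible-array sizing for struct map_lookup, replacing the removed per-device sort helpers. A minimal sketch of how such a macro is typically consumed follows; the helper name, the kzalloc call, the GFP_NOFS flag and the num_stripes assignment are illustrative assumptions, not part of this patch.

	/* Sketch only: allocate a map_lookup with room for num_stripes entries,
	 * using the map_lookup_size() macro introduced above. */
	static struct map_lookup *demo_alloc_map_lookup(int num_stripes)
	{
		struct map_lookup *map;

		map = kzalloc(map_lookup_size(num_stripes), GFP_NOFS);
		if (!map)
			return NULL;
		map->num_stripes = num_stripes;	/* field assumed from volumes.h */
		return map;
	}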
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c index cfd660550ded..f3107e4b4d56 100644 --- a/fs/btrfs/xattr.c +++ b/fs/btrfs/xattr.c | |||
@@ -44,7 +44,7 @@ ssize_t __btrfs_getxattr(struct inode *inode, const char *name, | |||
44 | return -ENOMEM; | 44 | return -ENOMEM; |
45 | 45 | ||
46 | /* lookup the xattr by name */ | 46 | /* lookup the xattr by name */ |
47 | di = btrfs_lookup_xattr(NULL, root, path, inode->i_ino, name, | 47 | di = btrfs_lookup_xattr(NULL, root, path, btrfs_ino(inode), name, |
48 | strlen(name), 0); | 48 | strlen(name), 0); |
49 | if (!di) { | 49 | if (!di) { |
50 | ret = -ENODATA; | 50 | ret = -ENODATA; |
@@ -103,7 +103,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans, | |||
103 | return -ENOMEM; | 103 | return -ENOMEM; |
104 | 104 | ||
105 | /* first lets see if we already have this xattr */ | 105 | /* first lets see if we already have this xattr */ |
106 | di = btrfs_lookup_xattr(trans, root, path, inode->i_ino, name, | 106 | di = btrfs_lookup_xattr(trans, root, path, btrfs_ino(inode), name, |
107 | strlen(name), -1); | 107 | strlen(name), -1); |
108 | if (IS_ERR(di)) { | 108 | if (IS_ERR(di)) { |
109 | ret = PTR_ERR(di); | 109 | ret = PTR_ERR(di); |
@@ -120,13 +120,13 @@ static int do_setxattr(struct btrfs_trans_handle *trans, | |||
120 | 120 | ||
121 | ret = btrfs_delete_one_dir_name(trans, root, path, di); | 121 | ret = btrfs_delete_one_dir_name(trans, root, path, di); |
122 | BUG_ON(ret); | 122 | BUG_ON(ret); |
123 | btrfs_release_path(root, path); | 123 | btrfs_release_path(path); |
124 | 124 | ||
125 | /* if we don't have a value then we are removing the xattr */ | 125 | /* if we don't have a value then we are removing the xattr */ |
126 | if (!value) | 126 | if (!value) |
127 | goto out; | 127 | goto out; |
128 | } else { | 128 | } else { |
129 | btrfs_release_path(root, path); | 129 | btrfs_release_path(path); |
130 | 130 | ||
131 | if (flags & XATTR_REPLACE) { | 131 | if (flags & XATTR_REPLACE) { |
132 | /* we couldn't find the attr to replace */ | 132 | /* we couldn't find the attr to replace */ |
@@ -136,7 +136,7 @@ static int do_setxattr(struct btrfs_trans_handle *trans, | |||
136 | } | 136 | } |
137 | 137 | ||
138 | /* ok we have to create a completely new xattr */ | 138 | /* ok we have to create a completely new xattr */ |
139 | ret = btrfs_insert_xattr_item(trans, root, path, inode->i_ino, | 139 | ret = btrfs_insert_xattr_item(trans, root, path, btrfs_ino(inode), |
140 | name, name_len, value, size); | 140 | name, name_len, value, size); |
141 | BUG_ON(ret); | 141 | BUG_ON(ret); |
142 | out: | 142 | out: |
@@ -190,7 +190,7 @@ ssize_t btrfs_listxattr(struct dentry *dentry, char *buffer, size_t size) | |||
190 | * NOTE: we set key.offset = 0; because we want to start with the | 190 | * NOTE: we set key.offset = 0; because we want to start with the |
191 | * first xattr that we find and walk forward | 191 | * first xattr that we find and walk forward |
192 | */ | 192 | */ |
193 | key.objectid = inode->i_ino; | 193 | key.objectid = btrfs_ino(inode); |
194 | btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); | 194 | btrfs_set_key_type(&key, BTRFS_XATTR_ITEM_KEY); |
195 | key.offset = 0; | 195 | key.offset = 0; |
196 | 196 | ||
diff --git a/fs/buffer.c b/fs/buffer.c index a08bb8e61c6f..698c6b2cc462 100644 --- a/fs/buffer.c +++ b/fs/buffer.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/bitops.h> | 41 | #include <linux/bitops.h> |
42 | #include <linux/mpage.h> | 42 | #include <linux/mpage.h> |
43 | #include <linux/bit_spinlock.h> | 43 | #include <linux/bit_spinlock.h> |
44 | #include <linux/cleancache.h> | ||
44 | 45 | ||
45 | static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); | 46 | static int fsync_buffers_list(spinlock_t *lock, struct list_head *list); |
46 | 47 | ||
@@ -269,6 +270,10 @@ void invalidate_bdev(struct block_device *bdev) | |||
269 | invalidate_bh_lrus(); | 270 | invalidate_bh_lrus(); |
270 | lru_add_drain_all(); /* make sure all lru add caches are flushed */ | 271 | lru_add_drain_all(); /* make sure all lru add caches are flushed */ |
271 | invalidate_mapping_pages(mapping, 0, -1); | 272 | invalidate_mapping_pages(mapping, 0, -1); |
273 | /* 99% of the time, we don't need to flush the cleancache on the bdev. | ||
274 | * But, for the strange corners, let's be cautious. | ||
275 | */ | ||
276 | cleancache_flush_inode(mapping); | ||
272 | } | 277 | } |
273 | EXPORT_SYMBOL(invalidate_bdev); | 278 | EXPORT_SYMBOL(invalidate_bdev); |
274 | 279 | ||
@@ -2331,24 +2336,26 @@ EXPORT_SYMBOL(block_commit_write); | |||
2331 | * page lock we can determine safely if the page is beyond EOF. If it is not | 2336 | * page lock we can determine safely if the page is beyond EOF. If it is not |
2332 | * beyond EOF, then the page is guaranteed safe against truncation until we | 2337 | * beyond EOF, then the page is guaranteed safe against truncation until we |
2333 | * unlock the page. | 2338 | * unlock the page. |
2339 | * | ||
2340 | * Direct callers of this function should call vfs_check_frozen() so that the | ||
2341 | * page fault handler does not busyloop until the fs is thawed. | ||
2334 | */ | 2342 | */ |
2335 | int | 2343 | int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, |
2336 | block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | 2344 | get_block_t get_block) |
2337 | get_block_t get_block) | ||
2338 | { | 2345 | { |
2339 | struct page *page = vmf->page; | 2346 | struct page *page = vmf->page; |
2340 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; | 2347 | struct inode *inode = vma->vm_file->f_path.dentry->d_inode; |
2341 | unsigned long end; | 2348 | unsigned long end; |
2342 | loff_t size; | 2349 | loff_t size; |
2343 | int ret = VM_FAULT_NOPAGE; /* make the VM retry the fault */ | 2350 | int ret; |
2344 | 2351 | ||
2345 | lock_page(page); | 2352 | lock_page(page); |
2346 | size = i_size_read(inode); | 2353 | size = i_size_read(inode); |
2347 | if ((page->mapping != inode->i_mapping) || | 2354 | if ((page->mapping != inode->i_mapping) || |
2348 | (page_offset(page) > size)) { | 2355 | (page_offset(page) > size)) { |
2349 | /* page got truncated out from underneath us */ | 2356 | /* We overload EFAULT to mean page got truncated */ |
2350 | unlock_page(page); | 2357 | ret = -EFAULT; |
2351 | goto out; | 2358 | goto out_unlock; |
2352 | } | 2359 | } |
2353 | 2360 | ||
2354 | /* page is wholly or partially inside EOF */ | 2361 | /* page is wholly or partially inside EOF */ |
@@ -2361,18 +2368,41 @@ block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | |||
2361 | if (!ret) | 2368 | if (!ret) |
2362 | ret = block_commit_write(page, 0, end); | 2369 | ret = block_commit_write(page, 0, end); |
2363 | 2370 | ||
2364 | if (unlikely(ret)) { | 2371 | if (unlikely(ret < 0)) |
2365 | unlock_page(page); | 2372 | goto out_unlock; |
2366 | if (ret == -ENOMEM) | 2373 | /* |
2367 | ret = VM_FAULT_OOM; | 2374 | * Freezing in progress? We check after the page is marked dirty and |
2368 | else /* -ENOSPC, -EIO, etc */ | 2375 | * with page lock held so if the test here fails, we are sure freezing |
2369 | ret = VM_FAULT_SIGBUS; | 2376 | * code will wait during syncing until the page fault is done - at that |
2370 | } else | 2377 | * point page will be dirty and unlocked so freezing code will write it |
2371 | ret = VM_FAULT_LOCKED; | 2378 | * and writeprotect it again. |
2372 | 2379 | */ | |
2373 | out: | 2380 | set_page_dirty(page); |
2381 | if (inode->i_sb->s_frozen != SB_UNFROZEN) { | ||
2382 | ret = -EAGAIN; | ||
2383 | goto out_unlock; | ||
2384 | } | ||
2385 | return 0; | ||
2386 | out_unlock: | ||
2387 | unlock_page(page); | ||
2374 | return ret; | 2388 | return ret; |
2375 | } | 2389 | } |
2390 | EXPORT_SYMBOL(__block_page_mkwrite); | ||
2391 | |||
2392 | int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | ||
2393 | get_block_t get_block) | ||
2394 | { | ||
2395 | int ret; | ||
2396 | struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb; | ||
2397 | |||
2398 | /* | ||
2399 | * This check is racy but catches the common case. The check in | ||
2400 | * __block_page_mkwrite() is reliable. | ||
2401 | */ | ||
2402 | vfs_check_frozen(sb, SB_FREEZE_WRITE); | ||
2403 | ret = __block_page_mkwrite(vma, vmf, get_block); | ||
2404 | return block_page_mkwrite_return(ret); | ||
2405 | } | ||
2376 | EXPORT_SYMBOL(block_page_mkwrite); | 2406 | EXPORT_SYMBOL(block_page_mkwrite); |
2377 | 2407 | ||
2378 | /* | 2408 | /* |
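Side note on the buffer.c hunk above: block_page_mkwrite() is split into __block_page_mkwrite(), which returns 0 with the page locked and dirtied or a plain -errno (-EFAULT for truncation, -EAGAIN while frozen), and a wrapper that maps those onto VM_FAULT_* codes via block_page_mkwrite_return(). A hedged sketch of a filesystem ->page_mkwrite built on the new helper, following the added comment about calling vfs_check_frozen(); myfs_page_mkwrite and myfs_get_block are placeholder names, not taken from this patch.

	/* Sketch only: a filesystem ->page_mkwrite calling the new helper directly.
	 * myfs_get_block is a placeholder get_block_t; the error-to-VM_FAULT
	 * mapping relies on the block_page_mkwrite_return() helper used by the
	 * wrapper above. */
	static int myfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
	{
		struct super_block *sb = vma->vm_file->f_path.dentry->d_inode->i_sb;
		int ret;

		/* Racy fast-path check; __block_page_mkwrite() rechecks reliably. */
		vfs_check_frozen(sb, SB_FREEZE_WRITE);
		ret = __block_page_mkwrite(vma, vmf, myfs_get_block);
		return block_page_mkwrite_return(ret);
	}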
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig index 75c47cd8d086..1cd4c3a1862d 100644 --- a/fs/cifs/Kconfig +++ b/fs/cifs/Kconfig | |||
@@ -153,26 +153,6 @@ config CIFS_ACL | |||
153 | Allows to fetch CIFS/NTFS ACL from the server. The DACL blob | 153 | Allows to fetch CIFS/NTFS ACL from the server. The DACL blob |
154 | is handed over to the application/caller. | 154 | is handed over to the application/caller. |
155 | 155 | ||
156 | config CIFS_SMB2 | ||
157 | bool "SMB2 network file system support (EXPERIMENTAL)" | ||
158 | depends on EXPERIMENTAL && INET && BROKEN | ||
159 | select NLS | ||
160 | select KEYS | ||
161 | select FSCACHE | ||
162 | select DNS_RESOLVER | ||
163 | |||
164 | help | ||
165 | This enables experimental support for the SMB2 (Server Message Block | ||
166 | version 2) protocol. The SMB2 protocol is the successor to the | ||
167 | popular CIFS and SMB network file sharing protocols. SMB2 is the | ||
168 | native file sharing mechanism for recent versions of Windows | ||
169 | operating systems (since Vista). SMB2 enablement will eventually | ||
170 | allow users better performance, security and features, than would be | ||
171 | possible with cifs. Note that smb2 mount options also are simpler | ||
172 | (compared to cifs) due to protocol improvements. | ||
173 | |||
174 | Unless you are a developer or tester, say N. | ||
175 | |||
176 | config CIFS_NFSD_EXPORT | 156 | config CIFS_NFSD_EXPORT |
177 | bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)" | 157 | bool "Allow nfsd to export CIFS file system (EXPERIMENTAL)" |
178 | depends on CIFS && EXPERIMENTAL | 158 | depends on CIFS && EXPERIMENTAL |
diff --git a/fs/cifs/README b/fs/cifs/README index 4a3ca0e5ca24..c5c2c5e5f0f2 100644 --- a/fs/cifs/README +++ b/fs/cifs/README | |||
@@ -457,6 +457,9 @@ A partial list of the supported mount options follows: | |||
457 | otherwise - read from the server. All written data are stored | 457 | otherwise - read from the server. All written data are stored |
458 | in the cache, but if the client doesn't have Exclusive Oplock, | 458 | in the cache, but if the client doesn't have Exclusive Oplock, |
459 | it writes the data to the server. | 459 | it writes the data to the server. |
460 | rwpidforward Forward the pid of the process that opened a file to any read or | ||
461 | write operation on that file. This prevents applications such as | ||
462 | WINE from failing on reads and writes when mandatory brlocks are used. | ||
460 | acl Allow setfacl and getfacl to manage posix ACLs if server | 463 | acl Allow setfacl and getfacl to manage posix ACLs if server |
461 | supports them. (default) | 464 | supports them. (default) |
462 | noacl Do not allow setfacl and getfacl calls on this mount | 465 | noacl Do not allow setfacl and getfacl calls on this mount |
diff --git a/fs/cifs/cache.c b/fs/cifs/cache.c index 53d57a3fe427..dd8584d35a14 100644 --- a/fs/cifs/cache.c +++ b/fs/cifs/cache.c | |||
@@ -146,7 +146,7 @@ static char *extract_sharename(const char *treename) | |||
146 | static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer, | 146 | static uint16_t cifs_super_get_key(const void *cookie_netfs_data, void *buffer, |
147 | uint16_t maxbuf) | 147 | uint16_t maxbuf) |
148 | { | 148 | { |
149 | const struct cifsTconInfo *tcon = cookie_netfs_data; | 149 | const struct cifs_tcon *tcon = cookie_netfs_data; |
150 | char *sharename; | 150 | char *sharename; |
151 | uint16_t len; | 151 | uint16_t len; |
152 | 152 | ||
@@ -173,7 +173,7 @@ cifs_fscache_super_get_aux(const void *cookie_netfs_data, void *buffer, | |||
173 | uint16_t maxbuf) | 173 | uint16_t maxbuf) |
174 | { | 174 | { |
175 | struct cifs_fscache_super_auxdata auxdata; | 175 | struct cifs_fscache_super_auxdata auxdata; |
176 | const struct cifsTconInfo *tcon = cookie_netfs_data; | 176 | const struct cifs_tcon *tcon = cookie_netfs_data; |
177 | 177 | ||
178 | memset(&auxdata, 0, sizeof(auxdata)); | 178 | memset(&auxdata, 0, sizeof(auxdata)); |
179 | auxdata.resource_id = tcon->resource_id; | 179 | auxdata.resource_id = tcon->resource_id; |
@@ -192,7 +192,7 @@ fscache_checkaux cifs_fscache_super_check_aux(void *cookie_netfs_data, | |||
192 | uint16_t datalen) | 192 | uint16_t datalen) |
193 | { | 193 | { |
194 | struct cifs_fscache_super_auxdata auxdata; | 194 | struct cifs_fscache_super_auxdata auxdata; |
195 | const struct cifsTconInfo *tcon = cookie_netfs_data; | 195 | const struct cifs_tcon *tcon = cookie_netfs_data; |
196 | 196 | ||
197 | if (datalen != sizeof(auxdata)) | 197 | if (datalen != sizeof(auxdata)) |
198 | return FSCACHE_CHECKAUX_OBSOLETE; | 198 | return FSCACHE_CHECKAUX_OBSOLETE; |
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c index 18f4272d9047..2fe3cf13b2e9 100644 --- a/fs/cifs/cifs_debug.c +++ b/fs/cifs/cifs_debug.c | |||
@@ -110,8 +110,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) | |||
110 | struct list_head *tmp1, *tmp2, *tmp3; | 110 | struct list_head *tmp1, *tmp2, *tmp3; |
111 | struct mid_q_entry *mid_entry; | 111 | struct mid_q_entry *mid_entry; |
112 | struct TCP_Server_Info *server; | 112 | struct TCP_Server_Info *server; |
113 | struct cifsSesInfo *ses; | 113 | struct cifs_ses *ses; |
114 | struct cifsTconInfo *tcon; | 114 | struct cifs_tcon *tcon; |
115 | int i, j; | 115 | int i, j; |
116 | __u32 dev_type; | 116 | __u32 dev_type; |
117 | 117 | ||
@@ -152,7 +152,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) | |||
152 | tcp_ses_list); | 152 | tcp_ses_list); |
153 | i++; | 153 | i++; |
154 | list_for_each(tmp2, &server->smb_ses_list) { | 154 | list_for_each(tmp2, &server->smb_ses_list) { |
155 | ses = list_entry(tmp2, struct cifsSesInfo, | 155 | ses = list_entry(tmp2, struct cifs_ses, |
156 | smb_ses_list); | 156 | smb_ses_list); |
157 | if ((ses->serverDomain == NULL) || | 157 | if ((ses->serverDomain == NULL) || |
158 | (ses->serverOS == NULL) || | 158 | (ses->serverOS == NULL) || |
@@ -171,7 +171,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) | |||
171 | seq_printf(m, "TCP status: %d\n\tLocal Users To " | 171 | seq_printf(m, "TCP status: %d\n\tLocal Users To " |
172 | "Server: %d SecMode: 0x%x Req On Wire: %d", | 172 | "Server: %d SecMode: 0x%x Req On Wire: %d", |
173 | server->tcpStatus, server->srv_count, | 173 | server->tcpStatus, server->srv_count, |
174 | server->secMode, | 174 | server->sec_mode, |
175 | atomic_read(&server->inFlight)); | 175 | atomic_read(&server->inFlight)); |
176 | 176 | ||
177 | #ifdef CONFIG_CIFS_STATS2 | 177 | #ifdef CONFIG_CIFS_STATS2 |
@@ -183,7 +183,7 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v) | |||
183 | seq_puts(m, "\n\tShares:"); | 183 | seq_puts(m, "\n\tShares:"); |
184 | j = 0; | 184 | j = 0; |
185 | list_for_each(tmp3, &ses->tcon_list) { | 185 | list_for_each(tmp3, &ses->tcon_list) { |
186 | tcon = list_entry(tmp3, struct cifsTconInfo, | 186 | tcon = list_entry(tmp3, struct cifs_tcon, |
187 | tcon_list); | 187 | tcon_list); |
188 | ++j; | 188 | ++j; |
189 | dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType); | 189 | dev_type = le32_to_cpu(tcon->fsDevInfo.DeviceType); |
@@ -256,8 +256,8 @@ static ssize_t cifs_stats_proc_write(struct file *file, | |||
256 | int rc; | 256 | int rc; |
257 | struct list_head *tmp1, *tmp2, *tmp3; | 257 | struct list_head *tmp1, *tmp2, *tmp3; |
258 | struct TCP_Server_Info *server; | 258 | struct TCP_Server_Info *server; |
259 | struct cifsSesInfo *ses; | 259 | struct cifs_ses *ses; |
260 | struct cifsTconInfo *tcon; | 260 | struct cifs_tcon *tcon; |
261 | 261 | ||
262 | rc = get_user(c, buffer); | 262 | rc = get_user(c, buffer); |
263 | if (rc) | 263 | if (rc) |
@@ -273,11 +273,11 @@ static ssize_t cifs_stats_proc_write(struct file *file, | |||
273 | server = list_entry(tmp1, struct TCP_Server_Info, | 273 | server = list_entry(tmp1, struct TCP_Server_Info, |
274 | tcp_ses_list); | 274 | tcp_ses_list); |
275 | list_for_each(tmp2, &server->smb_ses_list) { | 275 | list_for_each(tmp2, &server->smb_ses_list) { |
276 | ses = list_entry(tmp2, struct cifsSesInfo, | 276 | ses = list_entry(tmp2, struct cifs_ses, |
277 | smb_ses_list); | 277 | smb_ses_list); |
278 | list_for_each(tmp3, &ses->tcon_list) { | 278 | list_for_each(tmp3, &ses->tcon_list) { |
279 | tcon = list_entry(tmp3, | 279 | tcon = list_entry(tmp3, |
280 | struct cifsTconInfo, | 280 | struct cifs_tcon, |
281 | tcon_list); | 281 | tcon_list); |
282 | atomic_set(&tcon->num_smbs_sent, 0); | 282 | atomic_set(&tcon->num_smbs_sent, 0); |
283 | atomic_set(&tcon->num_writes, 0); | 283 | atomic_set(&tcon->num_writes, 0); |
@@ -312,8 +312,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) | |||
312 | int i; | 312 | int i; |
313 | struct list_head *tmp1, *tmp2, *tmp3; | 313 | struct list_head *tmp1, *tmp2, *tmp3; |
314 | struct TCP_Server_Info *server; | 314 | struct TCP_Server_Info *server; |
315 | struct cifsSesInfo *ses; | 315 | struct cifs_ses *ses; |
316 | struct cifsTconInfo *tcon; | 316 | struct cifs_tcon *tcon; |
317 | 317 | ||
318 | seq_printf(m, | 318 | seq_printf(m, |
319 | "Resources in use\nCIFS Session: %d\n", | 319 | "Resources in use\nCIFS Session: %d\n", |
@@ -346,11 +346,11 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v) | |||
346 | server = list_entry(tmp1, struct TCP_Server_Info, | 346 | server = list_entry(tmp1, struct TCP_Server_Info, |
347 | tcp_ses_list); | 347 | tcp_ses_list); |
348 | list_for_each(tmp2, &server->smb_ses_list) { | 348 | list_for_each(tmp2, &server->smb_ses_list) { |
349 | ses = list_entry(tmp2, struct cifsSesInfo, | 349 | ses = list_entry(tmp2, struct cifs_ses, |
350 | smb_ses_list); | 350 | smb_ses_list); |
351 | list_for_each(tmp3, &ses->tcon_list) { | 351 | list_for_each(tmp3, &ses->tcon_list) { |
352 | tcon = list_entry(tmp3, | 352 | tcon = list_entry(tmp3, |
353 | struct cifsTconInfo, | 353 | struct cifs_tcon, |
354 | tcon_list); | 354 | tcon_list); |
355 | i++; | 355 | i++; |
356 | seq_printf(m, "\n%d) %s", i, tcon->treeName); | 356 | seq_printf(m, "\n%d) %s", i, tcon->treeName); |
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 2b68ac57d97d..8d8f28c94c0f 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c | |||
@@ -272,7 +272,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt) | |||
272 | struct dfs_info3_param *referrals = NULL; | 272 | struct dfs_info3_param *referrals = NULL; |
273 | unsigned int num_referrals = 0; | 273 | unsigned int num_referrals = 0; |
274 | struct cifs_sb_info *cifs_sb; | 274 | struct cifs_sb_info *cifs_sb; |
275 | struct cifsSesInfo *ses; | 275 | struct cifs_ses *ses; |
276 | char *full_path; | 276 | char *full_path; |
277 | int xid, i; | 277 | int xid, i; |
278 | int rc; | 278 | int rc; |
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h index a9d5692e0c20..ffb1459dc6ec 100644 --- a/fs/cifs/cifs_fs_sb.h +++ b/fs/cifs/cifs_fs_sb.h | |||
@@ -41,6 +41,7 @@ | |||
41 | #define CIFS_MOUNT_MF_SYMLINKS 0x10000 /* Minshall+French Symlinks enabled */ | 41 | #define CIFS_MOUNT_MF_SYMLINKS 0x10000 /* Minshall+French Symlinks enabled */ |
42 | #define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */ | 42 | #define CIFS_MOUNT_MULTIUSER 0x20000 /* multiuser mount */ |
43 | #define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */ | 43 | #define CIFS_MOUNT_STRICT_IO 0x40000 /* strict cache mode */ |
44 | #define CIFS_MOUNT_RWPIDFORWARD 0x80000 /* use pid forwarding for rw */ | ||
44 | 45 | ||
45 | struct cifs_sb_info { | 46 | struct cifs_sb_info { |
46 | struct rb_root tlink_tree; | 47 | struct rb_root tlink_tree; |
@@ -56,8 +57,6 @@ struct cifs_sb_info { | |||
56 | mode_t mnt_file_mode; | 57 | mode_t mnt_file_mode; |
57 | mode_t mnt_dir_mode; | 58 | mode_t mnt_dir_mode; |
58 | unsigned int mnt_cifs_flags; | 59 | unsigned int mnt_cifs_flags; |
59 | int prepathlen; | ||
60 | char *prepath; /* relative path under the share to mount to */ | ||
61 | char *mountdata; /* options received at mount time or via DFS refs */ | 60 | char *mountdata; /* options received at mount time or via DFS refs */ |
62 | struct backing_dev_info bdi; | 61 | struct backing_dev_info bdi; |
63 | struct delayed_work prune_tlinks; | 62 | struct delayed_work prune_tlinks; |
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c index 33d221394aca..2272fd5fe5b7 100644 --- a/fs/cifs/cifs_spnego.c +++ b/fs/cifs/cifs_spnego.c | |||
@@ -95,7 +95,7 @@ struct key_type cifs_spnego_key_type = { | |||
95 | 95 | ||
96 | /* get a key struct with a SPNEGO security blob, suitable for session setup */ | 96 | /* get a key struct with a SPNEGO security blob, suitable for session setup */ |
97 | struct key * | 97 | struct key * |
98 | cifs_get_spnego_key(struct cifsSesInfo *sesInfo) | 98 | cifs_get_spnego_key(struct cifs_ses *sesInfo) |
99 | { | 99 | { |
100 | struct TCP_Server_Info *server = sesInfo->server; | 100 | struct TCP_Server_Info *server = sesInfo->server; |
101 | struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr; | 101 | struct sockaddr_in *sa = (struct sockaddr_in *) &server->dstaddr; |
diff --git a/fs/cifs/cifs_spnego.h b/fs/cifs/cifs_spnego.h index e4041ec4d712..31bef9ee078b 100644 --- a/fs/cifs/cifs_spnego.h +++ b/fs/cifs/cifs_spnego.h | |||
@@ -41,7 +41,7 @@ struct cifs_spnego_msg { | |||
41 | 41 | ||
42 | #ifdef __KERNEL__ | 42 | #ifdef __KERNEL__ |
43 | extern struct key_type cifs_spnego_key_type; | 43 | extern struct key_type cifs_spnego_key_type; |
44 | extern struct key *cifs_get_spnego_key(struct cifsSesInfo *sesInfo); | 44 | extern struct key *cifs_get_spnego_key(struct cifs_ses *sesInfo); |
45 | #endif /* KERNEL */ | 45 | #endif /* KERNEL */ |
46 | 46 | ||
47 | #endif /* _CIFS_SPNEGO_H */ | 47 | #endif /* _CIFS_SPNEGO_H */ |
diff --git a/fs/cifs/cifsacl.c b/fs/cifs/cifsacl.c index f3c6fb9942ac..8f1700623b41 100644 --- a/fs/cifs/cifsacl.c +++ b/fs/cifs/cifsacl.c | |||
@@ -38,7 +38,7 @@ static const struct cifs_sid sid_everyone = { | |||
38 | 1, 1, {0, 0, 0, 0, 0, 1}, {0} }; | 38 | 1, 1, {0, 0, 0, 0, 0, 1}, {0} }; |
39 | /* security id for Authenticated Users system group */ | 39 | /* security id for Authenticated Users system group */ |
40 | static const struct cifs_sid sid_authusers = { | 40 | static const struct cifs_sid sid_authusers = { |
41 | 1, 1, {0, 0, 0, 0, 0, 5}, {11} }; | 41 | 1, 1, {0, 0, 0, 0, 0, 5}, {__constant_cpu_to_le32(11)} }; |
42 | /* group users */ | 42 | /* group users */ |
43 | static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} }; | 43 | static const struct cifs_sid sid_user = {1, 2 , {0, 0, 0, 0, 0, 5}, {} }; |
44 | 44 | ||
@@ -458,7 +458,8 @@ int compare_sids(const struct cifs_sid *ctsid, const struct cifs_sid *cwsid) | |||
458 | if (num_subauth) { | 458 | if (num_subauth) { |
459 | for (i = 0; i < num_subauth; ++i) { | 459 | for (i = 0; i < num_subauth; ++i) { |
460 | if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) { | 460 | if (ctsid->sub_auth[i] != cwsid->sub_auth[i]) { |
461 | if (ctsid->sub_auth[i] > cwsid->sub_auth[i]) | 461 | if (le32_to_cpu(ctsid->sub_auth[i]) > |
462 | le32_to_cpu(cwsid->sub_auth[i])) | ||
462 | return 1; | 463 | return 1; |
463 | else | 464 | else |
464 | return -1; | 465 | return -1; |
@@ -945,7 +946,7 @@ static struct cifs_ntsd *get_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, | |||
945 | int oplock = 0; | 946 | int oplock = 0; |
946 | int xid, rc; | 947 | int xid, rc; |
947 | __u16 fid; | 948 | __u16 fid; |
948 | struct cifsTconInfo *tcon; | 949 | struct cifs_tcon *tcon; |
949 | struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); | 950 | struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); |
950 | 951 | ||
951 | if (IS_ERR(tlink)) | 952 | if (IS_ERR(tlink)) |
@@ -1013,7 +1014,7 @@ static int set_cifs_acl_by_path(struct cifs_sb_info *cifs_sb, const char *path, | |||
1013 | int oplock = 0; | 1014 | int oplock = 0; |
1014 | int xid, rc; | 1015 | int xid, rc; |
1015 | __u16 fid; | 1016 | __u16 fid; |
1016 | struct cifsTconInfo *tcon; | 1017 | struct cifs_tcon *tcon; |
1017 | struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); | 1018 | struct tcon_link *tlink = cifs_sb_tlink(cifs_sb); |
1018 | 1019 | ||
1019 | if (IS_ERR(tlink)) | 1020 | if (IS_ERR(tlink)) |
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c index 45c3f78c8f81..dfbd9f1f373d 100644 --- a/fs/cifs/cifsencrypt.c +++ b/fs/cifs/cifsencrypt.c | |||
@@ -229,7 +229,7 @@ int cifs_verify_signature(struct smb_hdr *cifs_pdu, | |||
229 | } | 229 | } |
230 | 230 | ||
231 | /* first calculate 24 bytes ntlm response and then 16 byte session key */ | 231 | /* first calculate 24 bytes ntlm response and then 16 byte session key */ |
232 | int setup_ntlm_response(struct cifsSesInfo *ses) | 232 | int setup_ntlm_response(struct cifs_ses *ses) |
233 | { | 233 | { |
234 | int rc = 0; | 234 | int rc = 0; |
235 | unsigned int temp_len = CIFS_SESS_KEY_SIZE + CIFS_AUTH_RESP_SIZE; | 235 | unsigned int temp_len = CIFS_SESS_KEY_SIZE + CIFS_AUTH_RESP_SIZE; |
@@ -312,7 +312,7 @@ int calc_lanman_hash(const char *password, const char *cryptkey, bool encrypt, | |||
312 | * Allocate domain name which gets freed when session struct is deallocated. | 312 | * Allocate domain name which gets freed when session struct is deallocated. |
313 | */ | 313 | */ |
314 | static int | 314 | static int |
315 | build_avpair_blob(struct cifsSesInfo *ses, const struct nls_table *nls_cp) | 315 | build_avpair_blob(struct cifs_ses *ses, const struct nls_table *nls_cp) |
316 | { | 316 | { |
317 | unsigned int dlen; | 317 | unsigned int dlen; |
318 | unsigned int wlen; | 318 | unsigned int wlen; |
@@ -400,7 +400,7 @@ build_avpair_blob(struct cifsSesInfo *ses, const struct nls_table *nls_cp) | |||
400 | * about target string i.e. for some, just user name might suffice. | 400 | * about target string i.e. for some, just user name might suffice. |
401 | */ | 401 | */ |
402 | static int | 402 | static int |
403 | find_domain_name(struct cifsSesInfo *ses, const struct nls_table *nls_cp) | 403 | find_domain_name(struct cifs_ses *ses, const struct nls_table *nls_cp) |
404 | { | 404 | { |
405 | unsigned int attrsize; | 405 | unsigned int attrsize; |
406 | unsigned int type; | 406 | unsigned int type; |
@@ -445,7 +445,7 @@ find_domain_name(struct cifsSesInfo *ses, const struct nls_table *nls_cp) | |||
445 | return 0; | 445 | return 0; |
446 | } | 446 | } |
447 | 447 | ||
448 | static int calc_ntlmv2_hash(struct cifsSesInfo *ses, char *ntlmv2_hash, | 448 | static int calc_ntlmv2_hash(struct cifs_ses *ses, char *ntlmv2_hash, |
449 | const struct nls_table *nls_cp) | 449 | const struct nls_table *nls_cp) |
450 | { | 450 | { |
451 | int rc = 0; | 451 | int rc = 0; |
@@ -527,7 +527,7 @@ calc_exit_2: | |||
527 | } | 527 | } |
528 | 528 | ||
529 | static int | 529 | static int |
530 | CalcNTLMv2_response(const struct cifsSesInfo *ses, char *ntlmv2_hash) | 530 | CalcNTLMv2_response(const struct cifs_ses *ses, char *ntlmv2_hash) |
531 | { | 531 | { |
532 | int rc; | 532 | int rc; |
533 | unsigned int offset = CIFS_SESS_KEY_SIZE + 8; | 533 | unsigned int offset = CIFS_SESS_KEY_SIZE + 8; |
@@ -563,7 +563,7 @@ CalcNTLMv2_response(const struct cifsSesInfo *ses, char *ntlmv2_hash) | |||
563 | 563 | ||
564 | 564 | ||
565 | int | 565 | int |
566 | setup_ntlmv2_rsp(struct cifsSesInfo *ses, const struct nls_table *nls_cp) | 566 | setup_ntlmv2_rsp(struct cifs_ses *ses, const struct nls_table *nls_cp) |
567 | { | 567 | { |
568 | int rc; | 568 | int rc; |
569 | int baselen; | 569 | int baselen; |
@@ -649,7 +649,7 @@ setup_ntlmv2_rsp_ret: | |||
649 | } | 649 | } |
650 | 650 | ||
651 | int | 651 | int |
652 | calc_seckey(struct cifsSesInfo *ses) | 652 | calc_seckey(struct cifs_ses *ses) |
653 | { | 653 | { |
654 | int rc; | 654 | int rc; |
655 | struct crypto_blkcipher *tfm_arc4; | 655 | struct crypto_blkcipher *tfm_arc4; |
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c index 493b74ca5648..989442dcfb45 100644 --- a/fs/cifs/cifsfs.c +++ b/fs/cifs/cifsfs.c | |||
@@ -104,46 +104,25 @@ cifs_sb_deactive(struct super_block *sb) | |||
104 | } | 104 | } |
105 | 105 | ||
106 | static int | 106 | static int |
107 | cifs_read_super(struct super_block *sb, void *data, | 107 | cifs_read_super(struct super_block *sb, struct smb_vol *volume_info, |
108 | const char *devname, int silent) | 108 | const char *devname, int silent) |
109 | { | 109 | { |
110 | struct inode *inode; | 110 | struct inode *inode; |
111 | struct cifs_sb_info *cifs_sb; | 111 | struct cifs_sb_info *cifs_sb; |
112 | int rc = 0; | 112 | int rc = 0; |
113 | 113 | ||
114 | /* BB should we make this contingent on mount parm? */ | ||
115 | sb->s_flags |= MS_NODIRATIME | MS_NOATIME; | ||
116 | sb->s_fs_info = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); | ||
117 | cifs_sb = CIFS_SB(sb); | 114 | cifs_sb = CIFS_SB(sb); |
118 | if (cifs_sb == NULL) | ||
119 | return -ENOMEM; | ||
120 | 115 | ||
121 | spin_lock_init(&cifs_sb->tlink_tree_lock); | 116 | spin_lock_init(&cifs_sb->tlink_tree_lock); |
122 | cifs_sb->tlink_tree = RB_ROOT; | 117 | cifs_sb->tlink_tree = RB_ROOT; |
123 | 118 | ||
124 | rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); | 119 | rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY); |
125 | if (rc) { | 120 | if (rc) |
126 | kfree(cifs_sb); | ||
127 | return rc; | 121 | return rc; |
128 | } | ||
129 | cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages; | ||
130 | 122 | ||
131 | /* | 123 | cifs_sb->bdi.ra_pages = default_backing_dev_info.ra_pages; |
132 | * Copy mount params to sb for use in submounts. Better to do | ||
133 | * the copy here and deal with the error before cleanup gets | ||
134 | * complicated post-mount. | ||
135 | */ | ||
136 | if (data) { | ||
137 | cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL); | ||
138 | if (cifs_sb->mountdata == NULL) { | ||
139 | bdi_destroy(&cifs_sb->bdi); | ||
140 | kfree(sb->s_fs_info); | ||
141 | sb->s_fs_info = NULL; | ||
142 | return -ENOMEM; | ||
143 | } | ||
144 | } | ||
145 | 124 | ||
146 | rc = cifs_mount(sb, cifs_sb, devname); | 125 | rc = cifs_mount(sb, cifs_sb, volume_info, devname); |
147 | 126 | ||
148 | if (rc) { | 127 | if (rc) { |
149 | if (!silent) | 128 | if (!silent) |
@@ -194,15 +173,7 @@ out_no_root: | |||
194 | cifs_umount(sb, cifs_sb); | 173 | cifs_umount(sb, cifs_sb); |
195 | 174 | ||
196 | out_mount_failed: | 175 | out_mount_failed: |
197 | if (cifs_sb) { | 176 | bdi_destroy(&cifs_sb->bdi); |
198 | if (cifs_sb->mountdata) { | ||
199 | kfree(cifs_sb->mountdata); | ||
200 | cifs_sb->mountdata = NULL; | ||
201 | } | ||
202 | unload_nls(cifs_sb->local_nls); | ||
203 | bdi_destroy(&cifs_sb->bdi); | ||
204 | kfree(cifs_sb); | ||
205 | } | ||
206 | return rc; | 177 | return rc; |
207 | } | 178 | } |
208 | 179 | ||
@@ -237,7 +208,7 @@ cifs_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
237 | { | 208 | { |
238 | struct super_block *sb = dentry->d_sb; | 209 | struct super_block *sb = dentry->d_sb; |
239 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | 210 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
240 | struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); | 211 | struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); |
241 | int rc = -EOPNOTSUPP; | 212 | int rc = -EOPNOTSUPP; |
242 | int xid; | 213 | int xid; |
243 | 214 | ||
@@ -390,7 +361,7 @@ static int | |||
390 | cifs_show_options(struct seq_file *s, struct vfsmount *m) | 361 | cifs_show_options(struct seq_file *s, struct vfsmount *m) |
391 | { | 362 | { |
392 | struct cifs_sb_info *cifs_sb = CIFS_SB(m->mnt_sb); | 363 | struct cifs_sb_info *cifs_sb = CIFS_SB(m->mnt_sb); |
393 | struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); | 364 | struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); |
394 | struct sockaddr *srcaddr; | 365 | struct sockaddr *srcaddr; |
395 | srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr; | 366 | srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr; |
396 | 367 | ||
@@ -444,14 +415,20 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m) | |||
444 | seq_printf(s, ",nocase"); | 415 | seq_printf(s, ",nocase"); |
445 | if (tcon->retry) | 416 | if (tcon->retry) |
446 | seq_printf(s, ",hard"); | 417 | seq_printf(s, ",hard"); |
447 | if (cifs_sb->prepath) | 418 | if (tcon->unix_ext) |
448 | seq_printf(s, ",prepath=%s", cifs_sb->prepath); | 419 | seq_printf(s, ",unix"); |
420 | else | ||
421 | seq_printf(s, ",nounix"); | ||
449 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) | 422 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) |
450 | seq_printf(s, ",posixpaths"); | 423 | seq_printf(s, ",posixpaths"); |
451 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) | 424 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID) |
452 | seq_printf(s, ",setuids"); | 425 | seq_printf(s, ",setuids"); |
453 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) | 426 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) |
454 | seq_printf(s, ",serverino"); | 427 | seq_printf(s, ",serverino"); |
428 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) | ||
429 | seq_printf(s, ",rwpidforward"); | ||
430 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) | ||
431 | seq_printf(s, ",forcemand"); | ||
455 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) | 432 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO) |
456 | seq_printf(s, ",directio"); | 433 | seq_printf(s, ",directio"); |
457 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) | 434 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) |
@@ -484,7 +461,7 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m) | |||
484 | static void cifs_umount_begin(struct super_block *sb) | 461 | static void cifs_umount_begin(struct super_block *sb) |
485 | { | 462 | { |
486 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | 463 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
487 | struct cifsTconInfo *tcon; | 464 | struct cifs_tcon *tcon; |
488 | 465 | ||
489 | if (cifs_sb == NULL) | 466 | if (cifs_sb == NULL) |
490 | return; | 467 | return; |
@@ -559,29 +536,189 @@ static const struct super_operations cifs_super_ops = { | |||
559 | #endif | 536 | #endif |
560 | }; | 537 | }; |
561 | 538 | ||
539 | /* | ||
540 | * Get root dentry from superblock according to prefix path mount option. | ||
541 | * Return dentry with refcount + 1 on success and NULL otherwise. | ||
542 | */ | ||
543 | static struct dentry * | ||
544 | cifs_get_root(struct smb_vol *vol, struct super_block *sb) | ||
545 | { | ||
546 | int xid, rc; | ||
547 | struct inode *inode; | ||
548 | struct qstr name; | ||
549 | struct dentry *dparent = NULL, *dchild = NULL, *alias; | ||
550 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | ||
551 | unsigned int i, full_len, len; | ||
552 | char *full_path = NULL, *pstart; | ||
553 | char sep; | ||
554 | |||
555 | full_path = cifs_build_path_to_root(vol, cifs_sb, | ||
556 | cifs_sb_master_tcon(cifs_sb)); | ||
557 | if (full_path == NULL) | ||
558 | return NULL; | ||
559 | |||
560 | cFYI(1, "Get root dentry for %s", full_path); | ||
561 | |||
562 | xid = GetXid(); | ||
563 | sep = CIFS_DIR_SEP(cifs_sb); | ||
564 | dparent = dget(sb->s_root); | ||
565 | full_len = strlen(full_path); | ||
566 | full_path[full_len] = sep; | ||
567 | pstart = full_path + 1; | ||
568 | |||
569 | for (i = 1, len = 0; i <= full_len; i++) { | ||
570 | if (full_path[i] != sep || !len) { | ||
571 | len++; | ||
572 | continue; | ||
573 | } | ||
574 | |||
575 | full_path[i] = 0; | ||
576 | cFYI(1, "get dentry for %s", pstart); | ||
577 | |||
578 | name.name = pstart; | ||
579 | name.len = len; | ||
580 | name.hash = full_name_hash(pstart, len); | ||
581 | dchild = d_lookup(dparent, &name); | ||
582 | if (dchild == NULL) { | ||
583 | cFYI(1, "not exists"); | ||
584 | dchild = d_alloc(dparent, &name); | ||
585 | if (dchild == NULL) { | ||
586 | dput(dparent); | ||
587 | dparent = NULL; | ||
588 | goto out; | ||
589 | } | ||
590 | } | ||
591 | |||
592 | cFYI(1, "get inode"); | ||
593 | if (dchild->d_inode == NULL) { | ||
594 | cFYI(1, "not exists"); | ||
595 | inode = NULL; | ||
596 | if (cifs_sb_master_tcon(CIFS_SB(sb))->unix_ext) | ||
597 | rc = cifs_get_inode_info_unix(&inode, full_path, | ||
598 | sb, xid); | ||
599 | else | ||
600 | rc = cifs_get_inode_info(&inode, full_path, | ||
601 | NULL, sb, xid, NULL); | ||
602 | if (rc) { | ||
603 | dput(dchild); | ||
604 | dput(dparent); | ||
605 | dparent = NULL; | ||
606 | goto out; | ||
607 | } | ||
608 | alias = d_materialise_unique(dchild, inode); | ||
609 | if (alias != NULL) { | ||
610 | dput(dchild); | ||
611 | if (IS_ERR(alias)) { | ||
612 | dput(dparent); | ||
613 | dparent = NULL; | ||
614 | goto out; | ||
615 | } | ||
616 | dchild = alias; | ||
617 | } | ||
618 | } | ||
619 | cFYI(1, "parent %p, child %p", dparent, dchild); | ||
620 | |||
621 | dput(dparent); | ||
622 | dparent = dchild; | ||
623 | len = 0; | ||
624 | pstart = full_path + i + 1; | ||
625 | full_path[i] = sep; | ||
626 | } | ||
627 | out: | ||
628 | _FreeXid(xid); | ||
629 | kfree(full_path); | ||
630 | return dparent; | ||
631 | } | ||
632 | |||
562 | static struct dentry * | 633 | static struct dentry * |
563 | cifs_do_mount(struct file_system_type *fs_type, | 634 | cifs_do_mount(struct file_system_type *fs_type, |
564 | int flags, const char *dev_name, void *data) | 635 | int flags, const char *dev_name, void *data) |
565 | { | 636 | { |
566 | int rc; | 637 | int rc; |
567 | struct super_block *sb; | 638 | struct super_block *sb; |
568 | 639 | struct cifs_sb_info *cifs_sb; | |
569 | sb = sget(fs_type, NULL, set_anon_super, NULL); | 640 | struct smb_vol *volume_info; |
641 | struct cifs_mnt_data mnt_data; | ||
642 | struct dentry *root; | ||
570 | 643 | ||
571 | cFYI(1, "Devname: %s flags: %d ", dev_name, flags); | 644 | cFYI(1, "Devname: %s flags: %d ", dev_name, flags); |
572 | 645 | ||
573 | if (IS_ERR(sb)) | 646 | rc = cifs_setup_volume_info(&volume_info, (char *)data, dev_name); |
574 | return ERR_CAST(sb); | 647 | if (rc) |
648 | return ERR_PTR(rc); | ||
649 | |||
650 | cifs_sb = kzalloc(sizeof(struct cifs_sb_info), GFP_KERNEL); | ||
651 | if (cifs_sb == NULL) { | ||
652 | root = ERR_PTR(-ENOMEM); | ||
653 | goto out; | ||
654 | } | ||
655 | |||
656 | cifs_setup_cifs_sb(volume_info, cifs_sb); | ||
657 | |||
658 | mnt_data.vol = volume_info; | ||
659 | mnt_data.cifs_sb = cifs_sb; | ||
660 | mnt_data.flags = flags; | ||
661 | |||
662 | sb = sget(fs_type, cifs_match_super, set_anon_super, &mnt_data); | ||
663 | if (IS_ERR(sb)) { | ||
664 | root = ERR_CAST(sb); | ||
665 | goto out_cifs_sb; | ||
666 | } | ||
667 | |||
668 | if (sb->s_fs_info) { | ||
669 | cFYI(1, "Use existing superblock"); | ||
670 | goto out_shared; | ||
671 | } | ||
672 | |||
673 | /* | ||
674 | * Copy mount params for use in submounts. Better to do | ||
675 | * the copy here and deal with the error before cleanup gets | ||
676 | * complicated post-mount. | ||
677 | */ | ||
678 | cifs_sb->mountdata = kstrndup(data, PAGE_SIZE, GFP_KERNEL); | ||
679 | if (cifs_sb->mountdata == NULL) { | ||
680 | root = ERR_PTR(-ENOMEM); | ||
681 | goto out_super; | ||
682 | } | ||
575 | 683 | ||
576 | sb->s_flags = flags; | 684 | sb->s_flags = flags; |
685 | /* BB should we make this contingent on mount parm? */ | ||
686 | sb->s_flags |= MS_NODIRATIME | MS_NOATIME; | ||
687 | sb->s_fs_info = cifs_sb; | ||
577 | 688 | ||
578 | rc = cifs_read_super(sb, data, dev_name, flags & MS_SILENT ? 1 : 0); | 689 | rc = cifs_read_super(sb, volume_info, dev_name, |
690 | flags & MS_SILENT ? 1 : 0); | ||
579 | if (rc) { | 691 | if (rc) { |
580 | deactivate_locked_super(sb); | 692 | root = ERR_PTR(rc); |
581 | return ERR_PTR(rc); | 693 | goto out_super; |
582 | } | 694 | } |
695 | |||
583 | sb->s_flags |= MS_ACTIVE; | 696 | sb->s_flags |= MS_ACTIVE; |
584 | return dget(sb->s_root); | 697 | |
698 | root = cifs_get_root(volume_info, sb); | ||
699 | if (root == NULL) | ||
700 | goto out_super; | ||
701 | |||
702 | cFYI(1, "dentry root is: %p", root); | ||
703 | goto out; | ||
704 | |||
705 | out_shared: | ||
706 | root = cifs_get_root(volume_info, sb); | ||
707 | if (root) | ||
708 | cFYI(1, "dentry root is: %p", root); | ||
709 | goto out; | ||
710 | |||
711 | out_super: | ||
712 | kfree(cifs_sb->mountdata); | ||
713 | deactivate_locked_super(sb); | ||
714 | |||
715 | out_cifs_sb: | ||
716 | unload_nls(cifs_sb->local_nls); | ||
717 | kfree(cifs_sb); | ||
718 | |||
719 | out: | ||
720 | cifs_cleanup_volume_info(&volume_info); | ||
721 | return root; | ||
585 | } | 722 | } |
586 | 723 | ||
587 | static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, | 724 | static ssize_t cifs_file_aio_write(struct kiocb *iocb, const struct iovec *iov, |
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h index 76b4517e74b0..6255fa812c7a 100644 --- a/fs/cifs/cifsglob.h +++ b/fs/cifs/cifsglob.h | |||
@@ -155,6 +155,81 @@ struct cifs_cred { | |||
155 | ***************************************************************** | 155 | ***************************************************************** |
156 | */ | 156 | */ |
157 | 157 | ||
158 | struct smb_vol { | ||
159 | char *username; | ||
160 | char *password; | ||
161 | char *domainname; | ||
162 | char *UNC; | ||
163 | char *UNCip; | ||
164 | char *iocharset; /* local code page for mapping to and from Unicode */ | ||
165 | char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */ | ||
166 | char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */ | ||
167 | uid_t cred_uid; | ||
168 | uid_t linux_uid; | ||
169 | gid_t linux_gid; | ||
170 | mode_t file_mode; | ||
171 | mode_t dir_mode; | ||
172 | unsigned secFlg; | ||
173 | bool retry:1; | ||
174 | bool intr:1; | ||
175 | bool setuids:1; | ||
176 | bool override_uid:1; | ||
177 | bool override_gid:1; | ||
178 | bool dynperm:1; | ||
179 | bool noperm:1; | ||
180 | bool no_psx_acl:1; /* set if posix acl support should be disabled */ | ||
181 | bool cifs_acl:1; | ||
182 | bool no_xattr:1; /* set if xattr (EA) support should be disabled*/ | ||
183 | bool server_ino:1; /* use inode numbers from server ie UniqueId */ | ||
184 | bool direct_io:1; | ||
185 | bool strict_io:1; /* strict cache behavior */ | ||
186 | bool remap:1; /* set to remap seven reserved chars in filenames */ | ||
187 | bool posix_paths:1; /* unset to not ask for posix pathnames. */ | ||
188 | bool no_linux_ext:1; | ||
189 | bool sfu_emul:1; | ||
190 | bool nullauth:1; /* attempt to authenticate with null user */ | ||
191 | bool nocase:1; /* request case insensitive filenames */ | ||
192 | bool nobrl:1; /* disable sending byte range locks to srv */ | ||
193 | bool mand_lock:1; /* send mandatory not posix byte range lock reqs */ | ||
194 | bool seal:1; /* request transport encryption on share */ | ||
195 | bool nodfs:1; /* Do not request DFS, even if available */ | ||
196 | bool local_lease:1; /* check leases only on local system, not remote */ | ||
197 | bool noblocksnd:1; | ||
198 | bool noautotune:1; | ||
199 | bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ | ||
200 | bool fsc:1; /* enable fscache */ | ||
201 | bool mfsymlinks:1; /* use Minshall+French Symlinks */ | ||
202 | bool multiuser:1; | ||
203 | bool rwpidforward:1; /* pid forward for read/write operations */ | ||
204 | unsigned int rsize; | ||
205 | unsigned int wsize; | ||
206 | bool sockopt_tcp_nodelay:1; | ||
207 | unsigned short int port; | ||
208 | unsigned long actimeo; /* attribute cache timeout (jiffies) */ | ||
209 | char *prepath; | ||
210 | struct sockaddr_storage srcaddr; /* allow binding to a local IP */ | ||
211 | struct nls_table *local_nls; | ||
212 | }; | ||
213 | |||
214 | #define CIFS_MOUNT_MASK (CIFS_MOUNT_NO_PERM | CIFS_MOUNT_SET_UID | \ | ||
215 | CIFS_MOUNT_SERVER_INUM | CIFS_MOUNT_DIRECT_IO | \ | ||
216 | CIFS_MOUNT_NO_XATTR | CIFS_MOUNT_MAP_SPECIAL_CHR | \ | ||
217 | CIFS_MOUNT_UNX_EMUL | CIFS_MOUNT_NO_BRL | \ | ||
218 | CIFS_MOUNT_CIFS_ACL | CIFS_MOUNT_OVERR_UID | \ | ||
219 | CIFS_MOUNT_OVERR_GID | CIFS_MOUNT_DYNPERM | \ | ||
220 | CIFS_MOUNT_NOPOSIXBRL | CIFS_MOUNT_NOSSYNC | \ | ||
221 | CIFS_MOUNT_FSCACHE | CIFS_MOUNT_MF_SYMLINKS | \ | ||
222 | CIFS_MOUNT_MULTIUSER | CIFS_MOUNT_STRICT_IO) | ||
223 | |||
224 | #define CIFS_MS_MASK (MS_RDONLY | MS_MANDLOCK | MS_NOEXEC | MS_NOSUID | \ | ||
225 | MS_NODEV | MS_SYNCHRONOUS) | ||
226 | |||
227 | struct cifs_mnt_data { | ||
228 | struct cifs_sb_info *cifs_sb; | ||
229 | struct smb_vol *vol; | ||
230 | int flags; | ||
231 | }; | ||
232 | |||
158 | struct TCP_Server_Info { | 233 | struct TCP_Server_Info { |
159 | struct list_head tcp_ses_list; | 234 | struct list_head tcp_ses_list; |
160 | struct list_head smb_ses_list; | 235 | struct list_head smb_ses_list; |
@@ -179,7 +254,7 @@ struct TCP_Server_Info { | |||
179 | struct mutex srv_mutex; | 254 | struct mutex srv_mutex; |
180 | struct task_struct *tsk; | 255 | struct task_struct *tsk; |
181 | char server_GUID[16]; | 256 | char server_GUID[16]; |
182 | char secMode; | 257 | char sec_mode; |
183 | bool session_estab; /* mark when very first sess is established */ | 258 | bool session_estab; /* mark when very first sess is established */ |
184 | u16 dialect; /* dialect index that server chose */ | 259 | u16 dialect; /* dialect index that server chose */ |
185 | enum securityEnum secType; | 260 | enum securityEnum secType; |
@@ -254,7 +329,7 @@ static inline void cifs_set_net_ns(struct TCP_Server_Info *srv, struct net *net) | |||
254 | /* | 329 | /* |
255 | * Session structure. One of these for each uid session with a particular host | 330 | * Session structure. One of these for each uid session with a particular host |
256 | */ | 331 | */ |
257 | struct cifsSesInfo { | 332 | struct cifs_ses { |
258 | struct list_head smb_ses_list; | 333 | struct list_head smb_ses_list; |
259 | struct list_head tcon_list; | 334 | struct list_head tcon_list; |
260 | struct mutex session_mutex; | 335 | struct mutex session_mutex; |
@@ -294,11 +369,11 @@ struct cifsSesInfo { | |||
294 | * there is one of these for each connection to a resource on a particular | 369 | * there is one of these for each connection to a resource on a particular |
295 | * session | 370 | * session |
296 | */ | 371 | */ |
297 | struct cifsTconInfo { | 372 | struct cifs_tcon { |
298 | struct list_head tcon_list; | 373 | struct list_head tcon_list; |
299 | int tc_count; | 374 | int tc_count; |
300 | struct list_head openFileList; | 375 | struct list_head openFileList; |
301 | struct cifsSesInfo *ses; /* pointer to session associated with */ | 376 | struct cifs_ses *ses; /* pointer to session associated with */ |
302 | char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */ | 377 | char treeName[MAX_TREE_SIZE + 1]; /* UNC name of resource in ASCII */ |
303 | char *nativeFileSystem; | 378 | char *nativeFileSystem; |
304 | char *password; /* for share-level security */ | 379 | char *password; /* for share-level security */ |
@@ -380,12 +455,12 @@ struct tcon_link { | |||
380 | #define TCON_LINK_IN_TREE 2 | 455 | #define TCON_LINK_IN_TREE 2 |
381 | unsigned long tl_time; | 456 | unsigned long tl_time; |
382 | atomic_t tl_count; | 457 | atomic_t tl_count; |
383 | struct cifsTconInfo *tl_tcon; | 458 | struct cifs_tcon *tl_tcon; |
384 | }; | 459 | }; |
385 | 460 | ||
386 | extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb); | 461 | extern struct tcon_link *cifs_sb_tlink(struct cifs_sb_info *cifs_sb); |
387 | 462 | ||
388 | static inline struct cifsTconInfo * | 463 | static inline struct cifs_tcon * |
389 | tlink_tcon(struct tcon_link *tlink) | 464 | tlink_tcon(struct tcon_link *tlink) |
390 | { | 465 | { |
391 | return tlink->tl_tcon; | 466 | return tlink->tl_tcon; |
@@ -402,7 +477,7 @@ cifs_get_tlink(struct tcon_link *tlink) | |||
402 | } | 477 | } |
403 | 478 | ||
404 | /* This function is always expected to succeed */ | 479 | /* This function is always expected to succeed */ |
405 | extern struct cifsTconInfo *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb); | 480 | extern struct cifs_tcon *cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb); |
406 | 481 | ||
407 | /* | 482 | /* |
408 | * This info hangs off the cifsFileInfo structure, pointed to by llist. | 483 | * This info hangs off the cifsFileInfo structure, pointed to by llist. |
@@ -455,6 +530,14 @@ struct cifsFileInfo { | |||
455 | struct work_struct oplock_break; /* work for oplock breaks */ | 530 | struct work_struct oplock_break; /* work for oplock breaks */ |
456 | }; | 531 | }; |
457 | 532 | ||
533 | struct cifs_io_parms { | ||
534 | __u16 netfid; | ||
535 | __u32 pid; | ||
536 | __u64 offset; | ||
537 | unsigned int length; | ||
538 | struct cifs_tcon *tcon; | ||
539 | }; | ||
540 | |||
458 | /* | 541 | /* |
459 | * Take a reference on the file private data. Must be called with | 542 | * Take a reference on the file private data. Must be called with |
460 | * cifs_file_list_lock held. | 543 | * cifs_file_list_lock held. |
@@ -509,10 +592,30 @@ static inline char CIFS_DIR_SEP(const struct cifs_sb_info *cifs_sb) | |||
509 | return '\\'; | 592 | return '\\'; |
510 | } | 593 | } |
511 | 594 | ||
595 | static inline void | ||
596 | convert_delimiter(char *path, char delim) | ||
597 | { | ||
598 | int i; | ||
599 | char old_delim; | ||
600 | |||
601 | if (path == NULL) | ||
602 | return; | ||
603 | |||
604 | if (delim == '/') | ||
605 | old_delim = '\\'; | ||
606 | else | ||
607 | old_delim = '/'; | ||
608 | |||
609 | for (i = 0; path[i] != '\0'; i++) { | ||
610 | if (path[i] == old_delim) | ||
611 | path[i] = delim; | ||
612 | } | ||
613 | } | ||
614 | |||
512 | #ifdef CONFIG_CIFS_STATS | 615 | #ifdef CONFIG_CIFS_STATS |
513 | #define cifs_stats_inc atomic_inc | 616 | #define cifs_stats_inc atomic_inc |
514 | 617 | ||
515 | static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon, | 618 | static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon, |
516 | unsigned int bytes) | 619 | unsigned int bytes) |
517 | { | 620 | { |
518 | if (bytes) { | 621 | if (bytes) { |
@@ -522,7 +625,7 @@ static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon, | |||
522 | } | 625 | } |
523 | } | 626 | } |
524 | 627 | ||
525 | static inline void cifs_stats_bytes_read(struct cifsTconInfo *tcon, | 628 | static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon, |
526 | unsigned int bytes) | 629 | unsigned int bytes) |
527 | { | 630 | { |
528 | spin_lock(&tcon->stat_lock); | 631 | spin_lock(&tcon->stat_lock); |
@@ -543,9 +646,8 @@ struct mid_q_entry; | |||
543 | * This is the prototype for the mid callback function. When creating one, | 646 | * This is the prototype for the mid callback function. When creating one, |
544 | * take special care to avoid deadlocks. Things to bear in mind: | 647 | * take special care to avoid deadlocks. Things to bear in mind: |
545 | * | 648 | * |
546 | * - it will be called by cifsd | 649 | * - it will be called by cifsd, with no locks held |
547 | * - the GlobalMid_Lock will be held | 650 | * - the mid will be removed from any lists |
548 | * - the mid will be removed from the pending_mid_q list | ||
549 | */ | 651 | */ |
550 | typedef void (mid_callback_t)(struct mid_q_entry *mid); | 652 | typedef void (mid_callback_t)(struct mid_q_entry *mid); |
551 | 653 | ||
@@ -573,7 +675,7 @@ struct mid_q_entry { | |||
573 | struct oplock_q_entry { | 675 | struct oplock_q_entry { |
574 | struct list_head qhead; | 676 | struct list_head qhead; |
575 | struct inode *pinode; | 677 | struct inode *pinode; |
576 | struct cifsTconInfo *tcon; | 678 | struct cifs_tcon *tcon; |
577 | __u16 netfid; | 679 | __u16 netfid; |
578 | }; | 680 | }; |
579 | 681 | ||
@@ -656,6 +758,7 @@ static inline void free_dfs_info_array(struct dfs_info3_param *param, | |||
656 | #define MID_RESPONSE_RECEIVED 4 | 758 | #define MID_RESPONSE_RECEIVED 4 |
657 | #define MID_RETRY_NEEDED 8 /* session closed while this request out */ | 759 | #define MID_RETRY_NEEDED 8 /* session closed while this request out */ |
658 | #define MID_RESPONSE_MALFORMED 0x10 | 760 | #define MID_RESPONSE_MALFORMED 0x10 |
761 | #define MID_SHUTDOWN 0x20 | ||
659 | 762 | ||
660 | /* Types of response buffer returned from SendReceive2 */ | 763 | /* Types of response buffer returned from SendReceive2 */ |
661 | #define CIFS_NO_BUFFER 0 /* Response buffer not returned */ | 764 | #define CIFS_NO_BUFFER 0 /* Response buffer not returned */ |
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h index 6e69e06a30b3..953f84413c77 100644 --- a/fs/cifs/cifsproto.h +++ b/fs/cifs/cifsproto.h | |||
@@ -57,8 +57,9 @@ extern int init_cifs_idmap(void); | |||
57 | extern void exit_cifs_idmap(void); | 57 | extern void exit_cifs_idmap(void); |
58 | extern void cifs_destroy_idmaptrees(void); | 58 | extern void cifs_destroy_idmaptrees(void); |
59 | extern char *build_path_from_dentry(struct dentry *); | 59 | extern char *build_path_from_dentry(struct dentry *); |
60 | extern char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb, | 60 | extern char *cifs_build_path_to_root(struct smb_vol *vol, |
61 | struct cifsTconInfo *tcon); | 61 | struct cifs_sb_info *cifs_sb, |
62 | struct cifs_tcon *tcon); | ||
62 | extern char *build_wildcard_path_from_dentry(struct dentry *direntry); | 63 | extern char *build_wildcard_path_from_dentry(struct dentry *direntry); |
63 | extern char *cifs_compose_mount_options(const char *sb_mountdata, | 64 | extern char *cifs_compose_mount_options(const char *sb_mountdata, |
64 | const char *fullpath, const struct dfs_info3_param *ref, | 65 | const char *fullpath, const struct dfs_info3_param *ref, |
@@ -67,20 +68,22 @@ extern char *cifs_compose_mount_options(const char *sb_mountdata, | |||
67 | extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, | 68 | extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer, |
68 | struct TCP_Server_Info *server); | 69 | struct TCP_Server_Info *server); |
69 | extern void DeleteMidQEntry(struct mid_q_entry *midEntry); | 70 | extern void DeleteMidQEntry(struct mid_q_entry *midEntry); |
70 | extern int cifs_call_async(struct TCP_Server_Info *server, | 71 | extern int cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, |
71 | struct smb_hdr *in_buf, mid_callback_t *callback, | 72 | unsigned int nvec, mid_callback_t *callback, |
72 | void *cbdata); | 73 | void *cbdata, bool ignore_pend); |
73 | extern int SendReceive(const unsigned int /* xid */ , struct cifsSesInfo *, | 74 | extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *, |
74 | struct smb_hdr * /* input */ , | 75 | struct smb_hdr * /* input */ , |
75 | struct smb_hdr * /* out */ , | 76 | struct smb_hdr * /* out */ , |
76 | int * /* bytes returned */ , const int long_op); | 77 | int * /* bytes returned */ , const int long_op); |
77 | extern int SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses, | 78 | extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses, |
78 | struct smb_hdr *in_buf, int flags); | 79 | struct smb_hdr *in_buf, int flags); |
79 | extern int SendReceive2(const unsigned int /* xid */ , struct cifsSesInfo *, | 80 | extern int cifs_check_receive(struct mid_q_entry *mid, |
81 | struct TCP_Server_Info *server, bool log_error); | ||
82 | extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *, | ||
80 | struct kvec *, int /* nvec to send */, | 83 | struct kvec *, int /* nvec to send */, |
81 | int * /* type of buf returned */ , const int flags); | 84 | int * /* type of buf returned */ , const int flags); |
82 | extern int SendReceiveBlockingLock(const unsigned int xid, | 85 | extern int SendReceiveBlockingLock(const unsigned int xid, |
83 | struct cifsTconInfo *ptcon, | 86 | struct cifs_tcon *ptcon, |
84 | struct smb_hdr *in_buf , | 87 | struct smb_hdr *in_buf , |
85 | struct smb_hdr *out_buf, | 88 | struct smb_hdr *out_buf, |
86 | int *bytes_returned); | 89 | int *bytes_returned); |
@@ -99,14 +102,14 @@ extern int cifs_convert_address(struct sockaddr *dst, const char *src, int len); | |||
99 | extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port); | 102 | extern int cifs_set_port(struct sockaddr *addr, const unsigned short int port); |
100 | extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, | 103 | extern int cifs_fill_sockaddr(struct sockaddr *dst, const char *src, int len, |
101 | const unsigned short int port); | 104 | const unsigned short int port); |
102 | extern int map_smb_to_linux_error(struct smb_hdr *smb, int logErr); | 105 | extern int map_smb_to_linux_error(struct smb_hdr *smb, bool logErr); |
103 | extern void header_assemble(struct smb_hdr *, char /* command */ , | 106 | extern void header_assemble(struct smb_hdr *, char /* command */ , |
104 | const struct cifsTconInfo *, int /* length of | 107 | const struct cifs_tcon *, int /* length of |
105 | fixed section (word count) in two byte units */); | 108 | fixed section (word count) in two byte units */); |
106 | extern int small_smb_init_no_tc(const int smb_cmd, const int wct, | 109 | extern int small_smb_init_no_tc(const int smb_cmd, const int wct, |
107 | struct cifsSesInfo *ses, | 110 | struct cifs_ses *ses, |
108 | void **request_buf); | 111 | void **request_buf); |
109 | extern int CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, | 112 | extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses, |
110 | const struct nls_table *nls_cp); | 113 | const struct nls_table *nls_cp); |
111 | extern __u16 GetNextMid(struct TCP_Server_Info *server); | 114 | extern __u16 GetNextMid(struct TCP_Server_Info *server); |
112 | extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); | 115 | extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); |
@@ -148,102 +151,108 @@ extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *, | |||
148 | extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, | 151 | extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *, |
149 | const char *); | 152 | const char *); |
150 | 153 | ||
154 | extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, | ||
155 | struct cifs_sb_info *cifs_sb); | ||
156 | extern int cifs_match_super(struct super_block *, void *); | ||
157 | extern void cifs_cleanup_volume_info(struct smb_vol **pvolume_info); | ||
158 | extern int cifs_setup_volume_info(struct smb_vol **pvolume_info, | ||
159 | char *mount_data, const char *devname); | ||
151 | extern int cifs_mount(struct super_block *, struct cifs_sb_info *, | 160 | extern int cifs_mount(struct super_block *, struct cifs_sb_info *, |
152 | const char *); | 161 | struct smb_vol *, const char *); |
153 | extern int cifs_umount(struct super_block *, struct cifs_sb_info *); | 162 | extern int cifs_umount(struct super_block *, struct cifs_sb_info *); |
154 | extern void cifs_dfs_release_automount_timer(void); | 163 | extern void cifs_dfs_release_automount_timer(void); |
155 | void cifs_proc_init(void); | 164 | void cifs_proc_init(void); |
156 | void cifs_proc_clean(void); | 165 | void cifs_proc_clean(void); |
157 | 166 | ||
158 | extern int cifs_negotiate_protocol(unsigned int xid, | 167 | extern int cifs_negotiate_protocol(unsigned int xid, |
159 | struct cifsSesInfo *ses); | 168 | struct cifs_ses *ses); |
160 | extern int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses, | 169 | extern int cifs_setup_session(unsigned int xid, struct cifs_ses *ses, |
161 | struct nls_table *nls_info); | 170 | struct nls_table *nls_info); |
162 | extern int CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses); | 171 | extern int CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses); |
163 | 172 | ||
164 | extern int CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, | 173 | extern int CIFSTCon(unsigned int xid, struct cifs_ses *ses, |
165 | const char *tree, struct cifsTconInfo *tcon, | 174 | const char *tree, struct cifs_tcon *tcon, |
166 | const struct nls_table *); | 175 | const struct nls_table *); |
167 | 176 | ||
168 | extern int CIFSFindFirst(const int xid, struct cifsTconInfo *tcon, | 177 | extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon, |
169 | const char *searchName, const struct nls_table *nls_codepage, | 178 | const char *searchName, const struct nls_table *nls_codepage, |
170 | __u16 *searchHandle, struct cifs_search_info *psrch_inf, | 179 | __u16 *searchHandle, struct cifs_search_info *psrch_inf, |
171 | int map, const char dirsep); | 180 | int map, const char dirsep); |
172 | 181 | ||
173 | extern int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, | 182 | extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon, |
174 | __u16 searchHandle, struct cifs_search_info *psrch_inf); | 183 | __u16 searchHandle, struct cifs_search_info *psrch_inf); |
175 | 184 | ||
176 | extern int CIFSFindClose(const int, struct cifsTconInfo *tcon, | 185 | extern int CIFSFindClose(const int, struct cifs_tcon *tcon, |
177 | const __u16 search_handle); | 186 | const __u16 search_handle); |
178 | 187 | ||
179 | extern int CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon, | 188 | extern int CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon, |
180 | u16 netfid, FILE_ALL_INFO *pFindData); | 189 | u16 netfid, FILE_ALL_INFO *pFindData); |
181 | extern int CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon, | 190 | extern int CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon, |
182 | const unsigned char *searchName, | 191 | const unsigned char *searchName, |
183 | FILE_ALL_INFO *findData, | 192 | FILE_ALL_INFO *findData, |
184 | int legacy /* whether to use old info level */, | 193 | int legacy /* whether to use old info level */, |
185 | const struct nls_table *nls_codepage, int remap); | 194 | const struct nls_table *nls_codepage, int remap); |
186 | extern int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon, | 195 | extern int SMBQueryInformation(const int xid, struct cifs_tcon *tcon, |
187 | const unsigned char *searchName, | 196 | const unsigned char *searchName, |
188 | FILE_ALL_INFO *findData, | 197 | FILE_ALL_INFO *findData, |
189 | const struct nls_table *nls_codepage, int remap); | 198 | const struct nls_table *nls_codepage, int remap); |
190 | 199 | ||
191 | extern int CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon, | 200 | extern int CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon, |
192 | u16 netfid, FILE_UNIX_BASIC_INFO *pFindData); | 201 | u16 netfid, FILE_UNIX_BASIC_INFO *pFindData); |
193 | extern int CIFSSMBUnixQPathInfo(const int xid, | 202 | extern int CIFSSMBUnixQPathInfo(const int xid, |
194 | struct cifsTconInfo *tcon, | 203 | struct cifs_tcon *tcon, |
195 | const unsigned char *searchName, | 204 | const unsigned char *searchName, |
196 | FILE_UNIX_BASIC_INFO *pFindData, | 205 | FILE_UNIX_BASIC_INFO *pFindData, |
197 | const struct nls_table *nls_codepage, int remap); | 206 | const struct nls_table *nls_codepage, int remap); |
198 | 207 | ||
199 | extern int CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses, | 208 | extern int CIFSGetDFSRefer(const int xid, struct cifs_ses *ses, |
200 | const unsigned char *searchName, | 209 | const unsigned char *searchName, |
201 | struct dfs_info3_param **target_nodes, | 210 | struct dfs_info3_param **target_nodes, |
202 | unsigned int *number_of_nodes_in_array, | 211 | unsigned int *number_of_nodes_in_array, |
203 | const struct nls_table *nls_codepage, int remap); | 212 | const struct nls_table *nls_codepage, int remap); |
204 | 213 | ||
205 | extern int get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, | 214 | extern int get_dfs_path(int xid, struct cifs_ses *pSesInfo, |
206 | const char *old_path, | 215 | const char *old_path, |
207 | const struct nls_table *nls_codepage, | 216 | const struct nls_table *nls_codepage, |
208 | unsigned int *pnum_referrals, | 217 | unsigned int *pnum_referrals, |
209 | struct dfs_info3_param **preferrals, | 218 | struct dfs_info3_param **preferrals, |
210 | int remap); | 219 | int remap); |
211 | extern void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | 220 | extern void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, |
212 | struct super_block *sb, struct smb_vol *vol); | 221 | struct super_block *sb, struct smb_vol *vol); |
213 | extern int CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon, | 222 | extern int CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, |
214 | struct kstatfs *FSData); | 223 | struct kstatfs *FSData); |
215 | extern int SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon, | 224 | extern int SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, |
216 | struct kstatfs *FSData); | 225 | struct kstatfs *FSData); |
217 | extern int CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, | 226 | extern int CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon, |
218 | __u64 cap); | 227 | __u64 cap); |
219 | 228 | ||
220 | extern int CIFSSMBQFSAttributeInfo(const int xid, | 229 | extern int CIFSSMBQFSAttributeInfo(const int xid, |
221 | struct cifsTconInfo *tcon); | 230 | struct cifs_tcon *tcon); |
222 | extern int CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon); | 231 | extern int CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon); |
223 | extern int CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon); | 232 | extern int CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon); |
224 | extern int CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon, | 233 | extern int CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon, |
225 | struct kstatfs *FSData); | 234 | struct kstatfs *FSData); |
226 | 235 | ||
227 | extern int CIFSSMBSetPathInfo(const int xid, struct cifsTconInfo *tcon, | 236 | extern int CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon, |
228 | const char *fileName, const FILE_BASIC_INFO *data, | 237 | const char *fileName, const FILE_BASIC_INFO *data, |
229 | const struct nls_table *nls_codepage, | 238 | const struct nls_table *nls_codepage, |
230 | int remap_special_chars); | 239 | int remap_special_chars); |
231 | extern int CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon, | 240 | extern int CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon, |
232 | const FILE_BASIC_INFO *data, __u16 fid, | 241 | const FILE_BASIC_INFO *data, __u16 fid, |
233 | __u32 pid_of_opener); | 242 | __u32 pid_of_opener); |
234 | extern int CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon, | 243 | extern int CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon, |
235 | bool delete_file, __u16 fid, __u32 pid_of_opener); | 244 | bool delete_file, __u16 fid, __u32 pid_of_opener); |
236 | #if 0 | 245 | #if 0 |
237 | extern int CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, | 246 | extern int CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon, |
238 | char *fileName, __u16 dos_attributes, | 247 | char *fileName, __u16 dos_attributes, |
239 | const struct nls_table *nls_codepage); | 248 | const struct nls_table *nls_codepage); |
240 | #endif /* possibly unneeded function */ | 249 | #endif /* possibly unneeded function */ |
241 | extern int CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, | 250 | extern int CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon, |
242 | const char *fileName, __u64 size, | 251 | const char *fileName, __u64 size, |
243 | bool setAllocationSizeFlag, | 252 | bool setAllocationSizeFlag, |
244 | const struct nls_table *nls_codepage, | 253 | const struct nls_table *nls_codepage, |
245 | int remap_special_chars); | 254 | int remap_special_chars); |
246 | extern int CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, | 255 | extern int CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, |
247 | __u64 size, __u16 fileHandle, __u32 opener_pid, | 256 | __u64 size, __u16 fileHandle, __u32 opener_pid, |
248 | bool AllocSizeFlag); | 257 | bool AllocSizeFlag); |
249 | 258 | ||
@@ -257,120 +266,116 @@ struct cifs_unix_set_info_args { | |||
257 | dev_t device; | 266 | dev_t device; |
258 | }; | 267 | }; |
259 | 268 | ||
260 | extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon, | 269 | extern int CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon, |
261 | const struct cifs_unix_set_info_args *args, | 270 | const struct cifs_unix_set_info_args *args, |
262 | u16 fid, u32 pid_of_opener); | 271 | u16 fid, u32 pid_of_opener); |
263 | 272 | ||
264 | extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *pTcon, | 273 | extern int CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *pTcon, |
265 | char *fileName, | 274 | char *fileName, |
266 | const struct cifs_unix_set_info_args *args, | 275 | const struct cifs_unix_set_info_args *args, |
267 | const struct nls_table *nls_codepage, | 276 | const struct nls_table *nls_codepage, |
268 | int remap_special_chars); | 277 | int remap_special_chars); |
269 | 278 | ||
270 | extern int CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon, | 279 | extern int CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon, |
271 | const char *newName, | 280 | const char *newName, |
272 | const struct nls_table *nls_codepage, | 281 | const struct nls_table *nls_codepage, |
273 | int remap_special_chars); | 282 | int remap_special_chars); |
274 | extern int CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, | 283 | extern int CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon, |
275 | const char *name, const struct nls_table *nls_codepage, | 284 | const char *name, const struct nls_table *nls_codepage, |
276 | int remap_special_chars); | 285 | int remap_special_chars); |
277 | extern int CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon, | 286 | extern int CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon, |
278 | const char *name, __u16 type, | 287 | const char *name, __u16 type, |
279 | const struct nls_table *nls_codepage, | 288 | const struct nls_table *nls_codepage, |
280 | int remap_special_chars); | 289 | int remap_special_chars); |
281 | extern int CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon, | 290 | extern int CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon, |
282 | const char *name, | 291 | const char *name, |
283 | const struct nls_table *nls_codepage, | 292 | const struct nls_table *nls_codepage, |
284 | int remap_special_chars); | 293 | int remap_special_chars); |
285 | extern int CIFSSMBRename(const int xid, struct cifsTconInfo *tcon, | 294 | extern int CIFSSMBRename(const int xid, struct cifs_tcon *tcon, |
286 | const char *fromName, const char *toName, | 295 | const char *fromName, const char *toName, |
287 | const struct nls_table *nls_codepage, | 296 | const struct nls_table *nls_codepage, |
288 | int remap_special_chars); | 297 | int remap_special_chars); |
289 | extern int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon, | 298 | extern int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon, |
290 | int netfid, const char *target_name, | 299 | int netfid, const char *target_name, |
291 | const struct nls_table *nls_codepage, | 300 | const struct nls_table *nls_codepage, |
292 | int remap_special_chars); | 301 | int remap_special_chars); |
293 | extern int CIFSCreateHardLink(const int xid, | 302 | extern int CIFSCreateHardLink(const int xid, |
294 | struct cifsTconInfo *tcon, | 303 | struct cifs_tcon *tcon, |
295 | const char *fromName, const char *toName, | 304 | const char *fromName, const char *toName, |
296 | const struct nls_table *nls_codepage, | 305 | const struct nls_table *nls_codepage, |
297 | int remap_special_chars); | 306 | int remap_special_chars); |
298 | extern int CIFSUnixCreateHardLink(const int xid, | 307 | extern int CIFSUnixCreateHardLink(const int xid, |
299 | struct cifsTconInfo *tcon, | 308 | struct cifs_tcon *tcon, |
300 | const char *fromName, const char *toName, | 309 | const char *fromName, const char *toName, |
301 | const struct nls_table *nls_codepage, | 310 | const struct nls_table *nls_codepage, |
302 | int remap_special_chars); | 311 | int remap_special_chars); |
303 | extern int CIFSUnixCreateSymLink(const int xid, | 312 | extern int CIFSUnixCreateSymLink(const int xid, |
304 | struct cifsTconInfo *tcon, | 313 | struct cifs_tcon *tcon, |
305 | const char *fromName, const char *toName, | 314 | const char *fromName, const char *toName, |
306 | const struct nls_table *nls_codepage); | 315 | const struct nls_table *nls_codepage); |
307 | extern int CIFSSMBUnixQuerySymLink(const int xid, | 316 | extern int CIFSSMBUnixQuerySymLink(const int xid, |
308 | struct cifsTconInfo *tcon, | 317 | struct cifs_tcon *tcon, |
309 | const unsigned char *searchName, char **syminfo, | 318 | const unsigned char *searchName, char **syminfo, |
310 | const struct nls_table *nls_codepage); | 319 | const struct nls_table *nls_codepage); |
311 | #ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL | 320 | #ifdef CONFIG_CIFS_SYMLINK_EXPERIMENTAL |
312 | extern int CIFSSMBQueryReparseLinkInfo(const int xid, | 321 | extern int CIFSSMBQueryReparseLinkInfo(const int xid, |
313 | struct cifsTconInfo *tcon, | 322 | struct cifs_tcon *tcon, |
314 | const unsigned char *searchName, | 323 | const unsigned char *searchName, |
315 | char *symlinkinfo, const int buflen, __u16 fid, | 324 | char *symlinkinfo, const int buflen, __u16 fid, |
316 | const struct nls_table *nls_codepage); | 325 | const struct nls_table *nls_codepage); |
317 | #endif /* temporarily unused until cifs_symlink fixed */ | 326 | #endif /* temporarily unused until cifs_symlink fixed */ |
318 | extern int CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon, | 327 | extern int CIFSSMBOpen(const int xid, struct cifs_tcon *tcon, |
319 | const char *fileName, const int disposition, | 328 | const char *fileName, const int disposition, |
320 | const int access_flags, const int omode, | 329 | const int access_flags, const int omode, |
321 | __u16 *netfid, int *pOplock, FILE_ALL_INFO *, | 330 | __u16 *netfid, int *pOplock, FILE_ALL_INFO *, |
322 | const struct nls_table *nls_codepage, int remap); | 331 | const struct nls_table *nls_codepage, int remap); |
323 | extern int SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon, | 332 | extern int SMBLegacyOpen(const int xid, struct cifs_tcon *tcon, |
324 | const char *fileName, const int disposition, | 333 | const char *fileName, const int disposition, |
325 | const int access_flags, const int omode, | 334 | const int access_flags, const int omode, |
326 | __u16 *netfid, int *pOplock, FILE_ALL_INFO *, | 335 | __u16 *netfid, int *pOplock, FILE_ALL_INFO *, |
327 | const struct nls_table *nls_codepage, int remap); | 336 | const struct nls_table *nls_codepage, int remap); |
328 | extern int CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, | 337 | extern int CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon, |
329 | u32 posix_flags, __u64 mode, __u16 *netfid, | 338 | u32 posix_flags, __u64 mode, __u16 *netfid, |
330 | FILE_UNIX_BASIC_INFO *pRetData, | 339 | FILE_UNIX_BASIC_INFO *pRetData, |
331 | __u32 *pOplock, const char *name, | 340 | __u32 *pOplock, const char *name, |
332 | const struct nls_table *nls_codepage, int remap); | 341 | const struct nls_table *nls_codepage, int remap); |
333 | extern int CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, | 342 | extern int CIFSSMBClose(const int xid, struct cifs_tcon *tcon, |
334 | const int smb_file_id); | 343 | const int smb_file_id); |
335 | 344 | ||
336 | extern int CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, | 345 | extern int CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, |
337 | const int smb_file_id); | 346 | const int smb_file_id); |
338 | 347 | ||
339 | extern int CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, | 348 | extern int CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, |
340 | const int netfid, unsigned int count, | 349 | unsigned int *nbytes, char **buf, |
341 | const __u64 lseek, unsigned int *nbytes, char **buf, | ||
342 | int *return_buf_type); | 350 | int *return_buf_type); |
343 | extern int CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, | 351 | extern int CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms, |
344 | const int netfid, const unsigned int count, | 352 | unsigned int *nbytes, const char *buf, |
345 | const __u64 lseek, unsigned int *nbytes, | 353 | const char __user *ubuf, const int long_op); |
346 | const char *buf, const char __user *ubuf, | 354 | extern int CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms, |
355 | unsigned int *nbytes, struct kvec *iov, const int nvec, | ||
347 | const int long_op); | 356 | const int long_op); |
348 | extern int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, | 357 | extern int CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon, |
349 | const int netfid, const unsigned int count, | ||
350 | const __u64 offset, unsigned int *nbytes, | ||
351 | struct kvec *iov, const int nvec, const int long_op); | ||
352 | extern int CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon, | ||
353 | const unsigned char *searchName, __u64 *inode_number, | 358 | const unsigned char *searchName, __u64 *inode_number, |
354 | const struct nls_table *nls_codepage, | 359 | const struct nls_table *nls_codepage, |
355 | int remap_special_chars); | 360 | int remap_special_chars); |
356 | 361 | ||
357 | extern int CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, | 362 | extern int CIFSSMBLock(const int xid, struct cifs_tcon *tcon, |
358 | const __u16 netfid, const __u64 len, | 363 | const __u16 netfid, const __u64 len, |
359 | const __u64 offset, const __u32 numUnlock, | 364 | const __u64 offset, const __u32 numUnlock, |
360 | const __u32 numLock, const __u8 lockType, | 365 | const __u32 numLock, const __u8 lockType, |
361 | const bool waitFlag, const __u8 oplock_level); | 366 | const bool waitFlag, const __u8 oplock_level); |
362 | extern int CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, | 367 | extern int CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon, |
363 | const __u16 smb_file_id, const int get_flag, | 368 | const __u16 smb_file_id, const int get_flag, |
364 | const __u64 len, struct file_lock *, | 369 | const __u64 len, struct file_lock *, |
365 | const __u16 lock_type, const bool waitFlag); | 370 | const __u16 lock_type, const bool waitFlag); |
366 | extern int CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon); | 371 | extern int CIFSSMBTDis(const int xid, struct cifs_tcon *tcon); |
367 | extern int CIFSSMBEcho(struct TCP_Server_Info *server); | 372 | extern int CIFSSMBEcho(struct TCP_Server_Info *server); |
368 | extern int CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses); | 373 | extern int CIFSSMBLogoff(const int xid, struct cifs_ses *ses); |
369 | 374 | ||
370 | extern struct cifsSesInfo *sesInfoAlloc(void); | 375 | extern struct cifs_ses *sesInfoAlloc(void); |
371 | extern void sesInfoFree(struct cifsSesInfo *); | 376 | extern void sesInfoFree(struct cifs_ses *); |
372 | extern struct cifsTconInfo *tconInfoAlloc(void); | 377 | extern struct cifs_tcon *tconInfoAlloc(void); |
373 | extern void tconInfoFree(struct cifsTconInfo *); | 378 | extern void tconInfoFree(struct cifs_tcon *); |
374 | 379 | ||
375 | extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *); | 380 | extern int cifs_sign_smb(struct smb_hdr *, struct TCP_Server_Info *, __u32 *); |
376 | extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, | 381 | extern int cifs_sign_smb2(struct kvec *iov, int n_vec, struct TCP_Server_Info *, |
@@ -379,51 +384,51 @@ extern int cifs_verify_signature(struct smb_hdr *, | |||
379 | struct TCP_Server_Info *server, | 384 | struct TCP_Server_Info *server, |
380 | __u32 expected_sequence_number); | 385 | __u32 expected_sequence_number); |
381 | extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *); | 386 | extern int SMBNTencrypt(unsigned char *, unsigned char *, unsigned char *); |
382 | extern int setup_ntlm_response(struct cifsSesInfo *); | 387 | extern int setup_ntlm_response(struct cifs_ses *); |
383 | extern int setup_ntlmv2_rsp(struct cifsSesInfo *, const struct nls_table *); | 388 | extern int setup_ntlmv2_rsp(struct cifs_ses *, const struct nls_table *); |
384 | extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *); | 389 | extern int cifs_crypto_shash_allocate(struct TCP_Server_Info *); |
385 | extern void cifs_crypto_shash_release(struct TCP_Server_Info *); | 390 | extern void cifs_crypto_shash_release(struct TCP_Server_Info *); |
386 | extern int calc_seckey(struct cifsSesInfo *); | 391 | extern int calc_seckey(struct cifs_ses *); |
387 | 392 | ||
388 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | 393 | #ifdef CONFIG_CIFS_WEAK_PW_HASH |
389 | extern int calc_lanman_hash(const char *password, const char *cryptkey, | 394 | extern int calc_lanman_hash(const char *password, const char *cryptkey, |
390 | bool encrypt, char *lnm_session_key); | 395 | bool encrypt, char *lnm_session_key); |
391 | #endif /* CIFS_WEAK_PW_HASH */ | 396 | #endif /* CIFS_WEAK_PW_HASH */ |
392 | #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */ | 397 | #ifdef CONFIG_CIFS_DNOTIFY_EXPERIMENTAL /* unused temporarily */ |
393 | extern int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon, | 398 | extern int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon, |
394 | const int notify_subdirs, const __u16 netfid, | 399 | const int notify_subdirs, const __u16 netfid, |
395 | __u32 filter, struct file *file, int multishot, | 400 | __u32 filter, struct file *file, int multishot, |
396 | const struct nls_table *nls_codepage); | 401 | const struct nls_table *nls_codepage); |
397 | #endif /* was needed for dnotify, and will be needed for inotify when VFS fix */ | 402 | #endif /* was needed for dnotify, and will be needed for inotify when VFS fix */ |
398 | extern int CIFSSMBCopy(int xid, | 403 | extern int CIFSSMBCopy(int xid, |
399 | struct cifsTconInfo *source_tcon, | 404 | struct cifs_tcon *source_tcon, |
400 | const char *fromName, | 405 | const char *fromName, |
401 | const __u16 target_tid, | 406 | const __u16 target_tid, |
402 | const char *toName, const int flags, | 407 | const char *toName, const int flags, |
403 | const struct nls_table *nls_codepage, | 408 | const struct nls_table *nls_codepage, |
404 | int remap_special_chars); | 409 | int remap_special_chars); |
405 | extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon, | 410 | extern ssize_t CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon, |
406 | const unsigned char *searchName, | 411 | const unsigned char *searchName, |
407 | const unsigned char *ea_name, char *EAData, | 412 | const unsigned char *ea_name, char *EAData, |
408 | size_t bufsize, const struct nls_table *nls_codepage, | 413 | size_t bufsize, const struct nls_table *nls_codepage, |
409 | int remap_special_chars); | 414 | int remap_special_chars); |
410 | extern int CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, | 415 | extern int CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon, |
411 | const char *fileName, const char *ea_name, | 416 | const char *fileName, const char *ea_name, |
412 | const void *ea_value, const __u16 ea_value_len, | 417 | const void *ea_value, const __u16 ea_value_len, |
413 | const struct nls_table *nls_codepage, int remap_special_chars); | 418 | const struct nls_table *nls_codepage, int remap_special_chars); |
414 | extern int CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, | 419 | extern int CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, |
415 | __u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen); | 420 | __u16 fid, struct cifs_ntsd **acl_inf, __u32 *buflen); |
416 | extern int CIFSSMBSetCIFSACL(const int, struct cifsTconInfo *, __u16, | 421 | extern int CIFSSMBSetCIFSACL(const int, struct cifs_tcon *, __u16, |
417 | struct cifs_ntsd *, __u32); | 422 | struct cifs_ntsd *, __u32); |
418 | extern int CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon, | 423 | extern int CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon, |
419 | const unsigned char *searchName, | 424 | const unsigned char *searchName, |
420 | char *acl_inf, const int buflen, const int acl_type, | 425 | char *acl_inf, const int buflen, const int acl_type, |
421 | const struct nls_table *nls_codepage, int remap_special_chars); | 426 | const struct nls_table *nls_codepage, int remap_special_chars); |
422 | extern int CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon, | 427 | extern int CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon, |
423 | const unsigned char *fileName, | 428 | const unsigned char *fileName, |
424 | const char *local_acl, const int buflen, const int acl_type, | 429 | const char *local_acl, const int buflen, const int acl_type, |
425 | const struct nls_table *nls_codepage, int remap_special_chars); | 430 | const struct nls_table *nls_codepage, int remap_special_chars); |
426 | extern int CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon, | 431 | extern int CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon, |
427 | const int netfid, __u64 *pExtAttrBits, __u64 *pMask); | 432 | const int netfid, __u64 *pExtAttrBits, __u64 *pMask); |
428 | extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb); | 433 | extern void cifs_autodisable_serverino(struct cifs_sb_info *cifs_sb); |
429 | extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr); | 434 | extern bool CIFSCouldBeMFSymlink(const struct cifs_fattr *fattr); |
@@ -434,4 +439,22 @@ extern int mdfour(unsigned char *, unsigned char *, int); | |||
434 | extern int E_md4hash(const unsigned char *passwd, unsigned char *p16); | 439 | extern int E_md4hash(const unsigned char *passwd, unsigned char *p16); |
435 | extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8, | 440 | extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8, |
436 | unsigned char *p24); | 441 | unsigned char *p24); |
442 | |||
443 | /* asynchronous write support */ | ||
444 | struct cifs_writedata { | ||
445 | struct kref refcount; | ||
446 | enum writeback_sync_modes sync_mode; | ||
447 | struct work_struct work; | ||
448 | struct cifsFileInfo *cfile; | ||
449 | __u64 offset; | ||
450 | unsigned int bytes; | ||
451 | int result; | ||
452 | unsigned int nr_pages; | ||
453 | struct page *pages[1]; | ||
454 | }; | ||
455 | |||
456 | int cifs_async_writev(struct cifs_writedata *wdata); | ||
457 | struct cifs_writedata *cifs_writedata_alloc(unsigned int nr_pages); | ||
458 | void cifs_writedata_release(struct kref *refcount); | ||
459 | |||
437 | #endif /* _CIFSPROTO_H */ | 460 | #endif /* _CIFSPROTO_H */ |
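The asynchronous write pieces declared above fit together as follows: allocate a cifs_writedata sized for the pages being written, fill in the open file, offset, length and page pointers, then hand it to cifs_async_writev(); completion runs through the embedded work_struct and the final kref_put() frees it via cifs_writedata_release(). A rough usage sketch under assumed writepages-style surroundings (nr_pages, pages[], cfile and offset are placeholders, and error handling is abridged):

        struct cifs_writedata *wdata;
        unsigned int i;
        int rc;

        wdata = cifs_writedata_alloc(nr_pages);
        if (wdata == NULL)
                return -ENOMEM;

        wdata->sync_mode = WB_SYNC_ALL;
        wdata->cfile = cfile;           /* reference dropped in cifs_writedata_release */
        wdata->offset = offset;
        wdata->bytes = nr_pages * PAGE_CACHE_SIZE;
        wdata->nr_pages = nr_pages;
        for (i = 0; i < nr_pages; i++)
                wdata->pages[i] = pages[i];

        do {
                rc = cifs_async_writev(wdata);  /* -EAGAIN means retry the send */
        } while (rc == -EAGAIN);

        if (rc != 0)
                kref_put(&wdata->refcount, cifs_writedata_release);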
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 83df937b814e..1a9fe7f816d1 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c | |||
@@ -32,6 +32,7 @@ | |||
32 | #include <linux/vfs.h> | 32 | #include <linux/vfs.h> |
33 | #include <linux/slab.h> | 33 | #include <linux/slab.h> |
34 | #include <linux/posix_acl_xattr.h> | 34 | #include <linux/posix_acl_xattr.h> |
35 | #include <linux/pagemap.h> | ||
35 | #include <asm/uaccess.h> | 36 | #include <asm/uaccess.h> |
36 | #include "cifspdu.h" | 37 | #include "cifspdu.h" |
37 | #include "cifsglob.h" | 38 | #include "cifsglob.h" |
@@ -84,7 +85,7 @@ static struct { | |||
84 | 85 | ||
85 | /* Mark as invalid, all open files on tree connections since they | 86 | /* Mark as invalid, all open files on tree connections since they |
86 | were closed when session to server was lost */ | 87 | were closed when session to server was lost */ |
87 | static void mark_open_files_invalid(struct cifsTconInfo *pTcon) | 88 | static void mark_open_files_invalid(struct cifs_tcon *pTcon) |
88 | { | 89 | { |
89 | struct cifsFileInfo *open_file = NULL; | 90 | struct cifsFileInfo *open_file = NULL; |
90 | struct list_head *tmp; | 91 | struct list_head *tmp; |
@@ -104,10 +105,10 @@ static void mark_open_files_invalid(struct cifsTconInfo *pTcon) | |||
104 | 105 | ||
105 | /* reconnect the socket, tcon, and smb session if needed */ | 106 | /* reconnect the socket, tcon, and smb session if needed */ |
106 | static int | 107 | static int |
107 | cifs_reconnect_tcon(struct cifsTconInfo *tcon, int smb_command) | 108 | cifs_reconnect_tcon(struct cifs_tcon *tcon, int smb_command) |
108 | { | 109 | { |
109 | int rc = 0; | 110 | int rc = 0; |
110 | struct cifsSesInfo *ses; | 111 | struct cifs_ses *ses; |
111 | struct TCP_Server_Info *server; | 112 | struct TCP_Server_Info *server; |
112 | struct nls_table *nls_codepage; | 113 | struct nls_table *nls_codepage; |
113 | 114 | ||
@@ -226,7 +227,7 @@ out: | |||
226 | SMB information in the SMB header. If the return code is zero, this | 227 | SMB information in the SMB header. If the return code is zero, this |
227 | function must have filled in request_buf pointer */ | 228 | function must have filled in request_buf pointer */ |
228 | static int | 229 | static int |
229 | small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | 230 | small_smb_init(int smb_command, int wct, struct cifs_tcon *tcon, |
230 | void **request_buf) | 231 | void **request_buf) |
231 | { | 232 | { |
232 | int rc; | 233 | int rc; |
@@ -252,7 +253,7 @@ small_smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | |||
252 | 253 | ||
253 | int | 254 | int |
254 | small_smb_init_no_tc(const int smb_command, const int wct, | 255 | small_smb_init_no_tc(const int smb_command, const int wct, |
255 | struct cifsSesInfo *ses, void **request_buf) | 256 | struct cifs_ses *ses, void **request_buf) |
256 | { | 257 | { |
257 | int rc; | 258 | int rc; |
258 | struct smb_hdr *buffer; | 259 | struct smb_hdr *buffer; |
@@ -278,7 +279,7 @@ small_smb_init_no_tc(const int smb_command, const int wct, | |||
278 | 279 | ||
279 | /* If the return code is zero, this function must fill in request_buf pointer */ | 280 | /* If the return code is zero, this function must fill in request_buf pointer */ |
280 | static int | 281 | static int |
281 | __smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | 282 | __smb_init(int smb_command, int wct, struct cifs_tcon *tcon, |
282 | void **request_buf, void **response_buf) | 283 | void **request_buf, void **response_buf) |
283 | { | 284 | { |
284 | *request_buf = cifs_buf_get(); | 285 | *request_buf = cifs_buf_get(); |
@@ -304,7 +305,7 @@ __smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | |||
304 | 305 | ||
305 | /* If the return code is zero, this function must fill in request_buf pointer */ | 306 | /* If the return code is zero, this function must fill in request_buf pointer */ |
306 | static int | 307 | static int |
307 | smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | 308 | smb_init(int smb_command, int wct, struct cifs_tcon *tcon, |
308 | void **request_buf, void **response_buf) | 309 | void **request_buf, void **response_buf) |
309 | { | 310 | { |
310 | int rc; | 311 | int rc; |
@@ -317,7 +318,7 @@ smb_init(int smb_command, int wct, struct cifsTconInfo *tcon, | |||
317 | } | 318 | } |
318 | 319 | ||
319 | static int | 320 | static int |
320 | smb_init_no_reconnect(int smb_command, int wct, struct cifsTconInfo *tcon, | 321 | smb_init_no_reconnect(int smb_command, int wct, struct cifs_tcon *tcon, |
321 | void **request_buf, void **response_buf) | 322 | void **request_buf, void **response_buf) |
322 | { | 323 | { |
323 | if (tcon->ses->need_reconnect || tcon->need_reconnect) | 324 | if (tcon->ses->need_reconnect || tcon->need_reconnect) |
@@ -366,7 +367,7 @@ static inline void inc_rfc1001_len(void *pSMB, int count) | |||
366 | } | 367 | } |
367 | 368 | ||
368 | int | 369 | int |
369 | CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | 370 | CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses) |
370 | { | 371 | { |
371 | NEGOTIATE_REQ *pSMB; | 372 | NEGOTIATE_REQ *pSMB; |
372 | NEGOTIATE_RSP *pSMBr; | 373 | NEGOTIATE_RSP *pSMBr; |
@@ -450,7 +451,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
450 | rc = -EOPNOTSUPP; | 451 | rc = -EOPNOTSUPP; |
451 | goto neg_err_exit; | 452 | goto neg_err_exit; |
452 | } | 453 | } |
453 | server->secMode = (__u8)le16_to_cpu(rsp->SecurityMode); | 454 | server->sec_mode = (__u8)le16_to_cpu(rsp->SecurityMode); |
454 | server->maxReq = le16_to_cpu(rsp->MaxMpxCount); | 455 | server->maxReq = le16_to_cpu(rsp->MaxMpxCount); |
455 | server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize), | 456 | server->maxBuf = min((__u32)le16_to_cpu(rsp->MaxBufSize), |
456 | (__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); | 457 | (__u32)CIFSMaxBufSize + MAX_CIFS_HDR_SIZE); |
@@ -504,7 +505,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
504 | cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) { | 505 | cpu_to_le16(CIFS_CRYPTO_KEY_SIZE)) { |
505 | memcpy(ses->server->cryptkey, rsp->EncryptionKey, | 506 | memcpy(ses->server->cryptkey, rsp->EncryptionKey, |
506 | CIFS_CRYPTO_KEY_SIZE); | 507 | CIFS_CRYPTO_KEY_SIZE); |
507 | } else if (server->secMode & SECMODE_PW_ENCRYPT) { | 508 | } else if (server->sec_mode & SECMODE_PW_ENCRYPT) { |
508 | rc = -EIO; /* need cryptkey unless plain text */ | 509 | rc = -EIO; /* need cryptkey unless plain text */ |
509 | goto neg_err_exit; | 510 | goto neg_err_exit; |
510 | } | 511 | } |
@@ -526,11 +527,11 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
526 | goto neg_err_exit; | 527 | goto neg_err_exit; |
527 | } | 528 | } |
528 | /* else wct == 17 NTLM */ | 529 | /* else wct == 17 NTLM */ |
529 | server->secMode = pSMBr->SecurityMode; | 530 | server->sec_mode = pSMBr->SecurityMode; |
530 | if ((server->secMode & SECMODE_USER) == 0) | 531 | if ((server->sec_mode & SECMODE_USER) == 0) |
531 | cFYI(1, "share mode security"); | 532 | cFYI(1, "share mode security"); |
532 | 533 | ||
533 | if ((server->secMode & SECMODE_PW_ENCRYPT) == 0) | 534 | if ((server->sec_mode & SECMODE_PW_ENCRYPT) == 0) |
534 | #ifdef CONFIG_CIFS_WEAK_PW_HASH | 535 | #ifdef CONFIG_CIFS_WEAK_PW_HASH |
535 | if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0) | 536 | if ((secFlags & CIFSSEC_MAY_PLNTXT) == 0) |
536 | #endif /* CIFS_WEAK_PW_HASH */ | 537 | #endif /* CIFS_WEAK_PW_HASH */ |
@@ -570,18 +571,10 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
570 | if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { | 571 | if (pSMBr->EncryptionKeyLength == CIFS_CRYPTO_KEY_SIZE) { |
571 | memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey, | 572 | memcpy(ses->server->cryptkey, pSMBr->u.EncryptionKey, |
572 | CIFS_CRYPTO_KEY_SIZE); | 573 | CIFS_CRYPTO_KEY_SIZE); |
573 | } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) | 574 | } else if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC || |
574 | && (pSMBr->EncryptionKeyLength == 0)) { | 575 | server->capabilities & CAP_EXTENDED_SECURITY) && |
576 | (pSMBr->EncryptionKeyLength == 0)) { | ||
575 | /* decode security blob */ | 577 | /* decode security blob */ |
576 | } else if (server->secMode & SECMODE_PW_ENCRYPT) { | ||
577 | rc = -EIO; /* no crypt key only if plain text pwd */ | ||
578 | goto neg_err_exit; | ||
579 | } | ||
580 | |||
581 | /* BB might be helpful to save off the domain of server here */ | ||
582 | |||
583 | if ((pSMBr->hdr.Flags2 & SMBFLG2_EXT_SEC) && | ||
584 | (server->capabilities & CAP_EXTENDED_SECURITY)) { | ||
585 | count = get_bcc(&pSMBr->hdr); | 578 | count = get_bcc(&pSMBr->hdr); |
586 | if (count < 16) { | 579 | if (count < 16) { |
587 | rc = -EIO; | 580 | rc = -EIO; |
@@ -624,6 +617,9 @@ CIFSSMBNegotiate(unsigned int xid, struct cifsSesInfo *ses) | |||
624 | } else | 617 | } else |
625 | rc = -EOPNOTSUPP; | 618 | rc = -EOPNOTSUPP; |
626 | } | 619 | } |
620 | } else if (server->sec_mode & SECMODE_PW_ENCRYPT) { | ||
621 | rc = -EIO; /* no crypt key only if plain text pwd */ | ||
622 | goto neg_err_exit; | ||
627 | } else | 623 | } else |
628 | server->capabilities &= ~CAP_EXTENDED_SECURITY; | 624 | server->capabilities &= ~CAP_EXTENDED_SECURITY; |
629 | 625 | ||
@@ -634,27 +630,27 @@ signing_check: | |||
634 | /* MUST_SIGN already includes the MAY_SIGN FLAG | 630 | /* MUST_SIGN already includes the MAY_SIGN FLAG |
635 | so if this is zero it means that signing is disabled */ | 631 | so if this is zero it means that signing is disabled */ |
636 | cFYI(1, "Signing disabled"); | 632 | cFYI(1, "Signing disabled"); |
637 | if (server->secMode & SECMODE_SIGN_REQUIRED) { | 633 | if (server->sec_mode & SECMODE_SIGN_REQUIRED) { |
638 | cERROR(1, "Server requires " | 634 | cERROR(1, "Server requires " |
639 | "packet signing to be enabled in " | 635 | "packet signing to be enabled in " |
640 | "/proc/fs/cifs/SecurityFlags."); | 636 | "/proc/fs/cifs/SecurityFlags."); |
641 | rc = -EOPNOTSUPP; | 637 | rc = -EOPNOTSUPP; |
642 | } | 638 | } |
643 | server->secMode &= | 639 | server->sec_mode &= |
644 | ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); | 640 | ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); |
645 | } else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) { | 641 | } else if ((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) { |
646 | /* signing required */ | 642 | /* signing required */ |
647 | cFYI(1, "Must sign - secFlags 0x%x", secFlags); | 643 | cFYI(1, "Must sign - secFlags 0x%x", secFlags); |
648 | if ((server->secMode & | 644 | if ((server->sec_mode & |
649 | (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) { | 645 | (SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED)) == 0) { |
650 | cERROR(1, "signing required but server lacks support"); | 646 | cERROR(1, "signing required but server lacks support"); |
651 | rc = -EOPNOTSUPP; | 647 | rc = -EOPNOTSUPP; |
652 | } else | 648 | } else |
653 | server->secMode |= SECMODE_SIGN_REQUIRED; | 649 | server->sec_mode |= SECMODE_SIGN_REQUIRED; |
654 | } else { | 650 | } else { |
655 | /* signing optional ie CIFSSEC_MAY_SIGN */ | 651 | /* signing optional ie CIFSSEC_MAY_SIGN */ |
656 | if ((server->secMode & SECMODE_SIGN_REQUIRED) == 0) | 652 | if ((server->sec_mode & SECMODE_SIGN_REQUIRED) == 0) |
657 | server->secMode &= | 653 | server->sec_mode &= |
658 | ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); | 654 | ~(SECMODE_SIGN_ENABLED | SECMODE_SIGN_REQUIRED); |
659 | } | 655 | } |
660 | 656 | ||
@@ -666,7 +662,7 @@ neg_err_exit: | |||
666 | } | 662 | } |
667 | 663 | ||
668 | int | 664 | int |
669 | CIFSSMBTDis(const int xid, struct cifsTconInfo *tcon) | 665 | CIFSSMBTDis(const int xid, struct cifs_tcon *tcon) |
670 | { | 666 | { |
671 | struct smb_hdr *smb_buffer; | 667 | struct smb_hdr *smb_buffer; |
672 | int rc = 0; | 668 | int rc = 0; |
@@ -725,6 +721,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server) | |||
725 | { | 721 | { |
726 | ECHO_REQ *smb; | 722 | ECHO_REQ *smb; |
727 | int rc = 0; | 723 | int rc = 0; |
724 | struct kvec iov; | ||
728 | 725 | ||
729 | cFYI(1, "In echo request"); | 726 | cFYI(1, "In echo request"); |
730 | 727 | ||
@@ -739,9 +736,10 @@ CIFSSMBEcho(struct TCP_Server_Info *server) | |||
739 | put_bcc(1, &smb->hdr); | 736 | put_bcc(1, &smb->hdr); |
740 | smb->Data[0] = 'a'; | 737 | smb->Data[0] = 'a'; |
741 | inc_rfc1001_len(smb, 3); | 738 | inc_rfc1001_len(smb, 3); |
739 | iov.iov_base = smb; | ||
740 | iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4; | ||
742 | 741 | ||
743 | rc = cifs_call_async(server, (struct smb_hdr *)smb, | 742 | rc = cifs_call_async(server, &iov, 1, cifs_echo_callback, server, true); |
744 | cifs_echo_callback, server); | ||
745 | if (rc) | 743 | if (rc) |
746 | cFYI(1, "Echo request failed: %d", rc); | 744 | cFYI(1, "Echo request failed: %d", rc); |
747 | 745 | ||
@@ -751,7 +749,7 @@ CIFSSMBEcho(struct TCP_Server_Info *server) | |||
751 | } | 749 | } |
752 | 750 | ||
753 | int | 751 | int |
754 | CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses) | 752 | CIFSSMBLogoff(const int xid, struct cifs_ses *ses) |
755 | { | 753 | { |
756 | LOGOFF_ANDX_REQ *pSMB; | 754 | LOGOFF_ANDX_REQ *pSMB; |
757 | int rc = 0; | 755 | int rc = 0; |
@@ -778,7 +776,7 @@ CIFSSMBLogoff(const int xid, struct cifsSesInfo *ses) | |||
778 | 776 | ||
779 | pSMB->hdr.Mid = GetNextMid(ses->server); | 777 | pSMB->hdr.Mid = GetNextMid(ses->server); |
780 | 778 | ||
781 | if (ses->server->secMode & | 779 | if (ses->server->sec_mode & |
782 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | 780 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) |
783 | pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; | 781 | pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; |
784 | 782 | ||
@@ -798,7 +796,7 @@ session_already_dead: | |||
798 | } | 796 | } |
799 | 797 | ||
800 | int | 798 | int |
801 | CIFSPOSIXDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName, | 799 | CIFSPOSIXDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName, |
802 | __u16 type, const struct nls_table *nls_codepage, int remap) | 800 | __u16 type, const struct nls_table *nls_codepage, int remap) |
803 | { | 801 | { |
804 | TRANSACTION2_SPI_REQ *pSMB = NULL; | 802 | TRANSACTION2_SPI_REQ *pSMB = NULL; |
@@ -873,7 +871,7 @@ PsxDelete: | |||
873 | } | 871 | } |
874 | 872 | ||
875 | int | 873 | int |
876 | CIFSSMBDelFile(const int xid, struct cifsTconInfo *tcon, const char *fileName, | 874 | CIFSSMBDelFile(const int xid, struct cifs_tcon *tcon, const char *fileName, |
877 | const struct nls_table *nls_codepage, int remap) | 875 | const struct nls_table *nls_codepage, int remap) |
878 | { | 876 | { |
879 | DELETE_FILE_REQ *pSMB = NULL; | 877 | DELETE_FILE_REQ *pSMB = NULL; |
@@ -918,7 +916,7 @@ DelFileRetry: | |||
918 | } | 916 | } |
919 | 917 | ||
920 | int | 918 | int |
921 | CIFSSMBRmDir(const int xid, struct cifsTconInfo *tcon, const char *dirName, | 919 | CIFSSMBRmDir(const int xid, struct cifs_tcon *tcon, const char *dirName, |
922 | const struct nls_table *nls_codepage, int remap) | 920 | const struct nls_table *nls_codepage, int remap) |
923 | { | 921 | { |
924 | DELETE_DIRECTORY_REQ *pSMB = NULL; | 922 | DELETE_DIRECTORY_REQ *pSMB = NULL; |
@@ -961,7 +959,7 @@ RmDirRetry: | |||
961 | } | 959 | } |
962 | 960 | ||
963 | int | 961 | int |
964 | CIFSSMBMkDir(const int xid, struct cifsTconInfo *tcon, | 962 | CIFSSMBMkDir(const int xid, struct cifs_tcon *tcon, |
965 | const char *name, const struct nls_table *nls_codepage, int remap) | 963 | const char *name, const struct nls_table *nls_codepage, int remap) |
966 | { | 964 | { |
967 | int rc = 0; | 965 | int rc = 0; |
@@ -1004,7 +1002,7 @@ MkDirRetry: | |||
1004 | } | 1002 | } |
1005 | 1003 | ||
1006 | int | 1004 | int |
1007 | CIFSPOSIXCreate(const int xid, struct cifsTconInfo *tcon, __u32 posix_flags, | 1005 | CIFSPOSIXCreate(const int xid, struct cifs_tcon *tcon, __u32 posix_flags, |
1008 | __u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData, | 1006 | __u64 mode, __u16 *netfid, FILE_UNIX_BASIC_INFO *pRetData, |
1009 | __u32 *pOplock, const char *name, | 1007 | __u32 *pOplock, const char *name, |
1010 | const struct nls_table *nls_codepage, int remap) | 1008 | const struct nls_table *nls_codepage, int remap) |
@@ -1170,7 +1168,7 @@ access_flags_to_smbopen_mode(const int access_flags) | |||
1170 | } | 1168 | } |
1171 | 1169 | ||
1172 | int | 1170 | int |
1173 | SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon, | 1171 | SMBLegacyOpen(const int xid, struct cifs_tcon *tcon, |
1174 | const char *fileName, const int openDisposition, | 1172 | const char *fileName, const int openDisposition, |
1175 | const int access_flags, const int create_options, __u16 *netfid, | 1173 | const int access_flags, const int create_options, __u16 *netfid, |
1176 | int *pOplock, FILE_ALL_INFO *pfile_info, | 1174 | int *pOplock, FILE_ALL_INFO *pfile_info, |
@@ -1277,7 +1275,7 @@ OldOpenRetry: | |||
1277 | } | 1275 | } |
1278 | 1276 | ||
1279 | int | 1277 | int |
1280 | CIFSSMBOpen(const int xid, struct cifsTconInfo *tcon, | 1278 | CIFSSMBOpen(const int xid, struct cifs_tcon *tcon, |
1281 | const char *fileName, const int openDisposition, | 1279 | const char *fileName, const int openDisposition, |
1282 | const int access_flags, const int create_options, __u16 *netfid, | 1280 | const int access_flags, const int create_options, __u16 *netfid, |
1283 | int *pOplock, FILE_ALL_INFO *pfile_info, | 1281 | int *pOplock, FILE_ALL_INFO *pfile_info, |
@@ -1379,8 +1377,7 @@ openRetry: | |||
1379 | } | 1377 | } |
1380 | 1378 | ||
1381 | int | 1379 | int |
1382 | CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid, | 1380 | CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes, |
1383 | const unsigned int count, const __u64 lseek, unsigned int *nbytes, | ||
1384 | char **buf, int *pbuf_type) | 1381 | char **buf, int *pbuf_type) |
1385 | { | 1382 | { |
1386 | int rc = -EACCES; | 1383 | int rc = -EACCES; |
@@ -1390,13 +1387,18 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid, | |||
1390 | int wct; | 1387 | int wct; |
1391 | int resp_buf_type = 0; | 1388 | int resp_buf_type = 0; |
1392 | struct kvec iov[1]; | 1389 | struct kvec iov[1]; |
1390 | __u32 pid = io_parms->pid; | ||
1391 | __u16 netfid = io_parms->netfid; | ||
1392 | __u64 offset = io_parms->offset; | ||
1393 | struct cifs_tcon *tcon = io_parms->tcon; | ||
1394 | unsigned int count = io_parms->length; | ||
1393 | 1395 | ||
1394 | cFYI(1, "Reading %d bytes on fid %d", count, netfid); | 1396 | cFYI(1, "Reading %d bytes on fid %d", count, netfid); |
1395 | if (tcon->ses->capabilities & CAP_LARGE_FILES) | 1397 | if (tcon->ses->capabilities & CAP_LARGE_FILES) |
1396 | wct = 12; | 1398 | wct = 12; |
1397 | else { | 1399 | else { |
1398 | wct = 10; /* old style read */ | 1400 | wct = 10; /* old style read */ |
1399 | if ((lseek >> 32) > 0) { | 1401 | if ((offset >> 32) > 0) { |
1400 | /* can not handle this big offset for old */ | 1402 | /* can not handle this big offset for old */ |
1401 | return -EIO; | 1403 | return -EIO; |
1402 | } | 1404 | } |
@@ -1407,15 +1409,18 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid, | |||
1407 | if (rc) | 1409 | if (rc) |
1408 | return rc; | 1410 | return rc; |
1409 | 1411 | ||
1412 | pSMB->hdr.Pid = cpu_to_le16((__u16)pid); | ||
1413 | pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16)); | ||
1414 | |||
1410 | /* tcon and ses pointer are checked in smb_init */ | 1415 | /* tcon and ses pointer are checked in smb_init */ |
1411 | if (tcon->ses->server == NULL) | 1416 | if (tcon->ses->server == NULL) |
1412 | return -ECONNABORTED; | 1417 | return -ECONNABORTED; |
1413 | 1418 | ||
1414 | pSMB->AndXCommand = 0xFF; /* none */ | 1419 | pSMB->AndXCommand = 0xFF; /* none */ |
1415 | pSMB->Fid = netfid; | 1420 | pSMB->Fid = netfid; |
1416 | pSMB->OffsetLow = cpu_to_le32(lseek & 0xFFFFFFFF); | 1421 | pSMB->OffsetLow = cpu_to_le32(offset & 0xFFFFFFFF); |
1417 | if (wct == 12) | 1422 | if (wct == 12) |
1418 | pSMB->OffsetHigh = cpu_to_le32(lseek >> 32); | 1423 | pSMB->OffsetHigh = cpu_to_le32(offset >> 32); |
1419 | 1424 | ||
1420 | pSMB->Remaining = 0; | 1425 | pSMB->Remaining = 0; |
1421 | pSMB->MaxCount = cpu_to_le16(count & 0xFFFF); | 1426 | pSMB->MaxCount = cpu_to_le16(count & 0xFFFF); |
@@ -1484,9 +1489,8 @@ CIFSSMBRead(const int xid, struct cifsTconInfo *tcon, const int netfid, | |||
1484 | 1489 | ||
1485 | 1490 | ||
1486 | int | 1491 | int |
1487 | CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, | 1492 | CIFSSMBWrite(const int xid, struct cifs_io_parms *io_parms, |
1488 | const int netfid, const unsigned int count, | 1493 | unsigned int *nbytes, const char *buf, |
1489 | const __u64 offset, unsigned int *nbytes, const char *buf, | ||
1490 | const char __user *ubuf, const int long_op) | 1494 | const char __user *ubuf, const int long_op) |
1491 | { | 1495 | { |
1492 | int rc = -EACCES; | 1496 | int rc = -EACCES; |
@@ -1495,6 +1499,11 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, | |||
1495 | int bytes_returned, wct; | 1499 | int bytes_returned, wct; |
1496 | __u32 bytes_sent; | 1500 | __u32 bytes_sent; |
1497 | __u16 byte_count; | 1501 | __u16 byte_count; |
1502 | __u32 pid = io_parms->pid; | ||
1503 | __u16 netfid = io_parms->netfid; | ||
1504 | __u64 offset = io_parms->offset; | ||
1505 | struct cifs_tcon *tcon = io_parms->tcon; | ||
1506 | unsigned int count = io_parms->length; | ||
1498 | 1507 | ||
1499 | *nbytes = 0; | 1508 | *nbytes = 0; |
1500 | 1509 | ||
@@ -1516,6 +1525,10 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, | |||
1516 | (void **) &pSMBr); | 1525 | (void **) &pSMBr); |
1517 | if (rc) | 1526 | if (rc) |
1518 | return rc; | 1527 | return rc; |
1528 | |||
1529 | pSMB->hdr.Pid = cpu_to_le16((__u16)pid); | ||
1530 | pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16)); | ||
1531 | |||
1519 | /* tcon and ses pointer are checked in smb_init */ | 1532 | /* tcon and ses pointer are checked in smb_init */ |
1520 | if (tcon->ses->server == NULL) | 1533 | if (tcon->ses->server == NULL) |
1521 | return -ECONNABORTED; | 1534 | return -ECONNABORTED; |
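The hunks above stamp the opener's pid into hdr.Pid/hdr.PidHigh for reads and writes, and CIFSSMBWrite() (like CIFSSMBWrite2() further down) now takes its tcon/netfid/offset/length packed in a struct cifs_io_parms instead of a long positional argument list. The struct itself is declared in a header outside this excerpt; the fragment below is only an illustrative caller built from the five fields this function actually dereferences, and every local name (write_at, pid, the long_op value of 0) is a placeholder rather than something taken from the patch.

    /* illustrative caller of the new CIFSSMBWrite() signature (sketch only) */
    static int write_at(const int xid, struct cifs_tcon *tcon, __u16 netfid,
                        __u32 pid, __u64 offset, const char *buf,
                        unsigned int count, unsigned int *written)
    {
            struct cifs_io_parms io_parms;

            io_parms.tcon = tcon;      /* tcon->ses->server is checked in smb_init */
            io_parms.pid = pid;        /* low 16 bits -> hdr.Pid, high 16 -> hdr.PidHigh */
            io_parms.netfid = netfid;
            io_parms.offset = offset;
            io_parms.length = count;

            /* kernel-space buffer, so the __user pointer is NULL; long_op kept at 0 here */
            return CIFSSMBWrite(xid, &io_parms, written, buf, NULL, 0);
    }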
@@ -1602,17 +1615,259 @@ CIFSSMBWrite(const int xid, struct cifsTconInfo *tcon, | |||
1602 | return rc; | 1615 | return rc; |
1603 | } | 1616 | } |
1604 | 1617 | ||
1618 | void | ||
1619 | cifs_writedata_release(struct kref *refcount) | ||
1620 | { | ||
1621 | struct cifs_writedata *wdata = container_of(refcount, | ||
1622 | struct cifs_writedata, refcount); | ||
1623 | |||
1624 | if (wdata->cfile) | ||
1625 | cifsFileInfo_put(wdata->cfile); | ||
1626 | |||
1627 | kfree(wdata); | ||
1628 | } | ||
1629 | |||
1630 | /* | ||
1631 | * Write failed with a retryable error. Resend the write request. It's also | ||
1632 | * possible that the page was redirtied so re-clean the page. | ||
1633 | */ | ||
1634 | static void | ||
1635 | cifs_writev_requeue(struct cifs_writedata *wdata) | ||
1636 | { | ||
1637 | int i, rc; | ||
1638 | struct inode *inode = wdata->cfile->dentry->d_inode; | ||
1639 | |||
1640 | for (i = 0; i < wdata->nr_pages; i++) { | ||
1641 | lock_page(wdata->pages[i]); | ||
1642 | clear_page_dirty_for_io(wdata->pages[i]); | ||
1643 | } | ||
1644 | |||
1645 | do { | ||
1646 | rc = cifs_async_writev(wdata); | ||
1647 | } while (rc == -EAGAIN); | ||
1648 | |||
1649 | for (i = 0; i < wdata->nr_pages; i++) { | ||
1650 | if (rc != 0) | ||
1651 | SetPageError(wdata->pages[i]); | ||
1652 | unlock_page(wdata->pages[i]); | ||
1653 | } | ||
1654 | |||
1655 | mapping_set_error(inode->i_mapping, rc); | ||
1656 | kref_put(&wdata->refcount, cifs_writedata_release); | ||
1657 | } | ||
1658 | |||
1659 | static void | ||
1660 | cifs_writev_complete(struct work_struct *work) | ||
1661 | { | ||
1662 | struct cifs_writedata *wdata = container_of(work, | ||
1663 | struct cifs_writedata, work); | ||
1664 | struct inode *inode = wdata->cfile->dentry->d_inode; | ||
1665 | int i = 0; | ||
1666 | |||
1667 | if (wdata->result == 0) { | ||
1668 | cifs_update_eof(CIFS_I(inode), wdata->offset, wdata->bytes); | ||
1669 | cifs_stats_bytes_written(tlink_tcon(wdata->cfile->tlink), | ||
1670 | wdata->bytes); | ||
1671 | } else if (wdata->sync_mode == WB_SYNC_ALL && wdata->result == -EAGAIN) | ||
1672 | return cifs_writev_requeue(wdata); | ||
1673 | |||
1674 | for (i = 0; i < wdata->nr_pages; i++) { | ||
1675 | struct page *page = wdata->pages[i]; | ||
1676 | if (wdata->result == -EAGAIN) | ||
1677 | __set_page_dirty_nobuffers(page); | ||
1678 | else if (wdata->result < 0) | ||
1679 | SetPageError(page); | ||
1680 | end_page_writeback(page); | ||
1681 | page_cache_release(page); | ||
1682 | } | ||
1683 | if (wdata->result != -EAGAIN) | ||
1684 | mapping_set_error(inode->i_mapping, wdata->result); | ||
1685 | kref_put(&wdata->refcount, cifs_writedata_release); | ||
1686 | } | ||
1687 | |||
1688 | struct cifs_writedata * | ||
1689 | cifs_writedata_alloc(unsigned int nr_pages) | ||
1690 | { | ||
1691 | struct cifs_writedata *wdata; | ||
1692 | |||
1693 | /* this would overflow */ | ||
1694 | if (nr_pages == 0) { | ||
1695 | cERROR(1, "%s: called with nr_pages == 0!", __func__); | ||
1696 | return NULL; | ||
1697 | } | ||
1698 | |||
1699 | /* writedata + number of page pointers */ | ||
1700 | wdata = kzalloc(sizeof(*wdata) + | ||
1701 | sizeof(struct page *) * (nr_pages - 1), GFP_NOFS); | ||
1702 | if (wdata != NULL) { | ||
1703 | INIT_WORK(&wdata->work, cifs_writev_complete); | ||
1704 | kref_init(&wdata->refcount); | ||
1705 | } | ||
1706 | return wdata; | ||
1707 | } | ||
1708 | |||
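cifs_writedata_alloc() above sizes the allocation as sizeof(*wdata) plus nr_pages - 1 extra page pointers, which only works if the structure ends in a one-element pages[] array (hence the guard against nr_pages == 0). Its real declaration lives in a header that is not part of this hunk, so the layout below is an assumption, limited to the members this file touches:

    /* assumed shape of struct cifs_writedata -- only the fields used here (sketch) */
    struct cifs_writedata {
            struct kref             refcount;   /* last put runs cifs_writedata_release() */
            struct work_struct      work;       /* queued to run cifs_writev_complete() */
            struct cifsFileInfo     *cfile;     /* open file the pages belong to */
            __u64                   offset;     /* file offset of pages[0] */
            unsigned int            bytes;      /* total bytes marshalled into the iovecs */
            int                     result;     /* set by cifs_writev_callback() */
            int                     sync_mode;  /* WB_SYNC_ALL forces a requeue on -EAGAIN */
            unsigned int            nr_pages;
            struct page             *pages[1];  /* really nr_pages entries, over-allocated */
    };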
1709 | /* | ||
1710 | * Check the midState and signature on received buffer (if any), and queue the | ||
1711 | * workqueue completion task. | ||
1712 | */ | ||
1713 | static void | ||
1714 | cifs_writev_callback(struct mid_q_entry *mid) | ||
1715 | { | ||
1716 | struct cifs_writedata *wdata = mid->callback_data; | ||
1717 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); | ||
1718 | unsigned int written; | ||
1719 | WRITE_RSP *smb = (WRITE_RSP *)mid->resp_buf; | ||
1720 | |||
1721 | switch (mid->midState) { | ||
1722 | case MID_RESPONSE_RECEIVED: | ||
1723 | wdata->result = cifs_check_receive(mid, tcon->ses->server, 0); | ||
1724 | if (wdata->result != 0) | ||
1725 | break; | ||
1726 | |||
1727 | written = le16_to_cpu(smb->CountHigh); | ||
1728 | written <<= 16; | ||
1729 | written += le16_to_cpu(smb->Count); | ||
1730 | /* | ||
1731 | * Mask off high 16 bits when bytes written as returned | ||
1732 | * by the server is greater than bytes requested by the | ||
1733 | * client. OS/2 servers are known to set incorrect | ||
1734 | * CountHigh values. | ||
1735 | */ | ||
1736 | if (written > wdata->bytes) | ||
1737 | written &= 0xFFFF; | ||
1738 | |||
1739 | if (written < wdata->bytes) | ||
1740 | wdata->result = -ENOSPC; | ||
1741 | else | ||
1742 | wdata->bytes = written; | ||
1743 | break; | ||
1744 | case MID_REQUEST_SUBMITTED: | ||
1745 | case MID_RETRY_NEEDED: | ||
1746 | wdata->result = -EAGAIN; | ||
1747 | break; | ||
1748 | default: | ||
1749 | wdata->result = -EIO; | ||
1750 | break; | ||
1751 | } | ||
1752 | |||
1753 | queue_work(system_nrt_wq, &wdata->work); | ||
1754 | DeleteMidQEntry(mid); | ||
1755 | atomic_dec(&tcon->ses->server->inFlight); | ||
1756 | wake_up(&tcon->ses->server->request_q); | ||
1757 | } | ||
1758 | |||
1759 | /* cifs_async_writev - send an async write, and set up mid to handle result */ | ||
1760 | int | ||
1761 | cifs_async_writev(struct cifs_writedata *wdata) | ||
1762 | { | ||
1763 | int i, rc = -EACCES; | ||
1764 | WRITE_REQ *smb = NULL; | ||
1765 | int wct; | ||
1766 | struct cifs_tcon *tcon = tlink_tcon(wdata->cfile->tlink); | ||
1767 | struct inode *inode = wdata->cfile->dentry->d_inode; | ||
1768 | struct kvec *iov = NULL; | ||
1769 | |||
1770 | if (tcon->ses->capabilities & CAP_LARGE_FILES) { | ||
1771 | wct = 14; | ||
1772 | } else { | ||
1773 | wct = 12; | ||
1774 | if (wdata->offset >> 32 > 0) { | ||
1775 | /* can not handle big offset for old srv */ | ||
1776 | return -EIO; | ||
1777 | } | ||
1778 | } | ||
1779 | |||
1780 | rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **)&smb); | ||
1781 | if (rc) | ||
1782 | goto async_writev_out; | ||
1783 | |||
1784 | /* 1 iov per page + 1 for header */ | ||
1785 | iov = kzalloc((wdata->nr_pages + 1) * sizeof(*iov), GFP_NOFS); | ||
1786 | if (iov == NULL) { | ||
1787 | rc = -ENOMEM; | ||
1788 | goto async_writev_out; | ||
1789 | } | ||
1790 | |||
1791 | smb->hdr.Pid = cpu_to_le16((__u16)wdata->cfile->pid); | ||
1792 | smb->hdr.PidHigh = cpu_to_le16((__u16)(wdata->cfile->pid >> 16)); | ||
1793 | |||
1794 | smb->AndXCommand = 0xFF; /* none */ | ||
1795 | smb->Fid = wdata->cfile->netfid; | ||
1796 | smb->OffsetLow = cpu_to_le32(wdata->offset & 0xFFFFFFFF); | ||
1797 | if (wct == 14) | ||
1798 | smb->OffsetHigh = cpu_to_le32(wdata->offset >> 32); | ||
1799 | smb->Reserved = 0xFFFFFFFF; | ||
1800 | smb->WriteMode = 0; | ||
1801 | smb->Remaining = 0; | ||
1802 | |||
1803 | smb->DataOffset = | ||
1804 | cpu_to_le16(offsetof(struct smb_com_write_req, Data) - 4); | ||
1805 | |||
1806 | /* 4 for RFC1001 length + 1 for BCC */ | ||
1807 | iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4 + 1; | ||
1808 | iov[0].iov_base = smb; | ||
1809 | |||
1810 | /* marshal up the pages into iov array */ | ||
1811 | wdata->bytes = 0; | ||
1812 | for (i = 0; i < wdata->nr_pages; i++) { | ||
1813 | iov[i + 1].iov_len = min(inode->i_size - | ||
1814 | page_offset(wdata->pages[i]), | ||
1815 | (loff_t)PAGE_CACHE_SIZE); | ||
1816 | iov[i + 1].iov_base = kmap(wdata->pages[i]); | ||
1817 | wdata->bytes += iov[i + 1].iov_len; | ||
1818 | } | ||
1819 | |||
1820 | cFYI(1, "async write at %llu %u bytes", wdata->offset, wdata->bytes); | ||
1821 | |||
1822 | smb->DataLengthLow = cpu_to_le16(wdata->bytes & 0xFFFF); | ||
1823 | smb->DataLengthHigh = cpu_to_le16(wdata->bytes >> 16); | ||
1824 | |||
1825 | if (wct == 14) { | ||
1826 | inc_rfc1001_len(&smb->hdr, wdata->bytes + 1); | ||
1827 | put_bcc(wdata->bytes + 1, &smb->hdr); | ||
1828 | } else { | ||
1829 | /* wct == 12 */ | ||
1830 | struct smb_com_writex_req *smbw = | ||
1831 | (struct smb_com_writex_req *)smb; | ||
1832 | inc_rfc1001_len(&smbw->hdr, wdata->bytes + 5); | ||
1833 | put_bcc(wdata->bytes + 5, &smbw->hdr); | ||
1834 | iov[0].iov_len += 4; /* pad bigger by four bytes */ | ||
1835 | } | ||
1836 | |||
1837 | kref_get(&wdata->refcount); | ||
1838 | rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1, | ||
1839 | cifs_writev_callback, wdata, false); | ||
1840 | |||
1841 | if (rc == 0) | ||
1842 | cifs_stats_inc(&tcon->num_writes); | ||
1843 | else | ||
1844 | kref_put(&wdata->refcount, cifs_writedata_release); | ||
1845 | |||
1846 | /* send is done, unmap pages */ | ||
1847 | for (i = 0; i < wdata->nr_pages; i++) | ||
1848 | kunmap(wdata->pages[i]); | ||
1849 | |||
1850 | async_writev_out: | ||
1851 | cifs_small_buf_release(smb); | ||
1852 | kfree(iov); | ||
1853 | return rc; | ||
1854 | } | ||
1855 | |||
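cifs_writev_requeue() above already demonstrates the calling convention for this async path: submit with cifs_async_writev(), retry on -EAGAIN, drop the submitter's reference once the request is on the wire, and leave page cleanup to cifs_writev_callback()/cifs_writev_complete(). A condensed submitter sketch follows, with the page locking/writeback bookkeeping deliberately omitted and every name not present in this hunk treated as hypothetical.

    /* illustrative submitter for cifs_async_writev() (sketch, page state handling omitted) */
    static int submit_async_write(struct cifsFileInfo *cfile, struct page **pages,
                                  unsigned int nr_pages, __u64 offset)
    {
            struct cifs_writedata *wdata;
            unsigned int i;
            int rc;

            wdata = cifs_writedata_alloc(nr_pages);
            if (wdata == NULL)
                    return -ENOMEM;

            cifsFileInfo_get(cfile);            /* dropped by cifs_writedata_release() */
            wdata->cfile = cfile;
            wdata->offset = offset;
            wdata->sync_mode = WB_SYNC_NONE;    /* WB_SYNC_ALL would requeue on -EAGAIN */
            wdata->nr_pages = nr_pages;
            for (i = 0; i < nr_pages; i++)
                    wdata->pages[i] = pages[i]; /* completion ends writeback, drops a page ref */

            do {
                    rc = cifs_async_writev(wdata);
            } while (rc == -EAGAIN);

            /*
             * Mirror cifs_writev_requeue(): the submitter's reference is dropped here;
             * the in-flight reference taken inside cifs_async_writev() is dropped by
             * cifs_writev_complete() once the server answers.
             */
            kref_put(&wdata->refcount, cifs_writedata_release);
            return rc;
    }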
1605 | int | 1856 | int |
1606 | CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, | 1857 | CIFSSMBWrite2(const int xid, struct cifs_io_parms *io_parms, |
1607 | const int netfid, const unsigned int count, | 1858 | unsigned int *nbytes, struct kvec *iov, int n_vec, |
1608 | const __u64 offset, unsigned int *nbytes, struct kvec *iov, | 1859 | const int long_op) |
1609 | int n_vec, const int long_op) | ||
1610 | { | 1860 | { |
1611 | int rc = -EACCES; | 1861 | int rc = -EACCES; |
1612 | WRITE_REQ *pSMB = NULL; | 1862 | WRITE_REQ *pSMB = NULL; |
1613 | int wct; | 1863 | int wct; |
1614 | int smb_hdr_len; | 1864 | int smb_hdr_len; |
1615 | int resp_buf_type = 0; | 1865 | int resp_buf_type = 0; |
1866 | __u32 pid = io_parms->pid; | ||
1867 | __u16 netfid = io_parms->netfid; | ||
1868 | __u64 offset = io_parms->offset; | ||
1869 | struct cifs_tcon *tcon = io_parms->tcon; | ||
1870 | unsigned int count = io_parms->length; | ||
1616 | 1871 | ||
1617 | *nbytes = 0; | 1872 | *nbytes = 0; |
1618 | 1873 | ||
@@ -1630,6 +1885,10 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, | |||
1630 | rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB); | 1885 | rc = small_smb_init(SMB_COM_WRITE_ANDX, wct, tcon, (void **) &pSMB); |
1631 | if (rc) | 1886 | if (rc) |
1632 | return rc; | 1887 | return rc; |
1888 | |||
1889 | pSMB->hdr.Pid = cpu_to_le16((__u16)pid); | ||
1890 | pSMB->hdr.PidHigh = cpu_to_le16((__u16)(pid >> 16)); | ||
1891 | |||
1633 | /* tcon and ses pointer are checked in smb_init */ | 1892 | /* tcon and ses pointer are checked in smb_init */ |
1634 | if (tcon->ses->server == NULL) | 1893 | if (tcon->ses->server == NULL) |
1635 | return -ECONNABORTED; | 1894 | return -ECONNABORTED; |
@@ -1705,7 +1964,7 @@ CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon, | |||
1705 | 1964 | ||
1706 | 1965 | ||
1707 | int | 1966 | int |
1708 | CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, | 1967 | CIFSSMBLock(const int xid, struct cifs_tcon *tcon, |
1709 | const __u16 smb_file_id, const __u64 len, | 1968 | const __u16 smb_file_id, const __u64 len, |
1710 | const __u64 offset, const __u32 numUnlock, | 1969 | const __u64 offset, const __u32 numUnlock, |
1711 | const __u32 numLock, const __u8 lockType, | 1970 | const __u32 numLock, const __u8 lockType, |
@@ -1775,7 +2034,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon, | |||
1775 | } | 2034 | } |
1776 | 2035 | ||
1777 | int | 2036 | int |
1778 | CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon, | 2037 | CIFSSMBPosixLock(const int xid, struct cifs_tcon *tcon, |
1779 | const __u16 smb_file_id, const int get_flag, const __u64 len, | 2038 | const __u16 smb_file_id, const int get_flag, const __u64 len, |
1780 | struct file_lock *pLockData, const __u16 lock_type, | 2039 | struct file_lock *pLockData, const __u16 lock_type, |
1781 | const bool waitFlag) | 2040 | const bool waitFlag) |
@@ -1913,7 +2172,7 @@ plk_err_exit: | |||
1913 | 2172 | ||
1914 | 2173 | ||
1915 | int | 2174 | int |
1916 | CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id) | 2175 | CIFSSMBClose(const int xid, struct cifs_tcon *tcon, int smb_file_id) |
1917 | { | 2176 | { |
1918 | int rc = 0; | 2177 | int rc = 0; |
1919 | CLOSE_REQ *pSMB = NULL; | 2178 | CLOSE_REQ *pSMB = NULL; |
@@ -1946,7 +2205,7 @@ CIFSSMBClose(const int xid, struct cifsTconInfo *tcon, int smb_file_id) | |||
1946 | } | 2205 | } |
1947 | 2206 | ||
1948 | int | 2207 | int |
1949 | CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id) | 2208 | CIFSSMBFlush(const int xid, struct cifs_tcon *tcon, int smb_file_id) |
1950 | { | 2209 | { |
1951 | int rc = 0; | 2210 | int rc = 0; |
1952 | FLUSH_REQ *pSMB = NULL; | 2211 | FLUSH_REQ *pSMB = NULL; |
@@ -1967,7 +2226,7 @@ CIFSSMBFlush(const int xid, struct cifsTconInfo *tcon, int smb_file_id) | |||
1967 | } | 2226 | } |
1968 | 2227 | ||
1969 | int | 2228 | int |
1970 | CIFSSMBRename(const int xid, struct cifsTconInfo *tcon, | 2229 | CIFSSMBRename(const int xid, struct cifs_tcon *tcon, |
1971 | const char *fromName, const char *toName, | 2230 | const char *fromName, const char *toName, |
1972 | const struct nls_table *nls_codepage, int remap) | 2231 | const struct nls_table *nls_codepage, int remap) |
1973 | { | 2232 | { |
@@ -2034,7 +2293,7 @@ renameRetry: | |||
2034 | return rc; | 2293 | return rc; |
2035 | } | 2294 | } |
2036 | 2295 | ||
2037 | int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon, | 2296 | int CIFSSMBRenameOpenFile(const int xid, struct cifs_tcon *pTcon, |
2038 | int netfid, const char *target_name, | 2297 | int netfid, const char *target_name, |
2039 | const struct nls_table *nls_codepage, int remap) | 2298 | const struct nls_table *nls_codepage, int remap) |
2040 | { | 2299 | { |
@@ -2114,7 +2373,7 @@ int CIFSSMBRenameOpenFile(const int xid, struct cifsTconInfo *pTcon, | |||
2114 | } | 2373 | } |
2115 | 2374 | ||
2116 | int | 2375 | int |
2117 | CIFSSMBCopy(const int xid, struct cifsTconInfo *tcon, const char *fromName, | 2376 | CIFSSMBCopy(const int xid, struct cifs_tcon *tcon, const char *fromName, |
2118 | const __u16 target_tid, const char *toName, const int flags, | 2377 | const __u16 target_tid, const char *toName, const int flags, |
2119 | const struct nls_table *nls_codepage, int remap) | 2378 | const struct nls_table *nls_codepage, int remap) |
2120 | { | 2379 | { |
@@ -2182,7 +2441,7 @@ copyRetry: | |||
2182 | } | 2441 | } |
2183 | 2442 | ||
2184 | int | 2443 | int |
2185 | CIFSUnixCreateSymLink(const int xid, struct cifsTconInfo *tcon, | 2444 | CIFSUnixCreateSymLink(const int xid, struct cifs_tcon *tcon, |
2186 | const char *fromName, const char *toName, | 2445 | const char *fromName, const char *toName, |
2187 | const struct nls_table *nls_codepage) | 2446 | const struct nls_table *nls_codepage) |
2188 | { | 2447 | { |
@@ -2271,7 +2530,7 @@ createSymLinkRetry: | |||
2271 | } | 2530 | } |
2272 | 2531 | ||
2273 | int | 2532 | int |
2274 | CIFSUnixCreateHardLink(const int xid, struct cifsTconInfo *tcon, | 2533 | CIFSUnixCreateHardLink(const int xid, struct cifs_tcon *tcon, |
2275 | const char *fromName, const char *toName, | 2534 | const char *fromName, const char *toName, |
2276 | const struct nls_table *nls_codepage, int remap) | 2535 | const struct nls_table *nls_codepage, int remap) |
2277 | { | 2536 | { |
@@ -2356,7 +2615,7 @@ createHardLinkRetry: | |||
2356 | } | 2615 | } |
2357 | 2616 | ||
2358 | int | 2617 | int |
2359 | CIFSCreateHardLink(const int xid, struct cifsTconInfo *tcon, | 2618 | CIFSCreateHardLink(const int xid, struct cifs_tcon *tcon, |
2360 | const char *fromName, const char *toName, | 2619 | const char *fromName, const char *toName, |
2361 | const struct nls_table *nls_codepage, int remap) | 2620 | const struct nls_table *nls_codepage, int remap) |
2362 | { | 2621 | { |
@@ -2428,7 +2687,7 @@ winCreateHardLinkRetry: | |||
2428 | } | 2687 | } |
2429 | 2688 | ||
2430 | int | 2689 | int |
2431 | CIFSSMBUnixQuerySymLink(const int xid, struct cifsTconInfo *tcon, | 2690 | CIFSSMBUnixQuerySymLink(const int xid, struct cifs_tcon *tcon, |
2432 | const unsigned char *searchName, char **symlinkinfo, | 2691 | const unsigned char *searchName, char **symlinkinfo, |
2433 | const struct nls_table *nls_codepage) | 2692 | const struct nls_table *nls_codepage) |
2434 | { | 2693 | { |
@@ -2533,7 +2792,7 @@ querySymLinkRetry: | |||
2533 | * it is not compiled in by default until callers fixed up and more tested. | 2792 | * it is not compiled in by default until callers fixed up and more tested. |
2534 | */ | 2793 | */ |
2535 | int | 2794 | int |
2536 | CIFSSMBQueryReparseLinkInfo(const int xid, struct cifsTconInfo *tcon, | 2795 | CIFSSMBQueryReparseLinkInfo(const int xid, struct cifs_tcon *tcon, |
2537 | const unsigned char *searchName, | 2796 | const unsigned char *searchName, |
2538 | char *symlinkinfo, const int buflen, __u16 fid, | 2797 | char *symlinkinfo, const int buflen, __u16 fid, |
2539 | const struct nls_table *nls_codepage) | 2798 | const struct nls_table *nls_codepage) |
@@ -2771,7 +3030,7 @@ static __u16 ACL_to_cifs_posix(char *parm_data, const char *pACL, | |||
2771 | } | 3030 | } |
2772 | 3031 | ||
2773 | int | 3032 | int |
2774 | CIFSSMBGetPosixACL(const int xid, struct cifsTconInfo *tcon, | 3033 | CIFSSMBGetPosixACL(const int xid, struct cifs_tcon *tcon, |
2775 | const unsigned char *searchName, | 3034 | const unsigned char *searchName, |
2776 | char *acl_inf, const int buflen, const int acl_type, | 3035 | char *acl_inf, const int buflen, const int acl_type, |
2777 | const struct nls_table *nls_codepage, int remap) | 3036 | const struct nls_table *nls_codepage, int remap) |
@@ -2859,7 +3118,7 @@ queryAclRetry: | |||
2859 | } | 3118 | } |
2860 | 3119 | ||
2861 | int | 3120 | int |
2862 | CIFSSMBSetPosixACL(const int xid, struct cifsTconInfo *tcon, | 3121 | CIFSSMBSetPosixACL(const int xid, struct cifs_tcon *tcon, |
2863 | const unsigned char *fileName, | 3122 | const unsigned char *fileName, |
2864 | const char *local_acl, const int buflen, | 3123 | const char *local_acl, const int buflen, |
2865 | const int acl_type, | 3124 | const int acl_type, |
@@ -2939,7 +3198,7 @@ setACLerrorExit: | |||
2939 | 3198 | ||
2940 | /* BB fix tabs in this function FIXME BB */ | 3199 | /* BB fix tabs in this function FIXME BB */ |
2941 | int | 3200 | int |
2942 | CIFSGetExtAttr(const int xid, struct cifsTconInfo *tcon, | 3201 | CIFSGetExtAttr(const int xid, struct cifs_tcon *tcon, |
2943 | const int netfid, __u64 *pExtAttrBits, __u64 *pMask) | 3202 | const int netfid, __u64 *pExtAttrBits, __u64 *pMask) |
2944 | { | 3203 | { |
2945 | int rc = 0; | 3204 | int rc = 0; |
@@ -3032,7 +3291,7 @@ GetExtAttrOut: | |||
3032 | */ | 3291 | */ |
3033 | static int | 3292 | static int |
3034 | smb_init_nttransact(const __u16 sub_command, const int setup_count, | 3293 | smb_init_nttransact(const __u16 sub_command, const int setup_count, |
3035 | const int parm_len, struct cifsTconInfo *tcon, | 3294 | const int parm_len, struct cifs_tcon *tcon, |
3036 | void **ret_buf) | 3295 | void **ret_buf) |
3037 | { | 3296 | { |
3038 | int rc; | 3297 | int rc; |
@@ -3115,7 +3374,7 @@ validate_ntransact(char *buf, char **ppparm, char **ppdata, | |||
3115 | 3374 | ||
3116 | /* Get Security Descriptor (by handle) from remote server for a file or dir */ | 3375 | /* Get Security Descriptor (by handle) from remote server for a file or dir */ |
3117 | int | 3376 | int |
3118 | CIFSSMBGetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, | 3377 | CIFSSMBGetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid, |
3119 | struct cifs_ntsd **acl_inf, __u32 *pbuflen) | 3378 | struct cifs_ntsd **acl_inf, __u32 *pbuflen) |
3120 | { | 3379 | { |
3121 | int rc = 0; | 3380 | int rc = 0; |
@@ -3207,7 +3466,7 @@ qsec_out: | |||
3207 | } | 3466 | } |
3208 | 3467 | ||
3209 | int | 3468 | int |
3210 | CIFSSMBSetCIFSACL(const int xid, struct cifsTconInfo *tcon, __u16 fid, | 3469 | CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid, |
3211 | struct cifs_ntsd *pntsd, __u32 acllen) | 3470 | struct cifs_ntsd *pntsd, __u32 acllen) |
3212 | { | 3471 | { |
3213 | __u16 byte_count, param_count, data_count, param_offset, data_offset; | 3472 | __u16 byte_count, param_count, data_count, param_offset, data_offset; |
@@ -3273,7 +3532,7 @@ setCifsAclRetry: | |||
3273 | 3532 | ||
3274 | /* Legacy Query Path Information call for lookup to old servers such | 3533 | /* Legacy Query Path Information call for lookup to old servers such |
3275 | as Win9x/WinME */ | 3534 | as Win9x/WinME */ |
3276 | int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon, | 3535 | int SMBQueryInformation(const int xid, struct cifs_tcon *tcon, |
3277 | const unsigned char *searchName, | 3536 | const unsigned char *searchName, |
3278 | FILE_ALL_INFO *pFinfo, | 3537 | FILE_ALL_INFO *pFinfo, |
3279 | const struct nls_table *nls_codepage, int remap) | 3538 | const struct nls_table *nls_codepage, int remap) |
@@ -3341,7 +3600,7 @@ QInfRetry: | |||
3341 | } | 3600 | } |
3342 | 3601 | ||
3343 | int | 3602 | int |
3344 | CIFSSMBQFileInfo(const int xid, struct cifsTconInfo *tcon, | 3603 | CIFSSMBQFileInfo(const int xid, struct cifs_tcon *tcon, |
3345 | u16 netfid, FILE_ALL_INFO *pFindData) | 3604 | u16 netfid, FILE_ALL_INFO *pFindData) |
3346 | { | 3605 | { |
3347 | struct smb_t2_qfi_req *pSMB = NULL; | 3606 | struct smb_t2_qfi_req *pSMB = NULL; |
@@ -3408,7 +3667,7 @@ QFileInfoRetry: | |||
3408 | } | 3667 | } |
3409 | 3668 | ||
3410 | int | 3669 | int |
3411 | CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon, | 3670 | CIFSSMBQPathInfo(const int xid, struct cifs_tcon *tcon, |
3412 | const unsigned char *searchName, | 3671 | const unsigned char *searchName, |
3413 | FILE_ALL_INFO *pFindData, | 3672 | FILE_ALL_INFO *pFindData, |
3414 | int legacy /* old style infolevel */, | 3673 | int legacy /* old style infolevel */, |
@@ -3509,7 +3768,7 @@ QPathInfoRetry: | |||
3509 | } | 3768 | } |
3510 | 3769 | ||
3511 | int | 3770 | int |
3512 | CIFSSMBUnixQFileInfo(const int xid, struct cifsTconInfo *tcon, | 3771 | CIFSSMBUnixQFileInfo(const int xid, struct cifs_tcon *tcon, |
3513 | u16 netfid, FILE_UNIX_BASIC_INFO *pFindData) | 3772 | u16 netfid, FILE_UNIX_BASIC_INFO *pFindData) |
3514 | { | 3773 | { |
3515 | struct smb_t2_qfi_req *pSMB = NULL; | 3774 | struct smb_t2_qfi_req *pSMB = NULL; |
@@ -3578,7 +3837,7 @@ UnixQFileInfoRetry: | |||
3578 | } | 3837 | } |
3579 | 3838 | ||
3580 | int | 3839 | int |
3581 | CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon, | 3840 | CIFSSMBUnixQPathInfo(const int xid, struct cifs_tcon *tcon, |
3582 | const unsigned char *searchName, | 3841 | const unsigned char *searchName, |
3583 | FILE_UNIX_BASIC_INFO *pFindData, | 3842 | FILE_UNIX_BASIC_INFO *pFindData, |
3584 | const struct nls_table *nls_codepage, int remap) | 3843 | const struct nls_table *nls_codepage, int remap) |
@@ -3664,7 +3923,7 @@ UnixQPathInfoRetry: | |||
3664 | 3923 | ||
3665 | /* xid, tcon, searchName and codepage are input parms, rest are returned */ | 3924 | /* xid, tcon, searchName and codepage are input parms, rest are returned */ |
3666 | int | 3925 | int |
3667 | CIFSFindFirst(const int xid, struct cifsTconInfo *tcon, | 3926 | CIFSFindFirst(const int xid, struct cifs_tcon *tcon, |
3668 | const char *searchName, | 3927 | const char *searchName, |
3669 | const struct nls_table *nls_codepage, | 3928 | const struct nls_table *nls_codepage, |
3670 | __u16 *pnetfid, | 3929 | __u16 *pnetfid, |
@@ -3812,7 +4071,7 @@ findFirstRetry: | |||
3812 | return rc; | 4071 | return rc; |
3813 | } | 4072 | } |
3814 | 4073 | ||
3815 | int CIFSFindNext(const int xid, struct cifsTconInfo *tcon, | 4074 | int CIFSFindNext(const int xid, struct cifs_tcon *tcon, |
3816 | __u16 searchHandle, struct cifs_search_info *psrch_inf) | 4075 | __u16 searchHandle, struct cifs_search_info *psrch_inf) |
3817 | { | 4076 | { |
3818 | TRANSACTION2_FNEXT_REQ *pSMB = NULL; | 4077 | TRANSACTION2_FNEXT_REQ *pSMB = NULL; |
@@ -3950,7 +4209,7 @@ FNext2_err_exit: | |||
3950 | } | 4209 | } |
3951 | 4210 | ||
3952 | int | 4211 | int |
3953 | CIFSFindClose(const int xid, struct cifsTconInfo *tcon, | 4212 | CIFSFindClose(const int xid, struct cifs_tcon *tcon, |
3954 | const __u16 searchHandle) | 4213 | const __u16 searchHandle) |
3955 | { | 4214 | { |
3956 | int rc = 0; | 4215 | int rc = 0; |
@@ -3982,7 +4241,7 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon, | |||
3982 | } | 4241 | } |
3983 | 4242 | ||
3984 | int | 4243 | int |
3985 | CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon, | 4244 | CIFSGetSrvInodeNumber(const int xid, struct cifs_tcon *tcon, |
3986 | const unsigned char *searchName, | 4245 | const unsigned char *searchName, |
3987 | __u64 *inode_number, | 4246 | __u64 *inode_number, |
3988 | const struct nls_table *nls_codepage, int remap) | 4247 | const struct nls_table *nls_codepage, int remap) |
@@ -4184,7 +4443,7 @@ parse_DFS_referrals_exit: | |||
4184 | } | 4443 | } |
4185 | 4444 | ||
4186 | int | 4445 | int |
4187 | CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses, | 4446 | CIFSGetDFSRefer(const int xid, struct cifs_ses *ses, |
4188 | const unsigned char *searchName, | 4447 | const unsigned char *searchName, |
4189 | struct dfs_info3_param **target_nodes, | 4448 | struct dfs_info3_param **target_nodes, |
4190 | unsigned int *num_of_nodes, | 4449 | unsigned int *num_of_nodes, |
@@ -4233,7 +4492,7 @@ getDFSRetry: | |||
4233 | } | 4492 | } |
4234 | 4493 | ||
4235 | if (ses->server) { | 4494 | if (ses->server) { |
4236 | if (ses->server->secMode & | 4495 | if (ses->server->sec_mode & |
4237 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | 4496 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) |
4238 | pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; | 4497 | pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; |
4239 | } | 4498 | } |
@@ -4298,7 +4557,7 @@ GetDFSRefExit: | |||
4298 | 4557 | ||
4299 | /* Query File System Info such as free space to old servers such as Win 9x */ | 4558 | /* Query File System Info such as free space to old servers such as Win 9x */ |
4300 | int | 4559 | int |
4301 | SMBOldQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData) | 4560 | SMBOldQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData) |
4302 | { | 4561 | { |
4303 | /* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */ | 4562 | /* level 0x01 SMB_QUERY_FILE_SYSTEM_INFO */ |
4304 | TRANSACTION2_QFSI_REQ *pSMB = NULL; | 4563 | TRANSACTION2_QFSI_REQ *pSMB = NULL; |
@@ -4377,7 +4636,7 @@ oldQFSInfoRetry: | |||
4377 | } | 4636 | } |
4378 | 4637 | ||
4379 | int | 4638 | int |
4380 | CIFSSMBQFSInfo(const int xid, struct cifsTconInfo *tcon, struct kstatfs *FSData) | 4639 | CIFSSMBQFSInfo(const int xid, struct cifs_tcon *tcon, struct kstatfs *FSData) |
4381 | { | 4640 | { |
4382 | /* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */ | 4641 | /* level 0x103 SMB_QUERY_FILE_SYSTEM_INFO */ |
4383 | TRANSACTION2_QFSI_REQ *pSMB = NULL; | 4642 | TRANSACTION2_QFSI_REQ *pSMB = NULL; |
@@ -4456,7 +4715,7 @@ QFSInfoRetry: | |||
4456 | } | 4715 | } |
4457 | 4716 | ||
4458 | int | 4717 | int |
4459 | CIFSSMBQFSAttributeInfo(const int xid, struct cifsTconInfo *tcon) | 4718 | CIFSSMBQFSAttributeInfo(const int xid, struct cifs_tcon *tcon) |
4460 | { | 4719 | { |
4461 | /* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */ | 4720 | /* level 0x105 SMB_QUERY_FILE_SYSTEM_INFO */ |
4462 | TRANSACTION2_QFSI_REQ *pSMB = NULL; | 4721 | TRANSACTION2_QFSI_REQ *pSMB = NULL; |
@@ -4526,7 +4785,7 @@ QFSAttributeRetry: | |||
4526 | } | 4785 | } |
4527 | 4786 | ||
4528 | int | 4787 | int |
4529 | CIFSSMBQFSDeviceInfo(const int xid, struct cifsTconInfo *tcon) | 4788 | CIFSSMBQFSDeviceInfo(const int xid, struct cifs_tcon *tcon) |
4530 | { | 4789 | { |
4531 | /* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */ | 4790 | /* level 0x104 SMB_QUERY_FILE_SYSTEM_INFO */ |
4532 | TRANSACTION2_QFSI_REQ *pSMB = NULL; | 4791 | TRANSACTION2_QFSI_REQ *pSMB = NULL; |
@@ -4597,7 +4856,7 @@ QFSDeviceRetry: | |||
4597 | } | 4856 | } |
4598 | 4857 | ||
4599 | int | 4858 | int |
4600 | CIFSSMBQFSUnixInfo(const int xid, struct cifsTconInfo *tcon) | 4859 | CIFSSMBQFSUnixInfo(const int xid, struct cifs_tcon *tcon) |
4601 | { | 4860 | { |
4602 | /* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */ | 4861 | /* level 0x200 SMB_QUERY_CIFS_UNIX_INFO */ |
4603 | TRANSACTION2_QFSI_REQ *pSMB = NULL; | 4862 | TRANSACTION2_QFSI_REQ *pSMB = NULL; |
@@ -4667,7 +4926,7 @@ QFSUnixRetry: | |||
4667 | } | 4926 | } |
4668 | 4927 | ||
4669 | int | 4928 | int |
4670 | CIFSSMBSetFSUnixInfo(const int xid, struct cifsTconInfo *tcon, __u64 cap) | 4929 | CIFSSMBSetFSUnixInfo(const int xid, struct cifs_tcon *tcon, __u64 cap) |
4671 | { | 4930 | { |
4672 | /* level 0x200 SMB_SET_CIFS_UNIX_INFO */ | 4931 | /* level 0x200 SMB_SET_CIFS_UNIX_INFO */ |
4673 | TRANSACTION2_SETFSI_REQ *pSMB = NULL; | 4932 | TRANSACTION2_SETFSI_REQ *pSMB = NULL; |
@@ -4741,7 +5000,7 @@ SETFSUnixRetry: | |||
4741 | 5000 | ||
4742 | 5001 | ||
4743 | int | 5002 | int |
4744 | CIFSSMBQFSPosixInfo(const int xid, struct cifsTconInfo *tcon, | 5003 | CIFSSMBQFSPosixInfo(const int xid, struct cifs_tcon *tcon, |
4745 | struct kstatfs *FSData) | 5004 | struct kstatfs *FSData) |
4746 | { | 5005 | { |
4747 | /* level 0x201 SMB_QUERY_CIFS_POSIX_INFO */ | 5006 | /* level 0x201 SMB_QUERY_CIFS_POSIX_INFO */ |
@@ -4834,7 +5093,7 @@ QFSPosixRetry: | |||
4834 | in Samba which this routine can run into */ | 5093 | in Samba which this routine can run into */ |
4835 | 5094 | ||
4836 | int | 5095 | int |
4837 | CIFSSMBSetEOF(const int xid, struct cifsTconInfo *tcon, const char *fileName, | 5096 | CIFSSMBSetEOF(const int xid, struct cifs_tcon *tcon, const char *fileName, |
4838 | __u64 size, bool SetAllocation, | 5097 | __u64 size, bool SetAllocation, |
4839 | const struct nls_table *nls_codepage, int remap) | 5098 | const struct nls_table *nls_codepage, int remap) |
4840 | { | 5099 | { |
@@ -4923,7 +5182,7 @@ SetEOFRetry: | |||
4923 | } | 5182 | } |
4924 | 5183 | ||
4925 | int | 5184 | int |
4926 | CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, | 5185 | CIFSSMBSetFileSize(const int xid, struct cifs_tcon *tcon, __u64 size, |
4927 | __u16 fid, __u32 pid_of_opener, bool SetAllocation) | 5186 | __u16 fid, __u32 pid_of_opener, bool SetAllocation) |
4928 | { | 5187 | { |
4929 | struct smb_com_transaction2_sfi_req *pSMB = NULL; | 5188 | struct smb_com_transaction2_sfi_req *pSMB = NULL; |
@@ -5005,7 +5264,7 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size, | |||
5005 | time and resort to the original setpathinfo level which takes the ancient | 5264 | time and resort to the original setpathinfo level which takes the ancient |
5006 | DOS time format with 2 second granularity */ | 5265 | DOS time format with 2 second granularity */ |
5007 | int | 5266 | int |
5008 | CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon, | 5267 | CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon, |
5009 | const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener) | 5268 | const FILE_BASIC_INFO *data, __u16 fid, __u32 pid_of_opener) |
5010 | { | 5269 | { |
5011 | struct smb_com_transaction2_sfi_req *pSMB = NULL; | 5270 | struct smb_com_transaction2_sfi_req *pSMB = NULL; |
@@ -5067,7 +5326,7 @@ CIFSSMBSetFileInfo(const int xid, struct cifsTconInfo *tcon, | |||
5067 | } | 5326 | } |
5068 | 5327 | ||
5069 | int | 5328 | int |
5070 | CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon, | 5329 | CIFSSMBSetFileDisposition(const int xid, struct cifs_tcon *tcon, |
5071 | bool delete_file, __u16 fid, __u32 pid_of_opener) | 5330 | bool delete_file, __u16 fid, __u32 pid_of_opener) |
5072 | { | 5331 | { |
5073 | struct smb_com_transaction2_sfi_req *pSMB = NULL; | 5332 | struct smb_com_transaction2_sfi_req *pSMB = NULL; |
@@ -5123,7 +5382,7 @@ CIFSSMBSetFileDisposition(const int xid, struct cifsTconInfo *tcon, | |||
5123 | } | 5382 | } |
5124 | 5383 | ||
5125 | int | 5384 | int |
5126 | CIFSSMBSetPathInfo(const int xid, struct cifsTconInfo *tcon, | 5385 | CIFSSMBSetPathInfo(const int xid, struct cifs_tcon *tcon, |
5127 | const char *fileName, const FILE_BASIC_INFO *data, | 5386 | const char *fileName, const FILE_BASIC_INFO *data, |
5128 | const struct nls_table *nls_codepage, int remap) | 5387 | const struct nls_table *nls_codepage, int remap) |
5129 | { | 5388 | { |
@@ -5207,7 +5466,7 @@ SetTimesRetry: | |||
5207 | handling it anyway and NT4 was what we thought it would be needed for | 5466 | handling it anyway and NT4 was what we thought it would be needed for |
5208 | Do not delete it until we prove whether needed for Win9x though */ | 5467 | Do not delete it until we prove whether needed for Win9x though */ |
5209 | int | 5468 | int |
5210 | CIFSSMBSetAttrLegacy(int xid, struct cifsTconInfo *tcon, char *fileName, | 5469 | CIFSSMBSetAttrLegacy(int xid, struct cifs_tcon *tcon, char *fileName, |
5211 | __u16 dos_attrs, const struct nls_table *nls_codepage) | 5470 | __u16 dos_attrs, const struct nls_table *nls_codepage) |
5212 | { | 5471 | { |
5213 | SETATTR_REQ *pSMB = NULL; | 5472 | SETATTR_REQ *pSMB = NULL; |
@@ -5295,7 +5554,7 @@ cifs_fill_unix_set_info(FILE_UNIX_BASIC_INFO *data_offset, | |||
5295 | } | 5554 | } |
5296 | 5555 | ||
5297 | int | 5556 | int |
5298 | CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon, | 5557 | CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon, |
5299 | const struct cifs_unix_set_info_args *args, | 5558 | const struct cifs_unix_set_info_args *args, |
5300 | u16 fid, u32 pid_of_opener) | 5559 | u16 fid, u32 pid_of_opener) |
5301 | { | 5560 | { |
@@ -5358,7 +5617,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifsTconInfo *tcon, | |||
5358 | } | 5617 | } |
5359 | 5618 | ||
5360 | int | 5619 | int |
5361 | CIFSSMBUnixSetPathInfo(const int xid, struct cifsTconInfo *tcon, char *fileName, | 5620 | CIFSSMBUnixSetPathInfo(const int xid, struct cifs_tcon *tcon, char *fileName, |
5362 | const struct cifs_unix_set_info_args *args, | 5621 | const struct cifs_unix_set_info_args *args, |
5363 | const struct nls_table *nls_codepage, int remap) | 5622 | const struct nls_table *nls_codepage, int remap) |
5364 | { | 5623 | { |
@@ -5445,7 +5704,7 @@ setPermsRetry: | |||
5445 | * the data isn't copied to it, but the length is returned. | 5704 | * the data isn't copied to it, but the length is returned. |
5446 | */ | 5705 | */ |
5447 | ssize_t | 5706 | ssize_t |
5448 | CIFSSMBQAllEAs(const int xid, struct cifsTconInfo *tcon, | 5707 | CIFSSMBQAllEAs(const int xid, struct cifs_tcon *tcon, |
5449 | const unsigned char *searchName, const unsigned char *ea_name, | 5708 | const unsigned char *searchName, const unsigned char *ea_name, |
5450 | char *EAData, size_t buf_size, | 5709 | char *EAData, size_t buf_size, |
5451 | const struct nls_table *nls_codepage, int remap) | 5710 | const struct nls_table *nls_codepage, int remap) |
@@ -5626,7 +5885,7 @@ QAllEAsOut: | |||
5626 | } | 5885 | } |
5627 | 5886 | ||
5628 | int | 5887 | int |
5629 | CIFSSMBSetEA(const int xid, struct cifsTconInfo *tcon, const char *fileName, | 5888 | CIFSSMBSetEA(const int xid, struct cifs_tcon *tcon, const char *fileName, |
5630 | const char *ea_name, const void *ea_value, | 5889 | const char *ea_name, const void *ea_value, |
5631 | const __u16 ea_value_len, const struct nls_table *nls_codepage, | 5890 | const __u16 ea_value_len, const struct nls_table *nls_codepage, |
5632 | int remap) | 5891 | int remap) |
@@ -5753,7 +6012,7 @@ SetEARetry: | |||
5753 | * incompatible for network fs clients, we could instead simply | 6012 | * incompatible for network fs clients, we could instead simply |
5754 | * expose this config flag by adding a future cifs (and smb2) notify ioctl. | 6013 | * expose this config flag by adding a future cifs (and smb2) notify ioctl. |
5755 | */ | 6014 | */ |
5756 | int CIFSSMBNotify(const int xid, struct cifsTconInfo *tcon, | 6015 | int CIFSSMBNotify(const int xid, struct cifs_tcon *tcon, |
5757 | const int notify_subdirs, const __u16 netfid, | 6016 | const int notify_subdirs, const __u16 netfid, |
5758 | __u32 filter, struct file *pfile, int multishot, | 6017 | __u32 filter, struct file *pfile, int multishot, |
5759 | const struct nls_table *nls_codepage) | 6018 | const struct nls_table *nls_codepage) |
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index da284e3cb653..6d88b82537c3 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c | |||
@@ -57,62 +57,6 @@ | |||
57 | 57 | ||
58 | extern mempool_t *cifs_req_poolp; | 58 | extern mempool_t *cifs_req_poolp; |
59 | 59 | ||
60 | struct smb_vol { | ||
61 | char *username; | ||
62 | char *password; | ||
63 | char *domainname; | ||
64 | char *UNC; | ||
65 | char *UNCip; | ||
66 | char *iocharset; /* local code page for mapping to and from Unicode */ | ||
67 | char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */ | ||
68 | char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */ | ||
69 | uid_t cred_uid; | ||
70 | uid_t linux_uid; | ||
71 | gid_t linux_gid; | ||
72 | mode_t file_mode; | ||
73 | mode_t dir_mode; | ||
74 | unsigned secFlg; | ||
75 | bool retry:1; | ||
76 | bool intr:1; | ||
77 | bool setuids:1; | ||
78 | bool override_uid:1; | ||
79 | bool override_gid:1; | ||
80 | bool dynperm:1; | ||
81 | bool noperm:1; | ||
82 | bool no_psx_acl:1; /* set if posix acl support should be disabled */ | ||
83 | bool cifs_acl:1; | ||
84 | bool no_xattr:1; /* set if xattr (EA) support should be disabled*/ | ||
85 | bool server_ino:1; /* use inode numbers from server ie UniqueId */ | ||
86 | bool direct_io:1; | ||
87 | bool strict_io:1; /* strict cache behavior */ | ||
88 | bool remap:1; /* set to remap seven reserved chars in filenames */ | ||
89 | bool posix_paths:1; /* unset to not ask for posix pathnames. */ | ||
90 | bool no_linux_ext:1; | ||
91 | bool sfu_emul:1; | ||
92 | bool nullauth:1; /* attempt to authenticate with null user */ | ||
93 | bool nocase:1; /* request case insensitive filenames */ | ||
94 | bool nobrl:1; /* disable sending byte range locks to srv */ | ||
95 | bool mand_lock:1; /* send mandatory not posix byte range lock reqs */ | ||
96 | bool seal:1; /* request transport encryption on share */ | ||
97 | bool nodfs:1; /* Do not request DFS, even if available */ | ||
98 | bool local_lease:1; /* check leases only on local system, not remote */ | ||
99 | bool noblocksnd:1; | ||
100 | bool noautotune:1; | ||
101 | bool nostrictsync:1; /* do not force expensive SMBflush on every sync */ | ||
102 | bool fsc:1; /* enable fscache */ | ||
103 | bool mfsymlinks:1; /* use Minshall+French Symlinks */ | ||
104 | bool multiuser:1; | ||
105 | bool use_smb2:1; /* force smb2 use on mount instead of cifs */ | ||
106 | unsigned int rsize; | ||
107 | unsigned int wsize; | ||
108 | bool sockopt_tcp_nodelay:1; | ||
109 | unsigned short int port; | ||
110 | unsigned long actimeo; /* attribute cache timeout (jiffies) */ | ||
111 | char *prepath; | ||
112 | struct sockaddr_storage srcaddr; /* allow binding to a local IP */ | ||
113 | struct nls_table *local_nls; | ||
114 | }; | ||
115 | |||
116 | /* FIXME: should these be tunable? */ | 60 | /* FIXME: should these be tunable? */ |
117 | #define TLINK_ERROR_EXPIRE (1 * HZ) | 61 | #define TLINK_ERROR_EXPIRE (1 * HZ) |
118 | #define TLINK_IDLE_EXPIRE (600 * HZ) | 62 | #define TLINK_IDLE_EXPIRE (600 * HZ) |
@@ -135,9 +79,10 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
135 | { | 79 | { |
136 | int rc = 0; | 80 | int rc = 0; |
137 | struct list_head *tmp, *tmp2; | 81 | struct list_head *tmp, *tmp2; |
138 | struct cifsSesInfo *ses; | 82 | struct cifs_ses *ses; |
139 | struct cifsTconInfo *tcon; | 83 | struct cifs_tcon *tcon; |
140 | struct mid_q_entry *mid_entry; | 84 | struct mid_q_entry *mid_entry; |
85 | struct list_head retry_list; | ||
141 | 86 | ||
142 | spin_lock(&GlobalMid_Lock); | 87 | spin_lock(&GlobalMid_Lock); |
143 | if (server->tcpStatus == CifsExiting) { | 88 | if (server->tcpStatus == CifsExiting) { |
@@ -157,11 +102,11 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
157 | cFYI(1, "%s: marking sessions and tcons for reconnect", __func__); | 102 | cFYI(1, "%s: marking sessions and tcons for reconnect", __func__); |
158 | spin_lock(&cifs_tcp_ses_lock); | 103 | spin_lock(&cifs_tcp_ses_lock); |
159 | list_for_each(tmp, &server->smb_ses_list) { | 104 | list_for_each(tmp, &server->smb_ses_list) { |
160 | ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); | 105 | ses = list_entry(tmp, struct cifs_ses, smb_ses_list); |
161 | ses->need_reconnect = true; | 106 | ses->need_reconnect = true; |
162 | ses->ipc_tid = 0; | 107 | ses->ipc_tid = 0; |
163 | list_for_each(tmp2, &ses->tcon_list) { | 108 | list_for_each(tmp2, &ses->tcon_list) { |
164 | tcon = list_entry(tmp2, struct cifsTconInfo, tcon_list); | 109 | tcon = list_entry(tmp2, struct cifs_tcon, tcon_list); |
165 | tcon->need_reconnect = true; | 110 | tcon->need_reconnect = true; |
166 | } | 111 | } |
167 | } | 112 | } |
@@ -189,16 +134,23 @@ cifs_reconnect(struct TCP_Server_Info *server) | |||
189 | mutex_unlock(&server->srv_mutex); | 134 | mutex_unlock(&server->srv_mutex); |
190 | 135 | ||
191 | /* mark submitted MIDs for retry and issue callback */ | 136 | /* mark submitted MIDs for retry and issue callback */ |
192 | cFYI(1, "%s: issuing mid callbacks", __func__); | 137 | INIT_LIST_HEAD(&retry_list); |
138 | cFYI(1, "%s: moving mids to private list", __func__); | ||
193 | spin_lock(&GlobalMid_Lock); | 139 | spin_lock(&GlobalMid_Lock); |
194 | list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { | 140 | list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { |
195 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); | 141 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); |
196 | if (mid_entry->midState == MID_REQUEST_SUBMITTED) | 142 | if (mid_entry->midState == MID_REQUEST_SUBMITTED) |
197 | mid_entry->midState = MID_RETRY_NEEDED; | 143 | mid_entry->midState = MID_RETRY_NEEDED; |
144 | list_move(&mid_entry->qhead, &retry_list); | ||
145 | } | ||
146 | spin_unlock(&GlobalMid_Lock); | ||
147 | |||
148 | cFYI(1, "%s: issuing mid callbacks", __func__); | ||
149 | list_for_each_safe(tmp, tmp2, &retry_list) { | ||
150 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); | ||
198 | list_del_init(&mid_entry->qhead); | 151 | list_del_init(&mid_entry->qhead); |
199 | mid_entry->callback(mid_entry); | 152 | mid_entry->callback(mid_entry); |
200 | } | 153 | } |
201 | spin_unlock(&GlobalMid_Lock); | ||
202 | 154 | ||
203 | while (server->tcpStatus == CifsNeedReconnect) { | 155 | while (server->tcpStatus == CifsNeedReconnect) { |
204 | try_to_freeze(); | 156 | try_to_freeze(); |
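The cifs_reconnect() hunk above, and the demux/teardown hunks that follow, all apply the same fix: mid callbacks are no longer invoked while GlobalMid_Lock is held; the entries are first moved onto a private list under the lock and the callbacks run after it is dropped. Reduced to its core (the MID_RETRY_NEEDED/MID_SHUTDOWN state updates left out), the pattern is:

    /* "drain under the lock, call back outside it" -- distilled from the hunks above/below */
    static void drain_mid_queue(struct TCP_Server_Info *server)
    {
            struct list_head dispose_list;
            struct list_head *tmp, *tmp2;
            struct mid_q_entry *mid_entry;

            INIT_LIST_HEAD(&dispose_list);

            spin_lock(&GlobalMid_Lock);
            list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
                    mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
                    list_move(&mid_entry->qhead, &dispose_list); /* unlink from the shared queue */
            }
            spin_unlock(&GlobalMid_Lock);

            /* callbacks may take other locks, so they run with GlobalMid_Lock dropped */
            list_for_each_safe(tmp, tmp2, &dispose_list) {
                    mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
                    list_del_init(&mid_entry->qhead);
                    mid_entry->callback(mid_entry);
            }
    }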
@@ -672,12 +624,12 @@ multi_t2_fnd: | |||
672 | mid_entry->when_received = jiffies; | 624 | mid_entry->when_received = jiffies; |
673 | #endif | 625 | #endif |
674 | list_del_init(&mid_entry->qhead); | 626 | list_del_init(&mid_entry->qhead); |
675 | mid_entry->callback(mid_entry); | ||
676 | break; | 627 | break; |
677 | } | 628 | } |
678 | spin_unlock(&GlobalMid_Lock); | 629 | spin_unlock(&GlobalMid_Lock); |
679 | 630 | ||
680 | if (mid_entry != NULL) { | 631 | if (mid_entry != NULL) { |
632 | mid_entry->callback(mid_entry); | ||
681 | /* Was previous buf put in mpx struct for multi-rsp? */ | 633 | /* Was previous buf put in mpx struct for multi-rsp? */ |
682 | if (!isMultiRsp) { | 634 | if (!isMultiRsp) { |
683 | /* smb buffer will be freed by user thread */ | 635 | /* smb buffer will be freed by user thread */ |
@@ -741,15 +693,25 @@ multi_t2_fnd: | |||
741 | cifs_small_buf_release(smallbuf); | 693 | cifs_small_buf_release(smallbuf); |
742 | 694 | ||
743 | if (!list_empty(&server->pending_mid_q)) { | 695 | if (!list_empty(&server->pending_mid_q)) { |
696 | struct list_head dispose_list; | ||
697 | |||
698 | INIT_LIST_HEAD(&dispose_list); | ||
744 | spin_lock(&GlobalMid_Lock); | 699 | spin_lock(&GlobalMid_Lock); |
745 | list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { | 700 | list_for_each_safe(tmp, tmp2, &server->pending_mid_q) { |
746 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); | 701 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); |
747 | cFYI(1, "Clearing Mid 0x%x - issuing callback", | 702 | cFYI(1, "Clearing mid 0x%x", mid_entry->mid); |
748 | mid_entry->mid); | 703 | mid_entry->midState = MID_SHUTDOWN; |
704 | list_move(&mid_entry->qhead, &dispose_list); | ||
705 | } | ||
706 | spin_unlock(&GlobalMid_Lock); | ||
707 | |||
708 | /* now walk dispose list and issue callbacks */ | ||
709 | list_for_each_safe(tmp, tmp2, &dispose_list) { | ||
710 | mid_entry = list_entry(tmp, struct mid_q_entry, qhead); | ||
711 | cFYI(1, "Callback mid 0x%x", mid_entry->mid); | ||
749 | list_del_init(&mid_entry->qhead); | 712 | list_del_init(&mid_entry->qhead); |
750 | mid_entry->callback(mid_entry); | 713 | mid_entry->callback(mid_entry); |
751 | } | 714 | } |
752 | spin_unlock(&GlobalMid_Lock); | ||
753 | /* 1/8th of sec is more than enough time for them to exit */ | 715 | /* 1/8th of sec is more than enough time for them to exit */ |
754 | msleep(125); | 716 | msleep(125); |
755 | } | 717 | } |
@@ -1062,13 +1024,6 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1062 | (strnicmp(value, "1", 1) == 0)) { | 1024 | (strnicmp(value, "1", 1) == 0)) { |
1063 | /* this is the default */ | 1025 | /* this is the default */ |
1064 | continue; | 1026 | continue; |
1065 | } else if ((strnicmp(value, "smb2", 4) == 0) || | ||
1066 | (strnicmp(value, "2", 1) == 0)) { | ||
1067 | #ifdef CONFIG_CIFS_SMB2 | ||
1068 | vol->use_smb2 = true; | ||
1069 | #else | ||
1070 | cERROR(1, "smb2 support not enabled"); | ||
1071 | #endif /* CONFIG_CIFS_SMB2 */ | ||
1072 | } | 1027 | } |
1073 | } else if ((strnicmp(data, "unc", 3) == 0) | 1028 | } else if ((strnicmp(data, "unc", 3) == 0) |
1074 | || (strnicmp(data, "target", 6) == 0) | 1029 | || (strnicmp(data, "target", 6) == 0) |
@@ -1404,6 +1359,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname, | |||
1404 | vol->server_ino = 1; | 1359 | vol->server_ino = 1; |
1405 | } else if (strnicmp(data, "noserverino", 9) == 0) { | 1360 | } else if (strnicmp(data, "noserverino", 9) == 0) { |
1406 | vol->server_ino = 0; | 1361 | vol->server_ino = 0; |
1362 | } else if (strnicmp(data, "rwpidforward", 4) == 0) { | ||
1363 | vol->rwpidforward = 1; | ||
1407 | } else if (strnicmp(data, "cifsacl", 7) == 0) { | 1364 | } else if (strnicmp(data, "cifsacl", 7) == 0) { |
1408 | vol->cifs_acl = 1; | 1365 | vol->cifs_acl = 1; |
1409 | } else if (strnicmp(data, "nocifsacl", 9) == 0) { | 1366 | } else if (strnicmp(data, "nocifsacl", 9) == 0) { |
@@ -1640,16 +1597,35 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol) | |||
1640 | 1597 | ||
1641 | /* now check if signing mode is acceptable */ | 1598 | /* now check if signing mode is acceptable */ |
1642 | if ((secFlags & CIFSSEC_MAY_SIGN) == 0 && | 1599 | if ((secFlags & CIFSSEC_MAY_SIGN) == 0 && |
1643 | (server->secMode & SECMODE_SIGN_REQUIRED)) | 1600 | (server->sec_mode & SECMODE_SIGN_REQUIRED)) |
1644 | return false; | 1601 | return false; |
1645 | else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) && | 1602 | else if (((secFlags & CIFSSEC_MUST_SIGN) == CIFSSEC_MUST_SIGN) && |
1646 | (server->secMode & | 1603 | (server->sec_mode & |
1647 | (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0) | 1604 | (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)) == 0) |
1648 | return false; | 1605 | return false; |
1649 | 1606 | ||
1650 | return true; | 1607 | return true; |
1651 | } | 1608 | } |
1652 | 1609 | ||
1610 | static int match_server(struct TCP_Server_Info *server, struct sockaddr *addr, | ||
1611 | struct smb_vol *vol) | ||
1612 | { | ||
1613 | if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns)) | ||
1614 | return 0; | ||
1615 | |||
1616 | if (!match_address(server, addr, | ||
1617 | (struct sockaddr *)&vol->srcaddr)) | ||
1618 | return 0; | ||
1619 | |||
1620 | if (!match_port(server, addr)) | ||
1621 | return 0; | ||
1622 | |||
1623 | if (!match_security(server, vol)) | ||
1624 | return 0; | ||
1625 | |||
1626 | return 1; | ||
1627 | } | ||
1628 | |||
1653 | static struct TCP_Server_Info * | 1629 | static struct TCP_Server_Info * |
1654 | cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol) | 1630 | cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol) |
1655 | { | 1631 | { |
@@ -1657,17 +1633,7 @@ cifs_find_tcp_session(struct sockaddr *addr, struct smb_vol *vol) | |||
1657 | 1633 | ||
1658 | spin_lock(&cifs_tcp_ses_lock); | 1634 | spin_lock(&cifs_tcp_ses_lock); |
1659 | list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { | 1635 | list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) { |
1660 | if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns)) | 1636 | if (!match_server(server, addr, vol)) |
1661 | continue; | ||
1662 | |||
1663 | if (!match_address(server, addr, | ||
1664 | (struct sockaddr *)&vol->srcaddr)) | ||
1665 | continue; | ||
1666 | |||
1667 | if (!match_port(server, addr)) | ||
1668 | continue; | ||
1669 | |||
1670 | if (!match_security(server, vol)) | ||
1671 | continue; | 1637 | continue; |
1672 | 1638 | ||
1673 | ++server->srv_count; | 1639 | ++server->srv_count; |
@@ -1861,32 +1827,39 @@ out_err: | |||
1861 | return ERR_PTR(rc); | 1827 | return ERR_PTR(rc); |
1862 | } | 1828 | } |
1863 | 1829 | ||
1864 | static struct cifsSesInfo * | 1830 | static int match_session(struct cifs_ses *ses, struct smb_vol *vol) |
1831 | { | ||
1832 | switch (ses->server->secType) { | ||
1833 | case Kerberos: | ||
1834 | if (vol->cred_uid != ses->cred_uid) | ||
1835 | return 0; | ||
1836 | break; | ||
1837 | default: | ||
1838 | /* anything else takes username/password */ | ||
1839 | if (ses->user_name == NULL) | ||
1840 | return 0; | ||
1841 | if (strncmp(ses->user_name, vol->username, | ||
1842 | MAX_USERNAME_SIZE)) | ||
1843 | return 0; | ||
1844 | if (strlen(vol->username) != 0 && | ||
1845 | ses->password != NULL && | ||
1846 | strncmp(ses->password, | ||
1847 | vol->password ? vol->password : "", | ||
1848 | MAX_PASSWORD_SIZE)) | ||
1849 | return 0; | ||
1850 | } | ||
1851 | return 1; | ||
1852 | } | ||
1853 | |||
1854 | static struct cifs_ses * | ||
1865 | cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) | 1855 | cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) |
1866 | { | 1856 | { |
1867 | struct cifsSesInfo *ses; | 1857 | struct cifs_ses *ses; |
1868 | 1858 | ||
1869 | spin_lock(&cifs_tcp_ses_lock); | 1859 | spin_lock(&cifs_tcp_ses_lock); |
1870 | list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { | 1860 | list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) { |
1871 | switch (server->secType) { | 1861 | if (!match_session(ses, vol)) |
1872 | case Kerberos: | 1862 | continue; |
1873 | if (vol->cred_uid != ses->cred_uid) | ||
1874 | continue; | ||
1875 | break; | ||
1876 | default: | ||
1877 | /* anything else takes username/password */ | ||
1878 | if (ses->user_name == NULL) | ||
1879 | continue; | ||
1880 | if (strncmp(ses->user_name, vol->username, | ||
1881 | MAX_USERNAME_SIZE)) | ||
1882 | continue; | ||
1883 | if (strlen(vol->username) != 0 && | ||
1884 | ses->password != NULL && | ||
1885 | strncmp(ses->password, | ||
1886 | vol->password ? vol->password : "", | ||
1887 | MAX_PASSWORD_SIZE)) | ||
1888 | continue; | ||
1889 | } | ||
1890 | ++ses->ses_count; | 1863 | ++ses->ses_count; |
1891 | spin_unlock(&cifs_tcp_ses_lock); | 1864 | spin_unlock(&cifs_tcp_ses_lock); |
1892 | return ses; | 1865 | return ses; |
@@ -1896,7 +1869,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb_vol *vol) | |||
1896 | } | 1869 | } |
1897 | 1870 | ||
1898 | static void | 1871 | static void |
1899 | cifs_put_smb_ses(struct cifsSesInfo *ses) | 1872 | cifs_put_smb_ses(struct cifs_ses *ses) |
1900 | { | 1873 | { |
1901 | int xid; | 1874 | int xid; |
1902 | struct TCP_Server_Info *server = ses->server; | 1875 | struct TCP_Server_Info *server = ses->server; |
@@ -1922,11 +1895,11 @@ cifs_put_smb_ses(struct cifsSesInfo *ses) | |||
1922 | 1895 | ||
1923 | static bool warned_on_ntlm; /* globals init to false automatically */ | 1896 | static bool warned_on_ntlm; /* globals init to false automatically */ |
1924 | 1897 | ||
1925 | static struct cifsSesInfo * | 1898 | static struct cifs_ses * |
1926 | cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) | 1899 | cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb_vol *volume_info) |
1927 | { | 1900 | { |
1928 | int rc = -ENOMEM, xid; | 1901 | int rc = -ENOMEM, xid; |
1929 | struct cifsSesInfo *ses; | 1902 | struct cifs_ses *ses; |
1930 | struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; | 1903 | struct sockaddr_in *addr = (struct sockaddr_in *)&server->dstaddr; |
1931 | struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; | 1904 | struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&server->dstaddr; |
1932 | 1905 | ||
@@ -2029,20 +2002,26 @@ get_ses_fail: | |||
2029 | return ERR_PTR(rc); | 2002 | return ERR_PTR(rc); |
2030 | } | 2003 | } |
2031 | 2004 | ||
2032 | static struct cifsTconInfo * | 2005 | static int match_tcon(struct cifs_tcon *tcon, const char *unc) |
2033 | cifs_find_tcon(struct cifsSesInfo *ses, const char *unc) | 2006 | { |
2007 | if (tcon->tidStatus == CifsExiting) | ||
2008 | return 0; | ||
2009 | if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE)) | ||
2010 | return 0; | ||
2011 | return 1; | ||
2012 | } | ||
2013 | |||
2014 | static struct cifs_tcon * | ||
2015 | cifs_find_tcon(struct cifs_ses *ses, const char *unc) | ||
2034 | { | 2016 | { |
2035 | struct list_head *tmp; | 2017 | struct list_head *tmp; |
2036 | struct cifsTconInfo *tcon; | 2018 | struct cifs_tcon *tcon; |
2037 | 2019 | ||
2038 | spin_lock(&cifs_tcp_ses_lock); | 2020 | spin_lock(&cifs_tcp_ses_lock); |
2039 | list_for_each(tmp, &ses->tcon_list) { | 2021 | list_for_each(tmp, &ses->tcon_list) { |
2040 | tcon = list_entry(tmp, struct cifsTconInfo, tcon_list); | 2022 | tcon = list_entry(tmp, struct cifs_tcon, tcon_list); |
2041 | if (tcon->tidStatus == CifsExiting) | 2023 | if (!match_tcon(tcon, unc)) |
2042 | continue; | ||
2043 | if (strncmp(tcon->treeName, unc, MAX_TREE_SIZE)) | ||
2044 | continue; | 2024 | continue; |
2045 | |||
2046 | ++tcon->tc_count; | 2025 | ++tcon->tc_count; |
2047 | spin_unlock(&cifs_tcp_ses_lock); | 2026 | spin_unlock(&cifs_tcp_ses_lock); |
2048 | return tcon; | 2027 | return tcon; |
@@ -2052,10 +2031,10 @@ cifs_find_tcon(struct cifsSesInfo *ses, const char *unc) | |||
2052 | } | 2031 | } |
2053 | 2032 | ||
2054 | static void | 2033 | static void |
2055 | cifs_put_tcon(struct cifsTconInfo *tcon) | 2034 | cifs_put_tcon(struct cifs_tcon *tcon) |
2056 | { | 2035 | { |
2057 | int xid; | 2036 | int xid; |
2058 | struct cifsSesInfo *ses = tcon->ses; | 2037 | struct cifs_ses *ses = tcon->ses; |
2059 | 2038 | ||
2060 | cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count); | 2039 | cFYI(1, "%s: tc_count=%d\n", __func__, tcon->tc_count); |
2061 | spin_lock(&cifs_tcp_ses_lock); | 2040 | spin_lock(&cifs_tcp_ses_lock); |
@@ -2076,11 +2055,11 @@ cifs_put_tcon(struct cifsTconInfo *tcon) | |||
2076 | cifs_put_smb_ses(ses); | 2055 | cifs_put_smb_ses(ses); |
2077 | } | 2056 | } |
2078 | 2057 | ||
2079 | static struct cifsTconInfo * | 2058 | static struct cifs_tcon * |
2080 | cifs_get_tcon(struct cifsSesInfo *ses, struct smb_vol *volume_info) | 2059 | cifs_get_tcon(struct cifs_ses *ses, struct smb_vol *volume_info) |
2081 | { | 2060 | { |
2082 | int rc, xid; | 2061 | int rc, xid; |
2083 | struct cifsTconInfo *tcon; | 2062 | struct cifs_tcon *tcon; |
2084 | 2063 | ||
2085 | tcon = cifs_find_tcon(ses, volume_info->UNC); | 2064 | tcon = cifs_find_tcon(ses, volume_info->UNC); |
2086 | if (tcon) { | 2065 | if (tcon) { |
@@ -2169,8 +2148,102 @@ cifs_put_tlink(struct tcon_link *tlink) | |||
2169 | return; | 2148 | return; |
2170 | } | 2149 | } |
2171 | 2150 | ||
2151 | static inline struct tcon_link * | ||
2152 | cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb); | ||
2153 | |||
2154 | static int | ||
2155 | compare_mount_options(struct super_block *sb, struct cifs_mnt_data *mnt_data) | ||
2156 | { | ||
2157 | struct cifs_sb_info *old = CIFS_SB(sb); | ||
2158 | struct cifs_sb_info *new = mnt_data->cifs_sb; | ||
2159 | |||
2160 | if ((sb->s_flags & CIFS_MS_MASK) != (mnt_data->flags & CIFS_MS_MASK)) | ||
2161 | return 0; | ||
2162 | |||
2163 | if ((old->mnt_cifs_flags & CIFS_MOUNT_MASK) != | ||
2164 | (new->mnt_cifs_flags & CIFS_MOUNT_MASK)) | ||
2165 | return 0; | ||
2166 | |||
2167 | if (old->rsize != new->rsize) | ||
2168 | return 0; | ||
2169 | |||
2170 | /* | ||
2171 | * We want to share the sb only if we don't specify a wsize or the | ||
2172 | * specified wsize is greater than or equal to the existing one. | ||
2173 | */ | ||
2174 | if (new->wsize && new->wsize < old->wsize) | ||
2175 | return 0; | ||
2176 | |||
2177 | if (old->mnt_uid != new->mnt_uid || old->mnt_gid != new->mnt_gid) | ||
2178 | return 0; | ||
2179 | |||
2180 | if (old->mnt_file_mode != new->mnt_file_mode || | ||
2181 | old->mnt_dir_mode != new->mnt_dir_mode) | ||
2182 | return 0; | ||
2183 | |||
2184 | if (strcmp(old->local_nls->charset, new->local_nls->charset)) | ||
2185 | return 0; | ||
2186 | |||
2187 | if (old->actimeo != new->actimeo) | ||
2188 | return 0; | ||
2189 | |||
2190 | return 1; | ||
2191 | } | ||
2192 | |||
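compare_mount_options() above decides whether a new mount may share an existing superblock; the wsize rule in particular only allows sharing when the new mount either leaves wsize unset or asks for at least as much as the existing superblock already has. A tiny hedged illustration of just that rule (the helper name below is made up, not a function in the patch):

    /* e.g. a new mount asking for wsize=65536 against an existing 1M sb
     * must not share it, because sharing would silently shrink writes. */
    static int wsize_allows_sharing(unsigned int new_wsize, unsigned int old_wsize)
    {
        if (new_wsize && new_wsize < old_wsize)
            return 0;       /* new mount wants less than the sb provides: no */
        return 1;           /* wsize unset, or >= existing: sharing is fine  */
    }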
2172 | int | 2193 | int |
2173 | get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path, | 2194 | cifs_match_super(struct super_block *sb, void *data) |
2195 | { | ||
2196 | struct cifs_mnt_data *mnt_data = (struct cifs_mnt_data *)data; | ||
2197 | struct smb_vol *volume_info; | ||
2198 | struct cifs_sb_info *cifs_sb; | ||
2199 | struct TCP_Server_Info *tcp_srv; | ||
2200 | struct cifs_ses *ses; | ||
2201 | struct cifs_tcon *tcon; | ||
2202 | struct tcon_link *tlink; | ||
2203 | struct sockaddr_storage addr; | ||
2204 | int rc = 0; | ||
2205 | |||
2206 | memset(&addr, 0, sizeof(struct sockaddr_storage)); | ||
2207 | |||
2208 | spin_lock(&cifs_tcp_ses_lock); | ||
2209 | cifs_sb = CIFS_SB(sb); | ||
2210 | tlink = cifs_get_tlink(cifs_sb_master_tlink(cifs_sb)); | ||
2211 | if (IS_ERR(tlink)) { | ||
2212 | spin_unlock(&cifs_tcp_ses_lock); | ||
2213 | return rc; | ||
2214 | } | ||
2215 | tcon = tlink_tcon(tlink); | ||
2216 | ses = tcon->ses; | ||
2217 | tcp_srv = ses->server; | ||
2218 | |||
2219 | volume_info = mnt_data->vol; | ||
2220 | |||
2221 | if (!volume_info->UNCip || !volume_info->UNC) | ||
2222 | goto out; | ||
2223 | |||
2224 | rc = cifs_fill_sockaddr((struct sockaddr *)&addr, | ||
2225 | volume_info->UNCip, | ||
2226 | strlen(volume_info->UNCip), | ||
2227 | volume_info->port); | ||
2228 | if (!rc) | ||
2229 | goto out; | ||
2230 | |||
2231 | if (!match_server(tcp_srv, (struct sockaddr *)&addr, volume_info) || | ||
2232 | !match_session(ses, volume_info) || | ||
2233 | !match_tcon(tcon, volume_info->UNC)) { | ||
2234 | rc = 0; | ||
2235 | goto out; | ||
2236 | } | ||
2237 | |||
2238 | rc = compare_mount_options(sb, mnt_data); | ||
2239 | out: | ||
2240 | cifs_put_tlink(tlink); | ||
2241 | spin_unlock(&cifs_tcp_ses_lock); | ||
2242 | return rc; | ||
2243 | } | ||
2244 | |||
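cifs_match_super() is shaped as the test callback that a VFS sget()-style superblock lookup invokes for each existing superblock: it only answers 1 when the server address, session, tcon and remaining mount options all line up. A compilable userspace sketch of that lookup shape follows; the struct, names and allocation are illustrative, not the real VFS API.

    #include <stdlib.h>

    struct super { struct super *next; void *fs_info; };

    typedef int (*match_fn)(struct super *, void *);

    /* Walk existing superblocks, reuse the first one the test callback
     * accepts, otherwise fall back to allocating a fresh one. */
    static struct super *sget_like(struct super *list, match_fn test, void *data)
    {
        struct super *s;

        for (s = list; s; s = s->next)
            if (test(s, data))
                return s;               /* compatible mount: reuse this sb */

        return calloc(1, sizeof(*s));   /* nothing matched: make a new one */
    }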
2245 | int | ||
2246 | get_dfs_path(int xid, struct cifs_ses *pSesInfo, const char *old_path, | ||
2174 | const struct nls_table *nls_codepage, unsigned int *pnum_referrals, | 2247 | const struct nls_table *nls_codepage, unsigned int *pnum_referrals, |
2175 | struct dfs_info3_param **preferrals, int remap) | 2248 | struct dfs_info3_param **preferrals, int remap) |
2176 | { | 2249 | { |
@@ -2469,7 +2542,7 @@ ip_connect(struct TCP_Server_Info *server) | |||
2469 | return generic_ip_connect(server); | 2542 | return generic_ip_connect(server); |
2470 | } | 2543 | } |
2471 | 2544 | ||
2472 | void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | 2545 | void reset_cifs_unix_caps(int xid, struct cifs_tcon *tcon, |
2473 | struct super_block *sb, struct smb_vol *vol_info) | 2546 | struct super_block *sb, struct smb_vol *vol_info) |
2474 | { | 2547 | { |
2475 | /* if we are reconnecting then should we check to see if | 2548 | /* if we are reconnecting then should we check to see if |
@@ -2498,7 +2571,7 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | |||
2498 | 2571 | ||
2499 | if (!CIFSSMBQFSUnixInfo(xid, tcon)) { | 2572 | if (!CIFSSMBQFSUnixInfo(xid, tcon)) { |
2500 | __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability); | 2573 | __u64 cap = le64_to_cpu(tcon->fsUnixInfo.Capability); |
2501 | 2574 | cFYI(1, "unix caps which server supports %lld", cap); | |
2502 | /* check for reconnect case in which we do not | 2575 | /* check for reconnect case in which we do not |
2503 | want to change the mount behavior if we can avoid it */ | 2576 | want to change the mount behavior if we can avoid it */ |
2504 | if (vol_info == NULL) { | 2577 | if (vol_info == NULL) { |
@@ -2516,6 +2589,9 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | |||
2516 | } | 2589 | } |
2517 | } | 2590 | } |
2518 | 2591 | ||
2592 | if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) | ||
2593 | cERROR(1, "per-share encryption not supported yet"); | ||
2594 | |||
2519 | cap &= CIFS_UNIX_CAP_MASK; | 2595 | cap &= CIFS_UNIX_CAP_MASK; |
2520 | if (vol_info && vol_info->no_psx_acl) | 2596 | if (vol_info && vol_info->no_psx_acl) |
2521 | cap &= ~CIFS_UNIX_POSIX_ACL_CAP; | 2597 | cap &= ~CIFS_UNIX_POSIX_ACL_CAP; |
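The added check above only warns when the server advertises CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP, since per-share encryption is not implemented yet; further down, cifs_mount() turns the same bit into -EACCES when it is seen on a reconnect. A placeholder illustration of that two-step decision (the bit value and error number below are made up for the example, not the real constants):

    #define EX_ENC_MANDATORY_CAP (1ULL << 9)
    #define EX_EACCES 13

    static int check_share_caps(unsigned long long caps, int reconnecting)
    {
        if (!(caps & EX_ENC_MANDATORY_CAP))
            return 0;               /* nothing mandated, carry on            */
        if (reconnecting)
            return -EX_EACCES;      /* refuse: we cannot encrypt this share  */
        return 0;                   /* initial mount: only warn, as above    */
    }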
@@ -2534,12 +2610,6 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | |||
2534 | CIFS_MOUNT_POSIX_PATHS; | 2610 | CIFS_MOUNT_POSIX_PATHS; |
2535 | } | 2611 | } |
2536 | 2612 | ||
2537 | /* We might be setting the path sep back to a different | ||
2538 | form if we are reconnecting and the server switched its | ||
2539 | posix path capability for this share */ | ||
2540 | if (sb && (CIFS_SB(sb)->prepathlen > 0)) | ||
2541 | CIFS_SB(sb)->prepath[0] = CIFS_DIR_SEP(CIFS_SB(sb)); | ||
2542 | |||
2543 | if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) { | 2613 | if (sb && (CIFS_SB(sb)->rsize > 127 * 1024)) { |
2544 | if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { | 2614 | if ((cap & CIFS_UNIX_LARGE_READ_CAP) == 0) { |
2545 | CIFS_SB(sb)->rsize = 127 * 1024; | 2615 | CIFS_SB(sb)->rsize = 127 * 1024; |
@@ -2564,6 +2634,10 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | |||
2564 | cFYI(1, "very large read cap"); | 2634 | cFYI(1, "very large read cap"); |
2565 | if (cap & CIFS_UNIX_LARGE_WRITE_CAP) | 2635 | if (cap & CIFS_UNIX_LARGE_WRITE_CAP) |
2566 | cFYI(1, "very large write cap"); | 2636 | cFYI(1, "very large write cap"); |
2637 | if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_CAP) | ||
2638 | cFYI(1, "transport encryption cap"); | ||
2639 | if (cap & CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP) | ||
2640 | cFYI(1, "mandatory transport encryption cap"); | ||
2567 | #endif /* CIFS_DEBUG2 */ | 2641 | #endif /* CIFS_DEBUG2 */ |
2568 | if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { | 2642 | if (CIFSSMBSetFSUnixInfo(xid, tcon, cap)) { |
2569 | if (vol_info == NULL) { | 2643 | if (vol_info == NULL) { |
@@ -2580,28 +2654,8 @@ void reset_cifs_unix_caps(int xid, struct cifsTconInfo *tcon, | |||
2580 | } | 2654 | } |
2581 | } | 2655 | } |
2582 | 2656 | ||
2583 | static void | 2657 | void cifs_setup_cifs_sb(struct smb_vol *pvolume_info, |
2584 | convert_delimiter(char *path, char delim) | 2658 | struct cifs_sb_info *cifs_sb) |
2585 | { | ||
2586 | int i; | ||
2587 | char old_delim; | ||
2588 | |||
2589 | if (path == NULL) | ||
2590 | return; | ||
2591 | |||
2592 | if (delim == '/') | ||
2593 | old_delim = '\\'; | ||
2594 | else | ||
2595 | old_delim = '/'; | ||
2596 | |||
2597 | for (i = 0; path[i] != '\0'; i++) { | ||
2598 | if (path[i] == old_delim) | ||
2599 | path[i] = delim; | ||
2600 | } | ||
2601 | } | ||
2602 | |||
2603 | static void setup_cifs_sb(struct smb_vol *pvolume_info, | ||
2604 | struct cifs_sb_info *cifs_sb) | ||
2605 | { | 2659 | { |
2606 | INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); | 2660 | INIT_DELAYED_WORK(&cifs_sb->prune_tlinks, cifs_prune_tlinks); |
2607 | 2661 | ||
@@ -2615,40 +2669,19 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info, | |||
2615 | else /* default */ | 2669 | else /* default */ |
2616 | cifs_sb->rsize = CIFSMaxBufSize; | 2670 | cifs_sb->rsize = CIFSMaxBufSize; |
2617 | 2671 | ||
2618 | if (pvolume_info->wsize > PAGEVEC_SIZE * PAGE_CACHE_SIZE) { | ||
2619 | cERROR(1, "wsize %d too large, using 4096 instead", | ||
2620 | pvolume_info->wsize); | ||
2621 | cifs_sb->wsize = 4096; | ||
2622 | } else if (pvolume_info->wsize) | ||
2623 | cifs_sb->wsize = pvolume_info->wsize; | ||
2624 | else | ||
2625 | cifs_sb->wsize = min_t(const int, | ||
2626 | PAGEVEC_SIZE * PAGE_CACHE_SIZE, | ||
2627 | 127*1024); | ||
2628 | /* old default of CIFSMaxBufSize was too small now | ||
2629 | that SMB Write2 can send multiple pages in kvec. | ||
2630 | RFC1001 does not describe what happens when frame | ||
2631 | bigger than 128K is sent so use that as max in | ||
2632 | conjunction with 52K kvec constraint on arch with 4K | ||
2633 | page size */ | ||
2634 | |||
2635 | if (cifs_sb->rsize < 2048) { | 2672 | if (cifs_sb->rsize < 2048) { |
2636 | cifs_sb->rsize = 2048; | 2673 | cifs_sb->rsize = 2048; |
2637 | /* Windows ME may prefer this */ | 2674 | /* Windows ME may prefer this */ |
2638 | cFYI(1, "readsize set to minimum: 2048"); | 2675 | cFYI(1, "readsize set to minimum: 2048"); |
2639 | } | 2676 | } |
2640 | /* calculate prepath */ | 2677 | |
2641 | cifs_sb->prepath = pvolume_info->prepath; | 2678 | /* |
2642 | if (cifs_sb->prepath) { | 2679 | * Temporarily set wsize for matching superblock. If we end up using |
2643 | cifs_sb->prepathlen = strlen(cifs_sb->prepath); | 2680 | * new sb then cifs_negotiate_wsize will later negotiate it downward |
2644 | /* we can not convert the / to \ in the path | 2681 | * if needed. |
2645 | separators in the prefixpath yet because we do not | 2682 | */ |
2646 | know (until reset_cifs_unix_caps is called later) | 2683 | cifs_sb->wsize = pvolume_info->wsize; |
2647 | whether POSIX PATH CAP is available. We normalize | 2684 | |
2648 | the / to \ after reset_cifs_unix_caps is called */ | ||
2649 | pvolume_info->prepath = NULL; | ||
2650 | } else | ||
2651 | cifs_sb->prepathlen = 0; | ||
2652 | cifs_sb->mnt_uid = pvolume_info->linux_uid; | 2685 | cifs_sb->mnt_uid = pvolume_info->linux_uid; |
2653 | cifs_sb->mnt_gid = pvolume_info->linux_gid; | 2686 | cifs_sb->mnt_gid = pvolume_info->linux_gid; |
2654 | cifs_sb->mnt_file_mode = pvolume_info->file_mode; | 2687 | cifs_sb->mnt_file_mode = pvolume_info->file_mode; |
@@ -2657,6 +2690,7 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info, | |||
2657 | cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode); | 2690 | cifs_sb->mnt_file_mode, cifs_sb->mnt_dir_mode); |
2658 | 2691 | ||
2659 | cifs_sb->actimeo = pvolume_info->actimeo; | 2692 | cifs_sb->actimeo = pvolume_info->actimeo; |
2693 | cifs_sb->local_nls = pvolume_info->local_nls; | ||
2660 | 2694 | ||
2661 | if (pvolume_info->noperm) | 2695 | if (pvolume_info->noperm) |
2662 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; | 2696 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NO_PERM; |
@@ -2676,6 +2710,8 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info, | |||
2676 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC; | 2710 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOSSYNC; |
2677 | if (pvolume_info->mand_lock) | 2711 | if (pvolume_info->mand_lock) |
2678 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL; | 2712 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_NOPOSIXBRL; |
2713 | if (pvolume_info->rwpidforward) | ||
2714 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_RWPIDFORWARD; | ||
2679 | if (pvolume_info->cifs_acl) | 2715 | if (pvolume_info->cifs_acl) |
2680 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; | 2716 | cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_CIFS_ACL; |
2681 | if (pvolume_info->override_uid) | 2717 | if (pvolume_info->override_uid) |
@@ -2709,8 +2745,55 @@ static void setup_cifs_sb(struct smb_vol *pvolume_info, | |||
2709 | "mount option supported"); | 2745 | "mount option supported"); |
2710 | } | 2746 | } |
2711 | 2747 | ||
2748 | /* | ||
2749 | * When the server supports very large writes via POSIX extensions, we can | ||
2750 | * allow up to 2^24 - PAGE_CACHE_SIZE. | ||
2751 | * | ||
2752 | * Note that this might make for "interesting" allocation problems during | ||
2753 | * writeback however (as we have to allocate an array of pointers for the | ||
2754 | * pages). A 16M write means ~32kb page array with PAGE_CACHE_SIZE == 4096. | ||
2755 | */ | ||
2756 | #define CIFS_MAX_WSIZE ((1<<24) - PAGE_CACHE_SIZE) | ||
2757 | |||
2758 | /* | ||
2759 | * When the server doesn't allow large posix writes, default to a wsize of | ||
2760 | * 128k - PAGE_CACHE_SIZE -- one page less than the largest frame size | ||
2761 | * described in RFC1001. This allows space for the header without going over | ||
2762 | * that by default. | ||
2763 | */ | ||
2764 | #define CIFS_MAX_RFC1001_WSIZE (128 * 1024 - PAGE_CACHE_SIZE) | ||
2765 | |||
2766 | /* | ||
2767 | * The default wsize is 1M. find_get_pages seems to return a maximum of 256 | ||
2768 | * pages in a single call. With PAGE_CACHE_SIZE == 4k, this means we can fill | ||
2769 | * a single wsize request with a single call. | ||
2770 | */ | ||
2771 | #define CIFS_DEFAULT_WSIZE (1024 * 1024) | ||
2772 | |||
2773 | static unsigned int | ||
2774 | cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *pvolume_info) | ||
2775 | { | ||
2776 | __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); | ||
2777 | struct TCP_Server_Info *server = tcon->ses->server; | ||
2778 | unsigned int wsize = pvolume_info->wsize ? pvolume_info->wsize : | ||
2779 | CIFS_DEFAULT_WSIZE; | ||
2780 | |||
2781 | /* can server support 24-bit write sizes? (via UNIX extensions) */ | ||
2782 | if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) | ||
2783 | wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1001_WSIZE); | ||
2784 | |||
2785 | /* no CAP_LARGE_WRITE_X? Limit it to 16 bits */ | ||
2786 | if (!(server->capabilities & CAP_LARGE_WRITE_X)) | ||
2787 | wsize = min_t(unsigned int, wsize, USHRT_MAX); | ||
2788 | |||
2789 | /* hard limit of CIFS_MAX_WSIZE */ | ||
2790 | wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE); | ||
2791 | |||
2792 | return wsize; | ||
2793 | } | ||
2794 | |||
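The three macros and cifs_negotiate_wsize() above clamp the requested (or default 1M) write size against what the server advertises: roughly 16M minus a page with POSIX large writes, 128K minus a page without them, and 65535 when CAP_LARGE_WRITE_X is absent. A compilable userspace rendering of the same arithmetic, with placeholder names for the page size and capability flags:

    #include <stdio.h>

    #define PAGE_SZ                4096u
    #define EX_DEFAULT_WSIZE       (1024u * 1024u)            /* 1M default      */
    #define EX_MAX_RFC1001_WSIZE   (128u * 1024u - PAGE_SZ)   /* no POSIX large  */
    #define EX_MAX_WSIZE           ((1u << 24) - PAGE_SZ)     /* hard cap, ~16M  */

    static unsigned int negotiate_wsize(unsigned int requested,
                                        int posix_large_write,
                                        int cap_large_write_x)
    {
        unsigned int wsize = requested ? requested : EX_DEFAULT_WSIZE;

        if (!posix_large_write && wsize > EX_MAX_RFC1001_WSIZE)
            wsize = EX_MAX_RFC1001_WSIZE;   /* 128K frame minus one page   */
        if (!cap_large_write_x && wsize > 65535u)
            wsize = 65535u;                 /* 16-bit write sizes only     */
        if (wsize > EX_MAX_WSIZE)
            wsize = EX_MAX_WSIZE;           /* absolute ceiling            */
        return wsize;
    }

    int main(void)
    {
        /* nothing requested, POSIX large writes available: stays at 1M    */
        printf("%u\n", negotiate_wsize(0, 1, 1));
        /* 1M requested but no large-write capability at all: 65535        */
        printf("%u\n", negotiate_wsize(1024 * 1024, 0, 0));
        return 0;
    }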
2712 | static int | 2795 | static int |
2713 | is_path_accessible(int xid, struct cifsTconInfo *tcon, | 2796 | is_path_accessible(int xid, struct cifs_tcon *tcon, |
2714 | struct cifs_sb_info *cifs_sb, const char *full_path) | 2797 | struct cifs_sb_info *cifs_sb, const char *full_path) |
2715 | { | 2798 | { |
2716 | int rc; | 2799 | int rc; |
@@ -2733,8 +2816,8 @@ is_path_accessible(int xid, struct cifsTconInfo *tcon, | |||
2733 | return rc; | 2816 | return rc; |
2734 | } | 2817 | } |
2735 | 2818 | ||
2736 | static void | 2819 | void |
2737 | cleanup_volume_info(struct smb_vol **pvolume_info) | 2820 | cifs_cleanup_volume_info(struct smb_vol **pvolume_info) |
2738 | { | 2821 | { |
2739 | struct smb_vol *volume_info; | 2822 | struct smb_vol *volume_info; |
2740 | 2823 | ||
@@ -2764,24 +2847,13 @@ build_unc_path_to_root(const struct smb_vol *volume_info, | |||
2764 | char *full_path; | 2847 | char *full_path; |
2765 | 2848 | ||
2766 | int unc_len = strnlen(volume_info->UNC, MAX_TREE_SIZE + 1); | 2849 | int unc_len = strnlen(volume_info->UNC, MAX_TREE_SIZE + 1); |
2767 | full_path = kmalloc(unc_len + cifs_sb->prepathlen + 1, GFP_KERNEL); | 2850 | full_path = kmalloc(unc_len + 1, GFP_KERNEL); |
2768 | if (full_path == NULL) | 2851 | if (full_path == NULL) |
2769 | return ERR_PTR(-ENOMEM); | 2852 | return ERR_PTR(-ENOMEM); |
2770 | 2853 | ||
2771 | strncpy(full_path, volume_info->UNC, unc_len); | 2854 | strncpy(full_path, volume_info->UNC, unc_len); |
2772 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) { | 2855 | full_path[unc_len] = 0; /* add trailing null */ |
2773 | int i; | 2856 | convert_delimiter(full_path, CIFS_DIR_SEP(cifs_sb)); |
2774 | for (i = 0; i < unc_len; i++) { | ||
2775 | if (full_path[i] == '\\') | ||
2776 | full_path[i] = '/'; | ||
2777 | } | ||
2778 | } | ||
2779 | |||
2780 | if (cifs_sb->prepathlen) | ||
2781 | strncpy(full_path + unc_len, cifs_sb->prepath, | ||
2782 | cifs_sb->prepathlen); | ||
2783 | |||
2784 | full_path[unc_len + cifs_sb->prepathlen] = 0; /* add trailing null */ | ||
2785 | return full_path; | 2857 | return full_path; |
2786 | } | 2858 | } |
2787 | 2859 | ||
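build_unc_path_to_root() now simply copies the UNC and normalizes the separators via convert_delimiter(); the local copy of that helper was deleted earlier in this patch, presumably because a shared version lives elsewhere. A minimal sketch of what such a delimiter swap does (mirroring the removed helper, not quoting the kernel's exact code):

    /* "\\\\server\\share" becomes "//server/share" when delim is '/'. */
    static void convert_delimiter(char *path, char delim)
    {
        char old_delim = (delim == '/') ? '\\' : '/';
        char *p;

        if (!path)
            return;
        for (p = path; *p; p++)
            if (*p == old_delim)
                *p = delim;
    }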
@@ -2796,7 +2868,7 @@ build_unc_path_to_root(const struct smb_vol *volume_info, | |||
2796 | * determine whether there were referrals. | 2868 | * determine whether there were referrals. |
2797 | */ | 2869 | */ |
2798 | static int | 2870 | static int |
2799 | expand_dfs_referral(int xid, struct cifsSesInfo *pSesInfo, | 2871 | expand_dfs_referral(int xid, struct cifs_ses *pSesInfo, |
2800 | struct smb_vol *volume_info, struct cifs_sb_info *cifs_sb, | 2872 | struct smb_vol *volume_info, struct cifs_sb_info *cifs_sb, |
2801 | int check_prefix) | 2873 | int check_prefix) |
2802 | { | 2874 | { |
@@ -2840,40 +2912,13 @@ expand_dfs_referral(int xid, struct cifsSesInfo *pSesInfo, | |||
2840 | } | 2912 | } |
2841 | #endif | 2913 | #endif |
2842 | 2914 | ||
2843 | int | 2915 | int cifs_setup_volume_info(struct smb_vol **pvolume_info, char *mount_data, |
2844 | cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | 2916 | const char *devname) |
2845 | const char *devname) | ||
2846 | { | 2917 | { |
2847 | int rc; | ||
2848 | int xid; | ||
2849 | struct smb_vol *volume_info; | 2918 | struct smb_vol *volume_info; |
2850 | struct cifsSesInfo *pSesInfo; | 2919 | int rc = 0; |
2851 | struct cifsTconInfo *tcon; | ||
2852 | struct TCP_Server_Info *srvTcp; | ||
2853 | char *full_path; | ||
2854 | struct tcon_link *tlink; | ||
2855 | #ifdef CONFIG_CIFS_DFS_UPCALL | ||
2856 | int referral_walks_count = 0; | ||
2857 | try_mount_again: | ||
2858 | /* cleanup activities if we're chasing a referral */ | ||
2859 | if (referral_walks_count) { | ||
2860 | if (tcon) | ||
2861 | cifs_put_tcon(tcon); | ||
2862 | else if (pSesInfo) | ||
2863 | cifs_put_smb_ses(pSesInfo); | ||
2864 | |||
2865 | cleanup_volume_info(&volume_info); | ||
2866 | FreeXid(xid); | ||
2867 | } | ||
2868 | #endif | ||
2869 | rc = 0; | ||
2870 | tcon = NULL; | ||
2871 | pSesInfo = NULL; | ||
2872 | srvTcp = NULL; | ||
2873 | full_path = NULL; | ||
2874 | tlink = NULL; | ||
2875 | 2920 | ||
2876 | xid = GetXid(); | 2921 | *pvolume_info = NULL; |
2877 | 2922 | ||
2878 | volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL); | 2923 | volume_info = kzalloc(sizeof(struct smb_vol), GFP_KERNEL); |
2879 | if (!volume_info) { | 2924 | if (!volume_info) { |
@@ -2881,7 +2926,7 @@ try_mount_again: | |||
2881 | goto out; | 2926 | goto out; |
2882 | } | 2927 | } |
2883 | 2928 | ||
2884 | if (cifs_parse_mount_options(cifs_sb->mountdata, devname, | 2929 | if (cifs_parse_mount_options(mount_data, devname, |
2885 | volume_info)) { | 2930 | volume_info)) { |
2886 | rc = -EINVAL; | 2931 | rc = -EINVAL; |
2887 | goto out; | 2932 | goto out; |
@@ -2914,7 +2959,46 @@ try_mount_again: | |||
2914 | goto out; | 2959 | goto out; |
2915 | } | 2960 | } |
2916 | } | 2961 | } |
2917 | cifs_sb->local_nls = volume_info->local_nls; | 2962 | |
2963 | *pvolume_info = volume_info; | ||
2964 | return rc; | ||
2965 | out: | ||
2966 | cifs_cleanup_volume_info(&volume_info); | ||
2967 | return rc; | ||
2968 | } | ||
2969 | |||
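cifs_setup_volume_info() now parses the mount data into an smb_vol up front, separate from cifs_mount() just below, so the caller can consult the parsed options (for example to match an existing superblock) before committing to a full mount. A hedged sketch of the calling sequence this split enables; the wrapper function and its error handling are assumptions, not quoted from the patch:

    /* Assumed caller, simplified: parse first, mount second, clean up last. */
    static int example_do_mount(struct super_block *sb,
                                struct cifs_sb_info *cifs_sb,
                                char *mount_data, const char *devname)
    {
        struct smb_vol *volume_info = NULL;
        int rc;

        rc = cifs_setup_volume_info(&volume_info, mount_data, devname);
        if (rc)
            return rc;

        /* volume_info can now drive superblock matching before mounting */

        rc = cifs_mount(sb, cifs_sb, volume_info, devname);
        cifs_cleanup_volume_info(&volume_info);
        return rc;
    }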
2970 | int | ||
2971 | cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb, | ||
2972 | struct smb_vol *volume_info, const char *devname) | ||
2973 | { | ||
2974 | int rc = 0; | ||
2975 | int xid; | ||
2976 | struct cifs_ses *pSesInfo; | ||
2977 | struct cifs_tcon *tcon; | ||
2978 | struct TCP_Server_Info *srvTcp; | ||
2979 | char *full_path; | ||
2980 | struct tcon_link *tlink; | ||
2981 | #ifdef CONFIG_CIFS_DFS_UPCALL | ||
2982 | int referral_walks_count = 0; | ||
2983 | try_mount_again: | ||
2984 | /* cleanup activities if we're chasing a referral */ | ||
2985 | if (referral_walks_count) { | ||
2986 | if (tcon) | ||
2987 | cifs_put_tcon(tcon); | ||
2988 | else if (pSesInfo) | ||
2989 | cifs_put_smb_ses(pSesInfo); | ||
2990 | |||
2991 | cifs_cleanup_volume_info(&volume_info); | ||
2992 | FreeXid(xid); | ||
2993 | } | ||
2994 | #endif | ||
2995 | tcon = NULL; | ||
2996 | pSesInfo = NULL; | ||
2997 | srvTcp = NULL; | ||
2998 | full_path = NULL; | ||
2999 | tlink = NULL; | ||
3000 | |||
3001 | xid = GetXid(); | ||
2918 | 3002 | ||
2919 | /* get a reference to a tcp session */ | 3003 | /* get a reference to a tcp session */ |
2920 | srvTcp = cifs_get_tcp_session(volume_info); | 3004 | srvTcp = cifs_get_tcp_session(volume_info); |
@@ -2931,7 +3015,6 @@ try_mount_again: | |||
2931 | goto mount_fail_check; | 3015 | goto mount_fail_check; |
2932 | } | 3016 | } |
2933 | 3017 | ||
2934 | setup_cifs_sb(volume_info, cifs_sb); | ||
2935 | if (pSesInfo->capabilities & CAP_LARGE_FILES) | 3018 | if (pSesInfo->capabilities & CAP_LARGE_FILES) |
2936 | sb->s_maxbytes = MAX_LFS_FILESIZE; | 3019 | sb->s_maxbytes = MAX_LFS_FILESIZE; |
2937 | else | 3020 | else |
@@ -2948,35 +3031,36 @@ try_mount_again: | |||
2948 | goto remote_path_check; | 3031 | goto remote_path_check; |
2949 | } | 3032 | } |
2950 | 3033 | ||
2951 | /* do not care if following two calls succeed - informational */ | ||
2952 | if (!tcon->ipc) { | ||
2953 | CIFSSMBQFSDeviceInfo(xid, tcon); | ||
2954 | CIFSSMBQFSAttributeInfo(xid, tcon); | ||
2955 | } | ||
2956 | |||
2957 | /* tell server which Unix caps we support */ | 3034 | /* tell server which Unix caps we support */ |
2958 | if (tcon->ses->capabilities & CAP_UNIX) | 3035 | if (tcon->ses->capabilities & CAP_UNIX) { |
2959 | /* reset of caps checks mount to see if unix extensions | 3036 | /* reset of caps checks mount to see if unix extensions |
2960 | disabled for just this mount */ | 3037 | disabled for just this mount */ |
2961 | reset_cifs_unix_caps(xid, tcon, sb, volume_info); | 3038 | reset_cifs_unix_caps(xid, tcon, sb, volume_info); |
2962 | else | 3039 | if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) && |
3040 | (le64_to_cpu(tcon->fsUnixInfo.Capability) & | ||
3041 | CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) { | ||
3042 | rc = -EACCES; | ||
3043 | goto mount_fail_check; | ||
3044 | } | ||
3045 | } else | ||
2963 | tcon->unix_ext = 0; /* server does not support them */ | 3046 | tcon->unix_ext = 0; /* server does not support them */ |
2964 | 3047 | ||
2965 | /* convert forward to back slashes in prepath here if needed */ | 3048 | /* do not care if following two calls succeed - informational */ |
2966 | if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0) | 3049 | if (!tcon->ipc) { |
2967 | convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb)); | 3050 | CIFSSMBQFSDeviceInfo(xid, tcon); |
3051 | CIFSSMBQFSAttributeInfo(xid, tcon); | ||
3052 | } | ||
2968 | 3053 | ||
2969 | if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) { | 3054 | if ((tcon->unix_ext == 0) && (cifs_sb->rsize > (1024 * 127))) { |
2970 | cifs_sb->rsize = 1024 * 127; | 3055 | cifs_sb->rsize = 1024 * 127; |
2971 | cFYI(DBG2, "no very large read support, rsize now 127K"); | 3056 | cFYI(DBG2, "no very large read support, rsize now 127K"); |
2972 | } | 3057 | } |
2973 | if (!(tcon->ses->capabilities & CAP_LARGE_WRITE_X)) | ||
2974 | cifs_sb->wsize = min(cifs_sb->wsize, | ||
2975 | (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)); | ||
2976 | if (!(tcon->ses->capabilities & CAP_LARGE_READ_X)) | 3058 | if (!(tcon->ses->capabilities & CAP_LARGE_READ_X)) |
2977 | cifs_sb->rsize = min(cifs_sb->rsize, | 3059 | cifs_sb->rsize = min(cifs_sb->rsize, |
2978 | (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)); | 3060 | (tcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE)); |
2979 | 3061 | ||
3062 | cifs_sb->wsize = cifs_negotiate_wsize(tcon, volume_info); | ||
3063 | |||
2980 | remote_path_check: | 3064 | remote_path_check: |
2981 | #ifdef CONFIG_CIFS_DFS_UPCALL | 3065 | #ifdef CONFIG_CIFS_DFS_UPCALL |
2982 | /* | 3066 | /* |
@@ -2996,10 +3080,10 @@ remote_path_check: | |||
2996 | } | 3080 | } |
2997 | #endif | 3081 | #endif |
2998 | 3082 | ||
2999 | /* check if a whole path (including prepath) is not remote */ | 3083 | /* check if a whole path is not remote */ |
3000 | if (!rc && tcon) { | 3084 | if (!rc && tcon) { |
3001 | /* build_path_to_root works only when we have a valid tcon */ | 3085 | /* build_path_to_root works only when we have a valid tcon */ |
3002 | full_path = cifs_build_path_to_root(cifs_sb, tcon); | 3086 | full_path = cifs_build_path_to_root(volume_info, cifs_sb, tcon); |
3003 | if (full_path == NULL) { | 3087 | if (full_path == NULL) { |
3004 | rc = -ENOMEM; | 3088 | rc = -ENOMEM; |
3005 | goto mount_fail_check; | 3089 | goto mount_fail_check; |
@@ -3025,10 +3109,6 @@ remote_path_check: | |||
3025 | rc = -ELOOP; | 3109 | rc = -ELOOP; |
3026 | goto mount_fail_check; | 3110 | goto mount_fail_check; |
3027 | } | 3111 | } |
3028 | /* convert forward to back slashes in prepath here if needed */ | ||
3029 | if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) == 0) | ||
3030 | convert_delimiter(cifs_sb->prepath, | ||
3031 | CIFS_DIR_SEP(cifs_sb)); | ||
3032 | 3112 | ||
3033 | rc = expand_dfs_referral(xid, pSesInfo, volume_info, cifs_sb, | 3113 | rc = expand_dfs_referral(xid, pSesInfo, volume_info, cifs_sb, |
3034 | true); | 3114 | true); |
@@ -3087,14 +3167,13 @@ mount_fail_check: | |||
3087 | password will be freed at unmount time) */ | 3167 | password will be freed at unmount time) */ |
3088 | out: | 3168 | out: |
3089 | /* zero out password before freeing */ | 3169 | /* zero out password before freeing */ |
3090 | cleanup_volume_info(&volume_info); | ||
3091 | FreeXid(xid); | 3170 | FreeXid(xid); |
3092 | return rc; | 3171 | return rc; |
3093 | } | 3172 | } |
3094 | 3173 | ||
3095 | int | 3174 | int |
3096 | CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, | 3175 | CIFSTCon(unsigned int xid, struct cifs_ses *ses, |
3097 | const char *tree, struct cifsTconInfo *tcon, | 3176 | const char *tree, struct cifs_tcon *tcon, |
3098 | const struct nls_table *nls_codepage) | 3177 | const struct nls_table *nls_codepage) |
3099 | { | 3178 | { |
3100 | struct smb_hdr *smb_buffer; | 3179 | struct smb_hdr *smb_buffer; |
@@ -3126,7 +3205,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, | |||
3126 | pSMB->AndXCommand = 0xFF; | 3205 | pSMB->AndXCommand = 0xFF; |
3127 | pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); | 3206 | pSMB->Flags = cpu_to_le16(TCON_EXTENDED_SECINFO); |
3128 | bcc_ptr = &pSMB->Password[0]; | 3207 | bcc_ptr = &pSMB->Password[0]; |
3129 | if ((ses->server->secMode) & SECMODE_USER) { | 3208 | if ((ses->server->sec_mode) & SECMODE_USER) { |
3130 | pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ | 3209 | pSMB->PasswordLength = cpu_to_le16(1); /* minimum */ |
3131 | *bcc_ptr = 0; /* password is null byte */ | 3210 | *bcc_ptr = 0; /* password is null byte */ |
3132 | bcc_ptr++; /* skip password */ | 3211 | bcc_ptr++; /* skip password */ |
@@ -3143,7 +3222,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, | |||
3143 | if ((global_secflags & CIFSSEC_MAY_LANMAN) && | 3222 | if ((global_secflags & CIFSSEC_MAY_LANMAN) && |
3144 | (ses->server->secType == LANMAN)) | 3223 | (ses->server->secType == LANMAN)) |
3145 | calc_lanman_hash(tcon->password, ses->server->cryptkey, | 3224 | calc_lanman_hash(tcon->password, ses->server->cryptkey, |
3146 | ses->server->secMode & | 3225 | ses->server->sec_mode & |
3147 | SECMODE_PW_ENCRYPT ? true : false, | 3226 | SECMODE_PW_ENCRYPT ? true : false, |
3148 | bcc_ptr); | 3227 | bcc_ptr); |
3149 | else | 3228 | else |
@@ -3159,7 +3238,7 @@ CIFSTCon(unsigned int xid, struct cifsSesInfo *ses, | |||
3159 | } | 3238 | } |
3160 | } | 3239 | } |
3161 | 3240 | ||
3162 | if (ses->server->secMode & | 3241 | if (ses->server->sec_mode & |
3163 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | 3242 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) |
3164 | smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; | 3243 | smb_buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; |
3165 | 3244 | ||
@@ -3255,7 +3334,6 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) | |||
3255 | struct rb_root *root = &cifs_sb->tlink_tree; | 3334 | struct rb_root *root = &cifs_sb->tlink_tree; |
3256 | struct rb_node *node; | 3335 | struct rb_node *node; |
3257 | struct tcon_link *tlink; | 3336 | struct tcon_link *tlink; |
3258 | char *tmp; | ||
3259 | 3337 | ||
3260 | cancel_delayed_work_sync(&cifs_sb->prune_tlinks); | 3338 | cancel_delayed_work_sync(&cifs_sb->prune_tlinks); |
3261 | 3339 | ||
@@ -3272,15 +3350,10 @@ cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb) | |||
3272 | } | 3350 | } |
3273 | spin_unlock(&cifs_sb->tlink_tree_lock); | 3351 | spin_unlock(&cifs_sb->tlink_tree_lock); |
3274 | 3352 | ||
3275 | tmp = cifs_sb->prepath; | ||
3276 | cifs_sb->prepathlen = 0; | ||
3277 | cifs_sb->prepath = NULL; | ||
3278 | kfree(tmp); | ||
3279 | |||
3280 | return 0; | 3353 | return 0; |
3281 | } | 3354 | } |
3282 | 3355 | ||
3283 | int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses) | 3356 | int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses) |
3284 | { | 3357 | { |
3285 | int rc = 0; | 3358 | int rc = 0; |
3286 | struct TCP_Server_Info *server = ses->server; | 3359 | struct TCP_Server_Info *server = ses->server; |
@@ -3310,7 +3383,7 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifsSesInfo *ses) | |||
3310 | } | 3383 | } |
3311 | 3384 | ||
3312 | 3385 | ||
3313 | int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses, | 3386 | int cifs_setup_session(unsigned int xid, struct cifs_ses *ses, |
3314 | struct nls_table *nls_info) | 3387 | struct nls_table *nls_info) |
3315 | { | 3388 | { |
3316 | int rc = 0; | 3389 | int rc = 0; |
@@ -3322,7 +3395,7 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses, | |||
3322 | ses->capabilities &= (~CAP_UNIX); | 3395 | ses->capabilities &= (~CAP_UNIX); |
3323 | 3396 | ||
3324 | cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d", | 3397 | cFYI(1, "Security Mode: 0x%x Capabilities: 0x%x TimeAdjust: %d", |
3325 | server->secMode, server->capabilities, server->timeAdj); | 3398 | server->sec_mode, server->capabilities, server->timeAdj); |
3326 | 3399 | ||
3327 | rc = CIFS_SessSetup(xid, ses, nls_info); | 3400 | rc = CIFS_SessSetup(xid, ses, nls_info); |
3328 | if (rc) { | 3401 | if (rc) { |
@@ -3354,12 +3427,12 @@ int cifs_setup_session(unsigned int xid, struct cifsSesInfo *ses, | |||
3354 | return rc; | 3427 | return rc; |
3355 | } | 3428 | } |
3356 | 3429 | ||
3357 | static struct cifsTconInfo * | 3430 | static struct cifs_tcon * |
3358 | cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid) | 3431 | cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid) |
3359 | { | 3432 | { |
3360 | struct cifsTconInfo *master_tcon = cifs_sb_master_tcon(cifs_sb); | 3433 | struct cifs_tcon *master_tcon = cifs_sb_master_tcon(cifs_sb); |
3361 | struct cifsSesInfo *ses; | 3434 | struct cifs_ses *ses; |
3362 | struct cifsTconInfo *tcon = NULL; | 3435 | struct cifs_tcon *tcon = NULL; |
3363 | struct smb_vol *vol_info; | 3436 | struct smb_vol *vol_info; |
3364 | char username[28]; /* big enough for "krb50x" + hex of ULONG_MAX 6+16 */ | 3437 | char username[28]; /* big enough for "krb50x" + hex of ULONG_MAX 6+16 */ |
3365 | /* We used to have this as MAX_USERNAME which is */ | 3438 | /* We used to have this as MAX_USERNAME which is */ |
@@ -3392,7 +3465,7 @@ cifs_construct_tcon(struct cifs_sb_info *cifs_sb, uid_t fsuid) | |||
3392 | 3465 | ||
3393 | ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info); | 3466 | ses = cifs_get_smb_ses(master_tcon->ses->server, vol_info); |
3394 | if (IS_ERR(ses)) { | 3467 | if (IS_ERR(ses)) { |
3395 | tcon = (struct cifsTconInfo *)ses; | 3468 | tcon = (struct cifs_tcon *)ses; |
3396 | cifs_put_tcp_session(master_tcon->ses->server); | 3469 | cifs_put_tcp_session(master_tcon->ses->server); |
3397 | goto out; | 3470 | goto out; |
3398 | } | 3471 | } |
@@ -3417,7 +3490,7 @@ cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb) | |||
3417 | return cifs_sb->master_tlink; | 3490 | return cifs_sb->master_tlink; |
3418 | } | 3491 | } |
3419 | 3492 | ||
3420 | struct cifsTconInfo * | 3493 | struct cifs_tcon * |
3421 | cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb) | 3494 | cifs_sb_master_tcon(struct cifs_sb_info *cifs_sb) |
3422 | { | 3495 | { |
3423 | return tlink_tcon(cifs_sb_master_tlink(cifs_sb)); | 3496 | return tlink_tcon(cifs_sb_master_tlink(cifs_sb)); |
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c index 9ea65cf36714..81914df47ef1 100644 --- a/fs/cifs/dir.c +++ b/fs/cifs/dir.c | |||
@@ -50,12 +50,11 @@ build_path_from_dentry(struct dentry *direntry) | |||
50 | { | 50 | { |
51 | struct dentry *temp; | 51 | struct dentry *temp; |
52 | int namelen; | 52 | int namelen; |
53 | int pplen; | ||
54 | int dfsplen; | 53 | int dfsplen; |
55 | char *full_path; | 54 | char *full_path; |
56 | char dirsep; | 55 | char dirsep; |
57 | struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); | 56 | struct cifs_sb_info *cifs_sb = CIFS_SB(direntry->d_sb); |
58 | struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); | 57 | struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); |
59 | 58 | ||
60 | if (direntry == NULL) | 59 | if (direntry == NULL) |
61 | return NULL; /* not much we can do if dentry is freed and | 60 | return NULL; /* not much we can do if dentry is freed and |
@@ -63,13 +62,12 @@ build_path_from_dentry(struct dentry *direntry) | |||
63 | when the server crashed */ | 62 | when the server crashed */ |
64 | 63 | ||
65 | dirsep = CIFS_DIR_SEP(cifs_sb); | 64 | dirsep = CIFS_DIR_SEP(cifs_sb); |
66 | pplen = cifs_sb->prepathlen; | ||
67 | if (tcon->Flags & SMB_SHARE_IS_IN_DFS) | 65 | if (tcon->Flags & SMB_SHARE_IS_IN_DFS) |
68 | dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1); | 66 | dfsplen = strnlen(tcon->treeName, MAX_TREE_SIZE + 1); |
69 | else | 67 | else |
70 | dfsplen = 0; | 68 | dfsplen = 0; |
71 | cifs_bp_rename_retry: | 69 | cifs_bp_rename_retry: |
72 | namelen = pplen + dfsplen; | 70 | namelen = dfsplen; |
73 | for (temp = direntry; !IS_ROOT(temp);) { | 71 | for (temp = direntry; !IS_ROOT(temp);) { |
74 | namelen += (1 + temp->d_name.len); | 72 | namelen += (1 + temp->d_name.len); |
75 | temp = temp->d_parent; | 73 | temp = temp->d_parent; |
@@ -100,7 +98,7 @@ cifs_bp_rename_retry: | |||
100 | return NULL; | 98 | return NULL; |
101 | } | 99 | } |
102 | } | 100 | } |
103 | if (namelen != pplen + dfsplen) { | 101 | if (namelen != dfsplen) { |
104 | cERROR(1, "did not end path lookup where expected namelen is %d", | 102 | cERROR(1, "did not end path lookup where expected namelen is %d", |
105 | namelen); | 103 | namelen); |
106 | /* presumably this is only possible if racing with a rename | 104 | /* presumably this is only possible if racing with a rename |
@@ -126,7 +124,6 @@ cifs_bp_rename_retry: | |||
126 | } | 124 | } |
127 | } | 125 | } |
128 | } | 126 | } |
129 | strncpy(full_path + dfsplen, CIFS_SB(direntry->d_sb)->prepath, pplen); | ||
130 | return full_path; | 127 | return full_path; |
131 | } | 128 | } |
132 | 129 | ||
@@ -152,7 +149,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode, | |||
152 | __u16 fileHandle; | 149 | __u16 fileHandle; |
153 | struct cifs_sb_info *cifs_sb; | 150 | struct cifs_sb_info *cifs_sb; |
154 | struct tcon_link *tlink; | 151 | struct tcon_link *tlink; |
155 | struct cifsTconInfo *tcon; | 152 | struct cifs_tcon *tcon; |
156 | char *full_path = NULL; | 153 | char *full_path = NULL; |
157 | FILE_ALL_INFO *buf = NULL; | 154 | FILE_ALL_INFO *buf = NULL; |
158 | struct inode *newinode = NULL; | 155 | struct inode *newinode = NULL; |
@@ -356,7 +353,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, | |||
356 | int xid; | 353 | int xid; |
357 | struct cifs_sb_info *cifs_sb; | 354 | struct cifs_sb_info *cifs_sb; |
358 | struct tcon_link *tlink; | 355 | struct tcon_link *tlink; |
359 | struct cifsTconInfo *pTcon; | 356 | struct cifs_tcon *pTcon; |
357 | struct cifs_io_parms io_parms; | ||
360 | char *full_path = NULL; | 358 | char *full_path = NULL; |
361 | struct inode *newinode = NULL; | 359 | struct inode *newinode = NULL; |
362 | int oplock = 0; | 360 | int oplock = 0; |
@@ -439,16 +437,19 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, | |||
439 | * timestamps in, but we can reuse it safely */ | 437 | * timestamps in, but we can reuse it safely */ |
440 | 438 | ||
441 | pdev = (struct win_dev *)buf; | 439 | pdev = (struct win_dev *)buf; |
440 | io_parms.netfid = fileHandle; | ||
441 | io_parms.pid = current->tgid; | ||
442 | io_parms.tcon = pTcon; | ||
443 | io_parms.offset = 0; | ||
444 | io_parms.length = sizeof(struct win_dev); | ||
442 | if (S_ISCHR(mode)) { | 445 | if (S_ISCHR(mode)) { |
443 | memcpy(pdev->type, "IntxCHR", 8); | 446 | memcpy(pdev->type, "IntxCHR", 8); |
444 | pdev->major = | 447 | pdev->major = |
445 | cpu_to_le64(MAJOR(device_number)); | 448 | cpu_to_le64(MAJOR(device_number)); |
446 | pdev->minor = | 449 | pdev->minor = |
447 | cpu_to_le64(MINOR(device_number)); | 450 | cpu_to_le64(MINOR(device_number)); |
448 | rc = CIFSSMBWrite(xid, pTcon, | 451 | rc = CIFSSMBWrite(xid, &io_parms, |
449 | fileHandle, | 452 | &bytes_written, (char *)pdev, |
450 | sizeof(struct win_dev), | ||
451 | 0, &bytes_written, (char *)pdev, | ||
452 | NULL, 0); | 453 | NULL, 0); |
453 | } else if (S_ISBLK(mode)) { | 454 | } else if (S_ISBLK(mode)) { |
454 | memcpy(pdev->type, "IntxBLK", 8); | 455 | memcpy(pdev->type, "IntxBLK", 8); |
@@ -456,10 +457,8 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, int mode, | |||
456 | cpu_to_le64(MAJOR(device_number)); | 457 | cpu_to_le64(MAJOR(device_number)); |
457 | pdev->minor = | 458 | pdev->minor = |
458 | cpu_to_le64(MINOR(device_number)); | 459 | cpu_to_le64(MINOR(device_number)); |
459 | rc = CIFSSMBWrite(xid, pTcon, | 460 | rc = CIFSSMBWrite(xid, &io_parms, |
460 | fileHandle, | 461 | &bytes_written, (char *)pdev, |
461 | sizeof(struct win_dev), | ||
462 | 0, &bytes_written, (char *)pdev, | ||
463 | NULL, 0); | 462 | NULL, 0); |
464 | } /* else if (S_ISFIFO) */ | 463 | } /* else if (S_ISFIFO) */ |
465 | CIFSSMBClose(xid, pTcon, fileHandle); | 464 | CIFSSMBClose(xid, pTcon, fileHandle); |
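cifs_mknod() now fills a struct cifs_io_parms (netfid, pid, tcon, offset, length) and hands that to CIFSSMBWrite() instead of a long positional argument list. A small userspace sketch of the same parameter-struct refactor, with illustrative types and names:

    #include <stddef.h>

    struct io_parms {
        unsigned short netfid;
        unsigned int   pid;
        long long      offset;
        unsigned int   length;
    };

    /* before: write_data(fid, len, offset, pid, buf)  -- easy to misorder */
    /* after:  write_data(&parms, buf)                 -- fields are named */
    static int write_data(const struct io_parms *parms, const char *buf)
    {
        (void)buf;
        return parms->length ? 0 : -1;
    }

    static int send_dev_blob(unsigned short fid, const char *blob, size_t len)
    {
        struct io_parms parms = {
            .netfid = fid,
            .pid    = 1234,             /* caller's pid, e.g. its tgid */
            .offset = 0,
            .length = (unsigned int)len,
        };
        return write_data(&parms, blob);
    }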
@@ -486,7 +485,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry, | |||
486 | bool posix_open = false; | 485 | bool posix_open = false; |
487 | struct cifs_sb_info *cifs_sb; | 486 | struct cifs_sb_info *cifs_sb; |
488 | struct tcon_link *tlink; | 487 | struct tcon_link *tlink; |
489 | struct cifsTconInfo *pTcon; | 488 | struct cifs_tcon *pTcon; |
490 | struct cifsFileInfo *cfile; | 489 | struct cifsFileInfo *cfile; |
491 | struct inode *newInode = NULL; | 490 | struct inode *newInode = NULL; |
492 | char *full_path = NULL; | 491 | char *full_path = NULL; |
diff --git a/fs/cifs/file.c b/fs/cifs/file.c index c672afef0c09..bb71471a4d9d 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c | |||
@@ -114,7 +114,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode, | |||
114 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | 114 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
115 | struct cifs_fattr fattr; | 115 | struct cifs_fattr fattr; |
116 | struct tcon_link *tlink; | 116 | struct tcon_link *tlink; |
117 | struct cifsTconInfo *tcon; | 117 | struct cifs_tcon *tcon; |
118 | 118 | ||
119 | cFYI(1, "posix open %s", full_path); | 119 | cFYI(1, "posix open %s", full_path); |
120 | 120 | ||
@@ -168,7 +168,7 @@ posix_open_ret: | |||
168 | 168 | ||
169 | static int | 169 | static int |
170 | cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, | 170 | cifs_nt_open(char *full_path, struct inode *inode, struct cifs_sb_info *cifs_sb, |
171 | struct cifsTconInfo *tcon, unsigned int f_flags, __u32 *poplock, | 171 | struct cifs_tcon *tcon, unsigned int f_flags, __u32 *poplock, |
172 | __u16 *pnetfid, int xid) | 172 | __u16 *pnetfid, int xid) |
173 | { | 173 | { |
174 | int rc; | 174 | int rc; |
@@ -285,7 +285,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file, | |||
285 | void cifsFileInfo_put(struct cifsFileInfo *cifs_file) | 285 | void cifsFileInfo_put(struct cifsFileInfo *cifs_file) |
286 | { | 286 | { |
287 | struct inode *inode = cifs_file->dentry->d_inode; | 287 | struct inode *inode = cifs_file->dentry->d_inode; |
288 | struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink); | 288 | struct cifs_tcon *tcon = tlink_tcon(cifs_file->tlink); |
289 | struct cifsInodeInfo *cifsi = CIFS_I(inode); | 289 | struct cifsInodeInfo *cifsi = CIFS_I(inode); |
290 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 290 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
291 | struct cifsLockInfo *li, *tmp; | 291 | struct cifsLockInfo *li, *tmp; |
@@ -343,7 +343,7 @@ int cifs_open(struct inode *inode, struct file *file) | |||
343 | int xid; | 343 | int xid; |
344 | __u32 oplock; | 344 | __u32 oplock; |
345 | struct cifs_sb_info *cifs_sb; | 345 | struct cifs_sb_info *cifs_sb; |
346 | struct cifsTconInfo *tcon; | 346 | struct cifs_tcon *tcon; |
347 | struct tcon_link *tlink; | 347 | struct tcon_link *tlink; |
348 | struct cifsFileInfo *pCifsFile = NULL; | 348 | struct cifsFileInfo *pCifsFile = NULL; |
349 | char *full_path = NULL; | 349 | char *full_path = NULL; |
@@ -457,7 +457,7 @@ static int cifs_reopen_file(struct cifsFileInfo *pCifsFile, bool can_flush) | |||
457 | int xid; | 457 | int xid; |
458 | __u32 oplock; | 458 | __u32 oplock; |
459 | struct cifs_sb_info *cifs_sb; | 459 | struct cifs_sb_info *cifs_sb; |
460 | struct cifsTconInfo *tcon; | 460 | struct cifs_tcon *tcon; |
461 | struct cifsInodeInfo *pCifsInode; | 461 | struct cifsInodeInfo *pCifsInode; |
462 | struct inode *inode; | 462 | struct inode *inode; |
463 | char *full_path = NULL; | 463 | char *full_path = NULL; |
@@ -596,7 +596,7 @@ int cifs_closedir(struct inode *inode, struct file *file) | |||
596 | xid = GetXid(); | 596 | xid = GetXid(); |
597 | 597 | ||
598 | if (pCFileStruct) { | 598 | if (pCFileStruct) { |
599 | struct cifsTconInfo *pTcon = tlink_tcon(pCFileStruct->tlink); | 599 | struct cifs_tcon *pTcon = tlink_tcon(pCFileStruct->tlink); |
600 | 600 | ||
601 | cFYI(1, "Freeing private data in close dir"); | 601 | cFYI(1, "Freeing private data in close dir"); |
602 | spin_lock(&cifs_file_list_lock); | 602 | spin_lock(&cifs_file_list_lock); |
@@ -653,7 +653,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
653 | __u64 length; | 653 | __u64 length; |
654 | bool wait_flag = false; | 654 | bool wait_flag = false; |
655 | struct cifs_sb_info *cifs_sb; | 655 | struct cifs_sb_info *cifs_sb; |
656 | struct cifsTconInfo *tcon; | 656 | struct cifs_tcon *tcon; |
657 | __u16 netfid; | 657 | __u16 netfid; |
658 | __u8 lockType = LOCKING_ANDX_LARGE_FILES; | 658 | __u8 lockType = LOCKING_ANDX_LARGE_FILES; |
659 | bool posix_locking = 0; | 659 | bool posix_locking = 0; |
@@ -725,8 +725,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
725 | else | 725 | else |
726 | posix_lock_type = CIFS_WRLCK; | 726 | posix_lock_type = CIFS_WRLCK; |
727 | rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */, | 727 | rc = CIFSSMBPosixLock(xid, tcon, netfid, 1 /* get */, |
728 | length, pfLock, | 728 | length, pfLock, posix_lock_type, |
729 | posix_lock_type, wait_flag); | 729 | wait_flag); |
730 | FreeXid(xid); | 730 | FreeXid(xid); |
731 | return rc; | 731 | return rc; |
732 | } | 732 | } |
@@ -797,8 +797,8 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *pfLock) | |||
797 | posix_lock_type = CIFS_UNLCK; | 797 | posix_lock_type = CIFS_UNLCK; |
798 | 798 | ||
799 | rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */, | 799 | rc = CIFSSMBPosixLock(xid, tcon, netfid, 0 /* set */, |
800 | length, pfLock, | 800 | length, pfLock, posix_lock_type, |
801 | posix_lock_type, wait_flag); | 801 | wait_flag); |
802 | } else { | 802 | } else { |
803 | struct cifsFileInfo *fid = file->private_data; | 803 | struct cifsFileInfo *fid = file->private_data; |
804 | 804 | ||
@@ -857,7 +857,7 @@ cifs_update_eof(struct cifsInodeInfo *cifsi, loff_t offset, | |||
857 | cifsi->server_eof = end_of_write; | 857 | cifsi->server_eof = end_of_write; |
858 | } | 858 | } |
859 | 859 | ||
860 | static ssize_t cifs_write(struct cifsFileInfo *open_file, | 860 | static ssize_t cifs_write(struct cifsFileInfo *open_file, __u32 pid, |
861 | const char *write_data, size_t write_size, | 861 | const char *write_data, size_t write_size, |
862 | loff_t *poffset) | 862 | loff_t *poffset) |
863 | { | 863 | { |
@@ -865,10 +865,11 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, | |||
865 | unsigned int bytes_written = 0; | 865 | unsigned int bytes_written = 0; |
866 | unsigned int total_written; | 866 | unsigned int total_written; |
867 | struct cifs_sb_info *cifs_sb; | 867 | struct cifs_sb_info *cifs_sb; |
868 | struct cifsTconInfo *pTcon; | 868 | struct cifs_tcon *pTcon; |
869 | int xid; | 869 | int xid; |
870 | struct dentry *dentry = open_file->dentry; | 870 | struct dentry *dentry = open_file->dentry; |
871 | struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode); | 871 | struct cifsInodeInfo *cifsi = CIFS_I(dentry->d_inode); |
872 | struct cifs_io_parms io_parms; | ||
872 | 873 | ||
873 | cifs_sb = CIFS_SB(dentry->d_sb); | 874 | cifs_sb = CIFS_SB(dentry->d_sb); |
874 | 875 | ||
@@ -901,8 +902,13 @@ static ssize_t cifs_write(struct cifsFileInfo *open_file, | |||
901 | /* iov[0] is reserved for smb header */ | 902 | /* iov[0] is reserved for smb header */ |
902 | iov[1].iov_base = (char *)write_data + total_written; | 903 | iov[1].iov_base = (char *)write_data + total_written; |
903 | iov[1].iov_len = len; | 904 | iov[1].iov_len = len; |
904 | rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, len, | 905 | io_parms.netfid = open_file->netfid; |
905 | *poffset, &bytes_written, iov, 1, 0); | 906 | io_parms.pid = pid; |
907 | io_parms.tcon = pTcon; | ||
908 | io_parms.offset = *poffset; | ||
909 | io_parms.length = len; | ||
910 | rc = CIFSSMBWrite2(xid, &io_parms, &bytes_written, iov, | ||
911 | 1, 0); | ||
906 | } | 912 | } |
907 | if (rc || (bytes_written == 0)) { | 913 | if (rc || (bytes_written == 0)) { |
908 | if (total_written) | 914 | if (total_written) |
@@ -1071,8 +1077,8 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) | |||
1071 | 1077 | ||
1072 | open_file = find_writable_file(CIFS_I(mapping->host), false); | 1078 | open_file = find_writable_file(CIFS_I(mapping->host), false); |
1073 | if (open_file) { | 1079 | if (open_file) { |
1074 | bytes_written = cifs_write(open_file, write_data, | 1080 | bytes_written = cifs_write(open_file, open_file->pid, |
1075 | to - from, &offset); | 1081 | write_data, to - from, &offset); |
1076 | cifsFileInfo_put(open_file); | 1082 | cifsFileInfo_put(open_file); |
1077 | /* Does mm or vfs already set times? */ | 1083 | /* Does mm or vfs already set times? */ |
1078 | inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb); | 1084 | inode->i_atime = inode->i_mtime = current_fs_time(inode->i_sb); |
@@ -1092,58 +1098,20 @@ static int cifs_partialpagewrite(struct page *page, unsigned from, unsigned to) | |||
1092 | static int cifs_writepages(struct address_space *mapping, | 1098 | static int cifs_writepages(struct address_space *mapping, |
1093 | struct writeback_control *wbc) | 1099 | struct writeback_control *wbc) |
1094 | { | 1100 | { |
1095 | unsigned int bytes_to_write; | 1101 | struct cifs_sb_info *cifs_sb = CIFS_SB(mapping->host->i_sb); |
1096 | unsigned int bytes_written; | 1102 | bool done = false, scanned = false, range_whole = false; |
1097 | struct cifs_sb_info *cifs_sb; | 1103 | pgoff_t end, index; |
1098 | int done = 0; | 1104 | struct cifs_writedata *wdata; |
1099 | pgoff_t end; | ||
1100 | pgoff_t index; | ||
1101 | int range_whole = 0; | ||
1102 | struct kvec *iov; | ||
1103 | int len; | ||
1104 | int n_iov = 0; | ||
1105 | pgoff_t next; | ||
1106 | int nr_pages; | ||
1107 | __u64 offset = 0; | ||
1108 | struct cifsFileInfo *open_file; | ||
1109 | struct cifsTconInfo *tcon; | ||
1110 | struct cifsInodeInfo *cifsi = CIFS_I(mapping->host); | ||
1111 | struct page *page; | 1105 | struct page *page; |
1112 | struct pagevec pvec; | ||
1113 | int rc = 0; | 1106 | int rc = 0; |
1114 | int scanned = 0; | ||
1115 | int xid; | ||
1116 | |||
1117 | cifs_sb = CIFS_SB(mapping->host->i_sb); | ||
1118 | 1107 | ||
1119 | /* | 1108 | /* |
1120 | * If wsize is smaller that the page cache size, default to writing | 1109 | * If wsize is smaller than the page cache size, default to writing |
1121 | * one page at a time via cifs_writepage | 1110 | * one page at a time via cifs_writepage |
1122 | */ | 1111 | */ |
1123 | if (cifs_sb->wsize < PAGE_CACHE_SIZE) | 1112 | if (cifs_sb->wsize < PAGE_CACHE_SIZE) |
1124 | return generic_writepages(mapping, wbc); | 1113 | return generic_writepages(mapping, wbc); |
1125 | 1114 | ||
1126 | iov = kmalloc(32 * sizeof(struct kvec), GFP_KERNEL); | ||
1127 | if (iov == NULL) | ||
1128 | return generic_writepages(mapping, wbc); | ||
1129 | |||
1130 | /* | ||
1131 | * if there's no open file, then this is likely to fail too, | ||
1132 | * but it'll at least handle the return. Maybe it should be | ||
1133 | * a BUG() instead? | ||
1134 | */ | ||
1135 | open_file = find_writable_file(CIFS_I(mapping->host), false); | ||
1136 | if (!open_file) { | ||
1137 | kfree(iov); | ||
1138 | return generic_writepages(mapping, wbc); | ||
1139 | } | ||
1140 | |||
1141 | tcon = tlink_tcon(open_file->tlink); | ||
1142 | cifsFileInfo_put(open_file); | ||
1143 | |||
1144 | xid = GetXid(); | ||
1145 | |||
1146 | pagevec_init(&pvec, 0); | ||
1147 | if (wbc->range_cyclic) { | 1115 | if (wbc->range_cyclic) { |
1148 | index = mapping->writeback_index; /* Start from prev offset */ | 1116 | index = mapping->writeback_index; /* Start from prev offset */ |
1149 | end = -1; | 1117 | end = -1; |
@@ -1151,24 +1119,49 @@ static int cifs_writepages(struct address_space *mapping, | |||
1151 | index = wbc->range_start >> PAGE_CACHE_SHIFT; | 1119 | index = wbc->range_start >> PAGE_CACHE_SHIFT; |
1152 | end = wbc->range_end >> PAGE_CACHE_SHIFT; | 1120 | end = wbc->range_end >> PAGE_CACHE_SHIFT; |
1153 | if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) | 1121 | if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX) |
1154 | range_whole = 1; | 1122 | range_whole = true; |
1155 | scanned = 1; | 1123 | scanned = true; |
1156 | } | 1124 | } |
1157 | retry: | 1125 | retry: |
1158 | while (!done && (index <= end) && | 1126 | while (!done && index <= end) { |
1159 | (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, | 1127 | unsigned int i, nr_pages, found_pages; |
1160 | PAGECACHE_TAG_DIRTY, | 1128 | pgoff_t next = 0, tofind; |
1161 | min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1))) { | 1129 | struct page **pages; |
1162 | int first; | 1130 | |
1163 | unsigned int i; | 1131 | tofind = min((cifs_sb->wsize / PAGE_CACHE_SIZE) - 1, |
1164 | 1132 | end - index) + 1; | |
1165 | first = -1; | 1133 | |
1166 | next = 0; | 1134 | wdata = cifs_writedata_alloc((unsigned int)tofind); |
1167 | n_iov = 0; | 1135 | if (!wdata) { |
1168 | bytes_to_write = 0; | 1136 | rc = -ENOMEM; |
1169 | 1137 | break; | |
1170 | for (i = 0; i < nr_pages; i++) { | 1138 | } |
1171 | page = pvec.pages[i]; | 1139 | |
1140 | /* | ||
1141 | * find_get_pages_tag seems to return a max of 256 on each | ||
1142 | * iteration, so we must call it several times in order to | ||
1143 | * fill the array or the wsize is effectively limited to | ||
1144 | * 256 * PAGE_CACHE_SIZE. | ||
1145 | */ | ||
1146 | found_pages = 0; | ||
1147 | pages = wdata->pages; | ||
1148 | do { | ||
1149 | nr_pages = find_get_pages_tag(mapping, &index, | ||
1150 | PAGECACHE_TAG_DIRTY, | ||
1151 | tofind, pages); | ||
1152 | found_pages += nr_pages; | ||
1153 | tofind -= nr_pages; | ||
1154 | pages += nr_pages; | ||
1155 | } while (nr_pages && tofind && index <= end); | ||
1156 | |||
1157 | if (found_pages == 0) { | ||
1158 | kref_put(&wdata->refcount, cifs_writedata_release); | ||
1159 | break; | ||
1160 | } | ||
1161 | |||
1162 | nr_pages = 0; | ||
1163 | for (i = 0; i < found_pages; i++) { | ||
1164 | page = wdata->pages[i]; | ||
1172 | /* | 1165 | /* |
1173 | * At this point we hold neither mapping->tree_lock nor | 1166 | * At this point we hold neither mapping->tree_lock nor |
1174 | * lock on the page itself: the page may be truncated or | 1167 | * lock on the page itself: the page may be truncated or |
@@ -1177,7 +1170,7 @@ retry: | |||
1177 | * mapping | 1170 | * mapping |
1178 | */ | 1171 | */ |
1179 | 1172 | ||
1180 | if (first < 0) | 1173 | if (nr_pages == 0) |
1181 | lock_page(page); | 1174 | lock_page(page); |
1182 | else if (!trylock_page(page)) | 1175 | else if (!trylock_page(page)) |
1183 | break; | 1176 | break; |
@@ -1188,7 +1181,7 @@ retry: | |||
1188 | } | 1181 | } |
1189 | 1182 | ||
1190 | if (!wbc->range_cyclic && page->index > end) { | 1183 | if (!wbc->range_cyclic && page->index > end) { |
1191 | done = 1; | 1184 | done = true; |
1192 | unlock_page(page); | 1185 | unlock_page(page); |
1193 | break; | 1186 | break; |
1194 | } | 1187 | } |
@@ -1215,119 +1208,89 @@ retry: | |||
1215 | set_page_writeback(page); | 1208 | set_page_writeback(page); |
1216 | 1209 | ||
1217 | if (page_offset(page) >= mapping->host->i_size) { | 1210 | if (page_offset(page) >= mapping->host->i_size) { |
1218 | done = 1; | 1211 | done = true; |
1219 | unlock_page(page); | 1212 | unlock_page(page); |
1220 | end_page_writeback(page); | 1213 | end_page_writeback(page); |
1221 | break; | 1214 | break; |
1222 | } | 1215 | } |
1223 | 1216 | ||
1224 | /* | 1217 | wdata->pages[i] = page; |
1225 | * BB can we get rid of this? pages are held by pvec | 1218 | next = page->index + 1; |
1226 | */ | 1219 | ++nr_pages; |
1227 | page_cache_get(page); | 1220 | } |
1228 | 1221 | ||
1229 | len = min(mapping->host->i_size - page_offset(page), | 1222 | /* reset index to refind any pages skipped */ |
1230 | (loff_t)PAGE_CACHE_SIZE); | 1223 | if (nr_pages == 0) |
1224 | index = wdata->pages[0]->index + 1; | ||
1231 | 1225 | ||
1232 | /* reserve iov[0] for the smb header */ | 1226 | /* put any pages we aren't going to use */ |
1233 | n_iov++; | 1227 | for (i = nr_pages; i < found_pages; i++) { |
1234 | iov[n_iov].iov_base = kmap(page); | 1228 | page_cache_release(wdata->pages[i]); |
1235 | iov[n_iov].iov_len = len; | 1229 | wdata->pages[i] = NULL; |
1236 | bytes_to_write += len; | 1230 | } |
1237 | 1231 | ||
1238 | if (first < 0) { | 1232 | /* nothing to write? */ |
1239 | first = i; | 1233 | if (nr_pages == 0) { |
1240 | offset = page_offset(page); | 1234 | kref_put(&wdata->refcount, cifs_writedata_release); |
1241 | } | 1235 | continue; |
1242 | next = page->index + 1; | ||
1243 | if (bytes_to_write + PAGE_CACHE_SIZE > cifs_sb->wsize) | ||
1244 | break; | ||
1245 | } | 1236 | } |
1246 | if (n_iov) { | ||
1247 | retry_write: | ||
1248 | open_file = find_writable_file(CIFS_I(mapping->host), | ||
1249 | false); | ||
1250 | if (!open_file) { | ||
1251 | cERROR(1, "No writable handles for inode"); | ||
1252 | rc = -EBADF; | ||
1253 | } else { | ||
1254 | rc = CIFSSMBWrite2(xid, tcon, open_file->netfid, | ||
1255 | bytes_to_write, offset, | ||
1256 | &bytes_written, iov, n_iov, | ||
1257 | 0); | ||
1258 | cifsFileInfo_put(open_file); | ||
1259 | } | ||
1260 | 1237 | ||
1261 | cFYI(1, "Write2 rc=%d, wrote=%u", rc, bytes_written); | 1238 | wdata->sync_mode = wbc->sync_mode; |
1239 | wdata->nr_pages = nr_pages; | ||
1240 | wdata->offset = page_offset(wdata->pages[0]); | ||
1262 | 1241 | ||
1263 | /* | 1242 | do { |
1264 | * For now, treat a short write as if nothing got | 1243 | if (wdata->cfile != NULL) |
1265 | * written. A zero length write however indicates | 1244 | cifsFileInfo_put(wdata->cfile); |
1266 | * ENOSPC or EFBIG. We have no way to know which | 1245 | wdata->cfile = find_writable_file(CIFS_I(mapping->host), |
1267 | * though, so call it ENOSPC for now. EFBIG would | 1246 | false); |
1268 | * get translated to AS_EIO anyway. | 1247 | if (!wdata->cfile) { |
1269 | * | 1248 | cERROR(1, "No writable handles for inode"); |
1270 | * FIXME: make it take into account the data that did | 1249 | rc = -EBADF; |
1271 | * get written | 1250 | break; |
1272 | */ | ||
1273 | if (rc == 0) { | ||
1274 | if (bytes_written == 0) | ||
1275 | rc = -ENOSPC; | ||
1276 | else if (bytes_written < bytes_to_write) | ||
1277 | rc = -EAGAIN; | ||
1278 | } | 1251 | } |
1252 | rc = cifs_async_writev(wdata); | ||
1253 | } while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN); | ||
1279 | 1254 | ||
1280 | /* retry on data-integrity flush */ | 1255 | for (i = 0; i < nr_pages; ++i) |
1281 | if (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN) | 1256 | unlock_page(wdata->pages[i]); |
1282 | goto retry_write; | ||
1283 | |||
1284 | /* fix the stats and EOF */ | ||
1285 | if (bytes_written > 0) { | ||
1286 | cifs_stats_bytes_written(tcon, bytes_written); | ||
1287 | cifs_update_eof(cifsi, offset, bytes_written); | ||
1288 | } | ||
1289 | 1257 | ||
1290 | for (i = 0; i < n_iov; i++) { | 1258 | /* send failure -- clean up the mess */ |
1291 | page = pvec.pages[first + i]; | 1259 | if (rc != 0) { |
1292 | /* on retryable write error, redirty page */ | 1260 | for (i = 0; i < nr_pages; ++i) { |
1293 | if (rc == -EAGAIN) | 1261 | if (rc == -EAGAIN) |
1294 | redirty_page_for_writepage(wbc, page); | 1262 | redirty_page_for_writepage(wbc, |
1295 | else if (rc != 0) | 1263 | wdata->pages[i]); |
1296 | SetPageError(page); | 1264 | else |
1297 | kunmap(page); | 1265 | SetPageError(wdata->pages[i]); |
1298 | unlock_page(page); | 1266 | end_page_writeback(wdata->pages[i]); |
1299 | end_page_writeback(page); | 1267 | page_cache_release(wdata->pages[i]); |
1300 | page_cache_release(page); | ||
1301 | } | 1268 | } |
1302 | |||
1303 | if (rc != -EAGAIN) | 1269 | if (rc != -EAGAIN) |
1304 | mapping_set_error(mapping, rc); | 1270 | mapping_set_error(mapping, rc); |
1305 | else | 1271 | } |
1306 | rc = 0; | 1272 | kref_put(&wdata->refcount, cifs_writedata_release); |
1307 | 1273 | ||
1308 | if ((wbc->nr_to_write -= n_iov) <= 0) | 1274 | wbc->nr_to_write -= nr_pages; |
1309 | done = 1; | 1275 | if (wbc->nr_to_write <= 0) |
1310 | index = next; | 1276 | done = true; |
1311 | } else | ||
1312 | /* Need to re-find the pages we skipped */ | ||
1313 | index = pvec.pages[0]->index + 1; | ||
1314 | 1277 | ||
1315 | pagevec_release(&pvec); | 1278 | index = next; |
1316 | } | 1279 | } |
1280 | |||
1317 | if (!scanned && !done) { | 1281 | if (!scanned && !done) { |
1318 | /* | 1282 | /* |
1319 | * We hit the last page and there is more work to be done: wrap | 1283 | * We hit the last page and there is more work to be done: wrap |
1320 | * back to the start of the file | 1284 | * back to the start of the file |
1321 | */ | 1285 | */ |
1322 | scanned = 1; | 1286 | scanned = true; |
1323 | index = 0; | 1287 | index = 0; |
1324 | goto retry; | 1288 | goto retry; |
1325 | } | 1289 | } |
1290 | |||
1326 | if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) | 1291 | if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) |
1327 | mapping->writeback_index = index; | 1292 | mapping->writeback_index = index; |
1328 | 1293 | ||
1329 | FreeXid(xid); | ||
1330 | kfree(iov); | ||
1331 | return rc; | 1294 | return rc; |
1332 | } | 1295 | } |
1333 | 1296 | ||
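The hunk above replaces the old iovec/pagevec write loop in cifs_writepages() with a refcounted cifs_writedata request handed to cifs_async_writev(), retrying only for data-integrity writeback. A minimal sketch of that issue/retry pattern, extracted from the new side of the diff; issue_wdata() is a hypothetical wrapper for illustration, while the retry condition, handle lookup, and the final kref release mirror the hunk.

/* Sketch only: issue one async write request as the reworked
 * cifs_writepages() does above -- re-find a writable handle, retry on
 * -EAGAIN only for WB_SYNC_ALL (data-integrity) writeback, then drop
 * the reference.  issue_wdata() is not a real CIFS function. */
static int issue_wdata(struct cifs_writedata *wdata,
		       struct writeback_control *wbc,
		       struct address_space *mapping)
{
	int rc;

	do {
		if (wdata->cfile != NULL)
			cifsFileInfo_put(wdata->cfile);
		wdata->cfile = find_writable_file(CIFS_I(mapping->host), false);
		if (!wdata->cfile) {
			rc = -EBADF;	/* no writable handles for inode */
			break;
		}
		rc = cifs_async_writev(wdata);
	} while (wbc->sync_mode == WB_SYNC_ALL && rc == -EAGAIN);

	kref_put(&wdata->refcount, cifs_writedata_release);
	return rc;
}
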
@@ -1383,6 +1346,14 @@ static int cifs_write_end(struct file *file, struct address_space *mapping, | |||
1383 | { | 1346 | { |
1384 | int rc; | 1347 | int rc; |
1385 | struct inode *inode = mapping->host; | 1348 | struct inode *inode = mapping->host; |
1349 | struct cifsFileInfo *cfile = file->private_data; | ||
1350 | struct cifs_sb_info *cifs_sb = CIFS_SB(cfile->dentry->d_sb); | ||
1351 | __u32 pid; | ||
1352 | |||
1353 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) | ||
1354 | pid = cfile->pid; | ||
1355 | else | ||
1356 | pid = current->tgid; | ||
1386 | 1357 | ||
1387 | cFYI(1, "write_end for page %p from pos %lld with %d bytes", | 1358 | cFYI(1, "write_end for page %p from pos %lld with %d bytes", |
1388 | page, pos, copied); | 1359 | page, pos, copied); |
@@ -1406,8 +1377,7 @@ static int cifs_write_end(struct file *file, struct address_space *mapping, | |||
1406 | /* BB check if anything else missing out of ppw | 1377 | /* BB check if anything else missing out of ppw |
1407 | such as updating last write time */ | 1378 | such as updating last write time */ |
1408 | page_data = kmap(page); | 1379 | page_data = kmap(page); |
1409 | rc = cifs_write(file->private_data, page_data + offset, | 1380 | rc = cifs_write(cfile, pid, page_data + offset, copied, &pos); |
1410 | copied, &pos); | ||
1411 | /* if (rc < 0) should we set writebehind rc? */ | 1381 | /* if (rc < 0) should we set writebehind rc? */ |
1412 | kunmap(page); | 1382 | kunmap(page); |
1413 | 1383 | ||
@@ -1435,7 +1405,7 @@ int cifs_strict_fsync(struct file *file, int datasync) | |||
1435 | { | 1405 | { |
1436 | int xid; | 1406 | int xid; |
1437 | int rc = 0; | 1407 | int rc = 0; |
1438 | struct cifsTconInfo *tcon; | 1408 | struct cifs_tcon *tcon; |
1439 | struct cifsFileInfo *smbfile = file->private_data; | 1409 | struct cifsFileInfo *smbfile = file->private_data; |
1440 | struct inode *inode = file->f_path.dentry->d_inode; | 1410 | struct inode *inode = file->f_path.dentry->d_inode; |
1441 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 1411 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
@@ -1465,7 +1435,7 @@ int cifs_fsync(struct file *file, int datasync) | |||
1465 | { | 1435 | { |
1466 | int xid; | 1436 | int xid; |
1467 | int rc = 0; | 1437 | int rc = 0; |
1468 | struct cifsTconInfo *tcon; | 1438 | struct cifs_tcon *tcon; |
1469 | struct cifsFileInfo *smbfile = file->private_data; | 1439 | struct cifsFileInfo *smbfile = file->private_data; |
1470 | struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); | 1440 | struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); |
1471 | 1441 | ||
@@ -1556,9 +1526,11 @@ cifs_iovec_write(struct file *file, const struct iovec *iov, | |||
1556 | struct iov_iter it; | 1526 | struct iov_iter it; |
1557 | struct inode *inode; | 1527 | struct inode *inode; |
1558 | struct cifsFileInfo *open_file; | 1528 | struct cifsFileInfo *open_file; |
1559 | struct cifsTconInfo *pTcon; | 1529 | struct cifs_tcon *pTcon; |
1560 | struct cifs_sb_info *cifs_sb; | 1530 | struct cifs_sb_info *cifs_sb; |
1531 | struct cifs_io_parms io_parms; | ||
1561 | int xid, rc; | 1532 | int xid, rc; |
1533 | __u32 pid; | ||
1562 | 1534 | ||
1563 | len = iov_length(iov, nr_segs); | 1535 | len = iov_length(iov, nr_segs); |
1564 | if (!len) | 1536 | if (!len) |
@@ -1590,6 +1562,12 @@ cifs_iovec_write(struct file *file, const struct iovec *iov, | |||
1590 | 1562 | ||
1591 | xid = GetXid(); | 1563 | xid = GetXid(); |
1592 | open_file = file->private_data; | 1564 | open_file = file->private_data; |
1565 | |||
1566 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) | ||
1567 | pid = open_file->pid; | ||
1568 | else | ||
1569 | pid = current->tgid; | ||
1570 | |||
1593 | pTcon = tlink_tcon(open_file->tlink); | 1571 | pTcon = tlink_tcon(open_file->tlink); |
1594 | inode = file->f_path.dentry->d_inode; | 1572 | inode = file->f_path.dentry->d_inode; |
1595 | 1573 | ||
@@ -1616,9 +1594,13 @@ cifs_iovec_write(struct file *file, const struct iovec *iov, | |||
1616 | if (rc != 0) | 1594 | if (rc != 0) |
1617 | break; | 1595 | break; |
1618 | } | 1596 | } |
1619 | rc = CIFSSMBWrite2(xid, pTcon, open_file->netfid, | 1597 | io_parms.netfid = open_file->netfid; |
1620 | cur_len, *poffset, &written, | 1598 | io_parms.pid = pid; |
1621 | to_send, npages, 0); | 1599 | io_parms.tcon = pTcon; |
1600 | io_parms.offset = *poffset; | ||
1601 | io_parms.length = cur_len; | ||
1602 | rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send, | ||
1603 | npages, 0); | ||
1622 | } while (rc == -EAGAIN); | 1604 | } while (rc == -EAGAIN); |
1623 | 1605 | ||
1624 | for (i = 0; i < npages; i++) | 1606 | for (i = 0; i < npages; i++) |
@@ -1711,10 +1693,12 @@ cifs_iovec_read(struct file *file, const struct iovec *iov, | |||
1711 | size_t len, cur_len; | 1693 | size_t len, cur_len; |
1712 | int iov_offset = 0; | 1694 | int iov_offset = 0; |
1713 | struct cifs_sb_info *cifs_sb; | 1695 | struct cifs_sb_info *cifs_sb; |
1714 | struct cifsTconInfo *pTcon; | 1696 | struct cifs_tcon *pTcon; |
1715 | struct cifsFileInfo *open_file; | 1697 | struct cifsFileInfo *open_file; |
1716 | struct smb_com_read_rsp *pSMBr; | 1698 | struct smb_com_read_rsp *pSMBr; |
1699 | struct cifs_io_parms io_parms; | ||
1717 | char *read_data; | 1700 | char *read_data; |
1701 | __u32 pid; | ||
1718 | 1702 | ||
1719 | if (!nr_segs) | 1703 | if (!nr_segs) |
1720 | return 0; | 1704 | return 0; |
@@ -1729,6 +1713,11 @@ cifs_iovec_read(struct file *file, const struct iovec *iov, | |||
1729 | open_file = file->private_data; | 1713 | open_file = file->private_data; |
1730 | pTcon = tlink_tcon(open_file->tlink); | 1714 | pTcon = tlink_tcon(open_file->tlink); |
1731 | 1715 | ||
1716 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) | ||
1717 | pid = open_file->pid; | ||
1718 | else | ||
1719 | pid = current->tgid; | ||
1720 | |||
1732 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) | 1721 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) |
1733 | cFYI(1, "attempting read on write only file instance"); | 1722 | cFYI(1, "attempting read on write only file instance"); |
1734 | 1723 | ||
@@ -1744,8 +1733,12 @@ cifs_iovec_read(struct file *file, const struct iovec *iov, | |||
1744 | if (rc != 0) | 1733 | if (rc != 0) |
1745 | break; | 1734 | break; |
1746 | } | 1735 | } |
1747 | rc = CIFSSMBRead(xid, pTcon, open_file->netfid, | 1736 | io_parms.netfid = open_file->netfid; |
1748 | cur_len, *poffset, &bytes_read, | 1737 | io_parms.pid = pid; |
1738 | io_parms.tcon = pTcon; | ||
1739 | io_parms.offset = *poffset; | ||
1740 | io_parms.length = len; | ||
1741 | rc = CIFSSMBRead(xid, &io_parms, &bytes_read, | ||
1749 | &read_data, &buf_type); | 1742 | &read_data, &buf_type); |
1750 | pSMBr = (struct smb_com_read_rsp *)read_data; | 1743 | pSMBr = (struct smb_com_read_rsp *)read_data; |
1751 | if (read_data) { | 1744 | if (read_data) { |
@@ -1822,11 +1815,13 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, | |||
1822 | unsigned int total_read; | 1815 | unsigned int total_read; |
1823 | unsigned int current_read_size; | 1816 | unsigned int current_read_size; |
1824 | struct cifs_sb_info *cifs_sb; | 1817 | struct cifs_sb_info *cifs_sb; |
1825 | struct cifsTconInfo *pTcon; | 1818 | struct cifs_tcon *pTcon; |
1826 | int xid; | 1819 | int xid; |
1827 | char *current_offset; | 1820 | char *current_offset; |
1828 | struct cifsFileInfo *open_file; | 1821 | struct cifsFileInfo *open_file; |
1822 | struct cifs_io_parms io_parms; | ||
1829 | int buf_type = CIFS_NO_BUFFER; | 1823 | int buf_type = CIFS_NO_BUFFER; |
1824 | __u32 pid; | ||
1830 | 1825 | ||
1831 | xid = GetXid(); | 1826 | xid = GetXid(); |
1832 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); | 1827 | cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); |
@@ -1839,6 +1834,11 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, | |||
1839 | open_file = file->private_data; | 1834 | open_file = file->private_data; |
1840 | pTcon = tlink_tcon(open_file->tlink); | 1835 | pTcon = tlink_tcon(open_file->tlink); |
1841 | 1836 | ||
1837 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) | ||
1838 | pid = open_file->pid; | ||
1839 | else | ||
1840 | pid = current->tgid; | ||
1841 | |||
1842 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) | 1842 | if ((file->f_flags & O_ACCMODE) == O_WRONLY) |
1843 | cFYI(1, "attempting read on write only file instance"); | 1843 | cFYI(1, "attempting read on write only file instance"); |
1844 | 1844 | ||
@@ -1861,11 +1861,13 @@ static ssize_t cifs_read(struct file *file, char *read_data, size_t read_size, | |||
1861 | if (rc != 0) | 1861 | if (rc != 0) |
1862 | break; | 1862 | break; |
1863 | } | 1863 | } |
1864 | rc = CIFSSMBRead(xid, pTcon, | 1864 | io_parms.netfid = open_file->netfid; |
1865 | open_file->netfid, | 1865 | io_parms.pid = pid; |
1866 | current_read_size, *poffset, | 1866 | io_parms.tcon = pTcon; |
1867 | &bytes_read, ¤t_offset, | 1867 | io_parms.offset = *poffset; |
1868 | &buf_type); | 1868 | io_parms.length = current_read_size; |
1869 | rc = CIFSSMBRead(xid, &io_parms, &bytes_read, | ||
1870 | ¤t_offset, &buf_type); | ||
1869 | } | 1871 | } |
1870 | if (rc || (bytes_read == 0)) { | 1872 | if (rc || (bytes_read == 0)) { |
1871 | if (total_read) { | 1873 | if (total_read) { |
@@ -1996,13 +1998,15 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
1996 | loff_t offset; | 1998 | loff_t offset; |
1997 | struct page *page; | 1999 | struct page *page; |
1998 | struct cifs_sb_info *cifs_sb; | 2000 | struct cifs_sb_info *cifs_sb; |
1999 | struct cifsTconInfo *pTcon; | 2001 | struct cifs_tcon *pTcon; |
2000 | unsigned int bytes_read = 0; | 2002 | unsigned int bytes_read = 0; |
2001 | unsigned int read_size, i; | 2003 | unsigned int read_size, i; |
2002 | char *smb_read_data = NULL; | 2004 | char *smb_read_data = NULL; |
2003 | struct smb_com_read_rsp *pSMBr; | 2005 | struct smb_com_read_rsp *pSMBr; |
2004 | struct cifsFileInfo *open_file; | 2006 | struct cifsFileInfo *open_file; |
2007 | struct cifs_io_parms io_parms; | ||
2005 | int buf_type = CIFS_NO_BUFFER; | 2008 | int buf_type = CIFS_NO_BUFFER; |
2009 | __u32 pid; | ||
2006 | 2010 | ||
2007 | xid = GetXid(); | 2011 | xid = GetXid(); |
2008 | if (file->private_data == NULL) { | 2012 | if (file->private_data == NULL) { |
@@ -2024,6 +2028,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
2024 | goto read_complete; | 2028 | goto read_complete; |
2025 | 2029 | ||
2026 | cFYI(DBG2, "rpages: num pages %d", num_pages); | 2030 | cFYI(DBG2, "rpages: num pages %d", num_pages); |
2031 | if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) | ||
2032 | pid = open_file->pid; | ||
2033 | else | ||
2034 | pid = current->tgid; | ||
2035 | |||
2027 | for (i = 0; i < num_pages; ) { | 2036 | for (i = 0; i < num_pages; ) { |
2028 | unsigned contig_pages; | 2037 | unsigned contig_pages; |
2029 | struct page *tmp_page; | 2038 | struct page *tmp_page; |
@@ -2065,12 +2074,13 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, | |||
2065 | if (rc != 0) | 2074 | if (rc != 0) |
2066 | break; | 2075 | break; |
2067 | } | 2076 | } |
2068 | 2077 | io_parms.netfid = open_file->netfid; | |
2069 | rc = CIFSSMBRead(xid, pTcon, | 2078 | io_parms.pid = pid; |
2070 | open_file->netfid, | 2079 | io_parms.tcon = pTcon; |
2071 | read_size, offset, | 2080 | io_parms.offset = offset; |
2072 | &bytes_read, &smb_read_data, | 2081 | io_parms.length = read_size; |
2073 | &buf_type); | 2082 | rc = CIFSSMBRead(xid, &io_parms, &bytes_read, |
2083 | &smb_read_data, &buf_type); | ||
2074 | /* BB more RC checks ? */ | 2084 | /* BB more RC checks ? */ |
2075 | if (rc == -EAGAIN) { | 2085 | if (rc == -EAGAIN) { |
2076 | if (smb_read_data) { | 2086 | if (smb_read_data) { |
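Across the read and write paths above, the per-call (tcon, netfid, offset, length, pid) arguments are folded into a struct cifs_io_parms, and the pid forwarded to the server follows the CIFS_MOUNT_RWPIDFORWARD mount flag. A minimal sketch of that calling convention follows; the field names and the CIFSSMBRead() call shape are taken from the hunks above, while do_read() itself is an illustrative wrapper, not a kernel function.

/* Sketch only: the cifs_io_parms calling convention used by the
 * converted read paths above.  do_read() is hypothetical. */
static int do_read(const int xid, struct cifs_sb_info *cifs_sb,
		   struct cifsFileInfo *open_file, loff_t offset,
		   unsigned int len, char **buf, int *buf_type,
		   unsigned int *bytes_read)
{
	struct cifs_io_parms io_parms;
	__u32 pid;

	/* forward the opener's pid only when the mount asked for it */
	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
		pid = open_file->pid;
	else
		pid = current->tgid;

	io_parms.netfid = open_file->netfid;
	io_parms.pid = pid;
	io_parms.tcon = tlink_tcon(open_file->tlink);
	io_parms.offset = offset;
	io_parms.length = len;

	return CIFSSMBRead(xid, &io_parms, bytes_read, buf, buf_type);
}
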
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 297a43d0ff7f..d368a47ba5eb 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -40,7 +40,7 @@ void cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) | |||
40 | server->fscache = NULL; | 40 | server->fscache = NULL; |
41 | } | 41 | } |
42 | 42 | ||
43 | void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon) | 43 | void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) |
44 | { | 44 | { |
45 | struct TCP_Server_Info *server = tcon->ses->server; | 45 | struct TCP_Server_Info *server = tcon->ses->server; |
46 | 46 | ||
@@ -51,7 +51,7 @@ void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon) | |||
51 | server->fscache, tcon->fscache); | 51 | server->fscache, tcon->fscache); |
52 | } | 52 | } |
53 | 53 | ||
54 | void cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon) | 54 | void cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) |
55 | { | 55 | { |
56 | cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache); | 56 | cFYI(1, "CIFS: releasing superblock cookie (0x%p)", tcon->fscache); |
57 | fscache_relinquish_cookie(tcon->fscache, 0); | 57 | fscache_relinquish_cookie(tcon->fscache, 0); |
@@ -62,7 +62,7 @@ static void cifs_fscache_enable_inode_cookie(struct inode *inode) | |||
62 | { | 62 | { |
63 | struct cifsInodeInfo *cifsi = CIFS_I(inode); | 63 | struct cifsInodeInfo *cifsi = CIFS_I(inode); |
64 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 64 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
65 | struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); | 65 | struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); |
66 | 66 | ||
67 | if (cifsi->fscache) | 67 | if (cifsi->fscache) |
68 | return; | 68 | return; |
diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h
index 31b88ec2341e..63539323e0b9 100644
--- a/fs/cifs/fscache.h
+++ b/fs/cifs/fscache.h
@@ -40,8 +40,8 @@ extern void cifs_fscache_unregister(void); | |||
40 | */ | 40 | */ |
41 | extern void cifs_fscache_get_client_cookie(struct TCP_Server_Info *); | 41 | extern void cifs_fscache_get_client_cookie(struct TCP_Server_Info *); |
42 | extern void cifs_fscache_release_client_cookie(struct TCP_Server_Info *); | 42 | extern void cifs_fscache_release_client_cookie(struct TCP_Server_Info *); |
43 | extern void cifs_fscache_get_super_cookie(struct cifsTconInfo *); | 43 | extern void cifs_fscache_get_super_cookie(struct cifs_tcon *); |
44 | extern void cifs_fscache_release_super_cookie(struct cifsTconInfo *); | 44 | extern void cifs_fscache_release_super_cookie(struct cifs_tcon *); |
45 | 45 | ||
46 | extern void cifs_fscache_release_inode_cookie(struct inode *); | 46 | extern void cifs_fscache_release_inode_cookie(struct inode *); |
47 | extern void cifs_fscache_set_inode_cookie(struct inode *, struct file *); | 47 | extern void cifs_fscache_set_inode_cookie(struct inode *, struct file *); |
@@ -99,9 +99,9 @@ static inline void | |||
99 | cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) {} | 99 | cifs_fscache_get_client_cookie(struct TCP_Server_Info *server) {} |
100 | static inline void | 100 | static inline void |
101 | cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) {} | 101 | cifs_fscache_release_client_cookie(struct TCP_Server_Info *server) {} |
102 | static inline void cifs_fscache_get_super_cookie(struct cifsTconInfo *tcon) {} | 102 | static inline void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon) {} |
103 | static inline void | 103 | static inline void |
104 | cifs_fscache_release_super_cookie(struct cifsTconInfo *tcon) {} | 104 | cifs_fscache_release_super_cookie(struct cifs_tcon *tcon) {} |
105 | 105 | ||
106 | static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {} | 106 | static inline void cifs_fscache_release_inode_cookie(struct inode *inode) {} |
107 | static inline void cifs_fscache_set_inode_cookie(struct inode *inode, | 107 | static inline void cifs_fscache_set_inode_cookie(struct inode *inode, |
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index de02ed5e25c2..9b018c8334fa 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -295,7 +295,7 @@ int cifs_get_file_info_unix(struct file *filp) | |||
295 | struct inode *inode = filp->f_path.dentry->d_inode; | 295 | struct inode *inode = filp->f_path.dentry->d_inode; |
296 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 296 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
297 | struct cifsFileInfo *cfile = filp->private_data; | 297 | struct cifsFileInfo *cfile = filp->private_data; |
298 | struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink); | 298 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); |
299 | 299 | ||
300 | xid = GetXid(); | 300 | xid = GetXid(); |
301 | rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data); | 301 | rc = CIFSSMBUnixQFileInfo(xid, tcon, cfile->netfid, &find_data); |
@@ -318,7 +318,7 @@ int cifs_get_inode_info_unix(struct inode **pinode, | |||
318 | int rc; | 318 | int rc; |
319 | FILE_UNIX_BASIC_INFO find_data; | 319 | FILE_UNIX_BASIC_INFO find_data; |
320 | struct cifs_fattr fattr; | 320 | struct cifs_fattr fattr; |
321 | struct cifsTconInfo *tcon; | 321 | struct cifs_tcon *tcon; |
322 | struct tcon_link *tlink; | 322 | struct tcon_link *tlink; |
323 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | 323 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
324 | 324 | ||
@@ -373,7 +373,8 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path, | |||
373 | int oplock = 0; | 373 | int oplock = 0; |
374 | __u16 netfid; | 374 | __u16 netfid; |
375 | struct tcon_link *tlink; | 375 | struct tcon_link *tlink; |
376 | struct cifsTconInfo *tcon; | 376 | struct cifs_tcon *tcon; |
377 | struct cifs_io_parms io_parms; | ||
377 | char buf[24]; | 378 | char buf[24]; |
378 | unsigned int bytes_read; | 379 | unsigned int bytes_read; |
379 | char *pbuf; | 380 | char *pbuf; |
@@ -405,9 +406,13 @@ cifs_sfu_type(struct cifs_fattr *fattr, const unsigned char *path, | |||
405 | if (rc == 0) { | 406 | if (rc == 0) { |
406 | int buf_type = CIFS_NO_BUFFER; | 407 | int buf_type = CIFS_NO_BUFFER; |
407 | /* Read header */ | 408 | /* Read header */ |
408 | rc = CIFSSMBRead(xid, tcon, netfid, | 409 | io_parms.netfid = netfid; |
409 | 24 /* length */, 0 /* offset */, | 410 | io_parms.pid = current->tgid; |
410 | &bytes_read, &pbuf, &buf_type); | 411 | io_parms.tcon = tcon; |
412 | io_parms.offset = 0; | ||
413 | io_parms.length = 24; | ||
414 | rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, | ||
415 | &buf_type); | ||
411 | if ((rc == 0) && (bytes_read >= 8)) { | 416 | if ((rc == 0) && (bytes_read >= 8)) { |
412 | if (memcmp("IntxBLK", pbuf, 8) == 0) { | 417 | if (memcmp("IntxBLK", pbuf, 8) == 0) { |
413 | cFYI(1, "Block device"); | 418 | cFYI(1, "Block device"); |
@@ -468,7 +473,7 @@ static int cifs_sfu_mode(struct cifs_fattr *fattr, const unsigned char *path, | |||
468 | char ea_value[4]; | 473 | char ea_value[4]; |
469 | __u32 mode; | 474 | __u32 mode; |
470 | struct tcon_link *tlink; | 475 | struct tcon_link *tlink; |
471 | struct cifsTconInfo *tcon; | 476 | struct cifs_tcon *tcon; |
472 | 477 | ||
473 | tlink = cifs_sb_tlink(cifs_sb); | 478 | tlink = cifs_sb_tlink(cifs_sb); |
474 | if (IS_ERR(tlink)) | 479 | if (IS_ERR(tlink)) |
@@ -502,7 +507,7 @@ static void | |||
502 | cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, | 507 | cifs_all_info_to_fattr(struct cifs_fattr *fattr, FILE_ALL_INFO *info, |
503 | struct cifs_sb_info *cifs_sb, bool adjust_tz) | 508 | struct cifs_sb_info *cifs_sb, bool adjust_tz) |
504 | { | 509 | { |
505 | struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); | 510 | struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); |
506 | 511 | ||
507 | memset(fattr, 0, sizeof(*fattr)); | 512 | memset(fattr, 0, sizeof(*fattr)); |
508 | fattr->cf_cifsattrs = le32_to_cpu(info->Attributes); | 513 | fattr->cf_cifsattrs = le32_to_cpu(info->Attributes); |
@@ -553,7 +558,7 @@ int cifs_get_file_info(struct file *filp) | |||
553 | struct inode *inode = filp->f_path.dentry->d_inode; | 558 | struct inode *inode = filp->f_path.dentry->d_inode; |
554 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 559 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
555 | struct cifsFileInfo *cfile = filp->private_data; | 560 | struct cifsFileInfo *cfile = filp->private_data; |
556 | struct cifsTconInfo *tcon = tlink_tcon(cfile->tlink); | 561 | struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); |
557 | 562 | ||
558 | xid = GetXid(); | 563 | xid = GetXid(); |
559 | rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data); | 564 | rc = CIFSSMBQFileInfo(xid, tcon, cfile->netfid, &find_data); |
@@ -590,7 +595,7 @@ int cifs_get_inode_info(struct inode **pinode, | |||
590 | struct super_block *sb, int xid, const __u16 *pfid) | 595 | struct super_block *sb, int xid, const __u16 *pfid) |
591 | { | 596 | { |
592 | int rc = 0, tmprc; | 597 | int rc = 0, tmprc; |
593 | struct cifsTconInfo *pTcon; | 598 | struct cifs_tcon *pTcon; |
594 | struct tcon_link *tlink; | 599 | struct tcon_link *tlink; |
595 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | 600 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
596 | char *buf = NULL; | 601 | char *buf = NULL; |
@@ -735,10 +740,10 @@ static const struct inode_operations cifs_ipc_inode_ops = { | |||
735 | .lookup = cifs_lookup, | 740 | .lookup = cifs_lookup, |
736 | }; | 741 | }; |
737 | 742 | ||
738 | char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb, | 743 | char *cifs_build_path_to_root(struct smb_vol *vol, struct cifs_sb_info *cifs_sb, |
739 | struct cifsTconInfo *tcon) | 744 | struct cifs_tcon *tcon) |
740 | { | 745 | { |
741 | int pplen = cifs_sb->prepathlen; | 746 | int pplen = vol->prepath ? strlen(vol->prepath) : 0; |
742 | int dfsplen; | 747 | int dfsplen; |
743 | char *full_path = NULL; | 748 | char *full_path = NULL; |
744 | 749 | ||
@@ -772,7 +777,7 @@ char *cifs_build_path_to_root(struct cifs_sb_info *cifs_sb, | |||
772 | } | 777 | } |
773 | } | 778 | } |
774 | } | 779 | } |
775 | strncpy(full_path + dfsplen, cifs_sb->prepath, pplen); | 780 | strncpy(full_path + dfsplen, vol->prepath, pplen); |
776 | full_path[dfsplen + pplen] = 0; /* add trailing null */ | 781 | full_path[dfsplen + pplen] = 0; /* add trailing null */ |
777 | return full_path; | 782 | return full_path; |
778 | } | 783 | } |
@@ -884,19 +889,13 @@ struct inode *cifs_root_iget(struct super_block *sb) | |||
884 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | 889 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
885 | struct inode *inode = NULL; | 890 | struct inode *inode = NULL; |
886 | long rc; | 891 | long rc; |
887 | char *full_path; | 892 | struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); |
888 | struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); | ||
889 | |||
890 | full_path = cifs_build_path_to_root(cifs_sb, tcon); | ||
891 | if (full_path == NULL) | ||
892 | return ERR_PTR(-ENOMEM); | ||
893 | 893 | ||
894 | xid = GetXid(); | 894 | xid = GetXid(); |
895 | if (tcon->unix_ext) | 895 | if (tcon->unix_ext) |
896 | rc = cifs_get_inode_info_unix(&inode, full_path, sb, xid); | 896 | rc = cifs_get_inode_info_unix(&inode, "", sb, xid); |
897 | else | 897 | else |
898 | rc = cifs_get_inode_info(&inode, full_path, NULL, sb, | 898 | rc = cifs_get_inode_info(&inode, "", NULL, sb, xid, NULL); |
899 | xid, NULL); | ||
900 | 899 | ||
901 | if (!inode) { | 900 | if (!inode) { |
902 | inode = ERR_PTR(rc); | 901 | inode = ERR_PTR(rc); |
@@ -922,7 +921,6 @@ struct inode *cifs_root_iget(struct super_block *sb) | |||
922 | } | 921 | } |
923 | 922 | ||
924 | out: | 923 | out: |
925 | kfree(full_path); | ||
926 | /* can not call macro FreeXid here since in a void func | 924 | /* can not call macro FreeXid here since in a void func |
927 | * TODO: This is no longer true | 925 | * TODO: This is no longer true |
928 | */ | 926 | */ |
@@ -943,7 +941,7 @@ cifs_set_file_info(struct inode *inode, struct iattr *attrs, int xid, | |||
943 | struct cifsInodeInfo *cifsInode = CIFS_I(inode); | 941 | struct cifsInodeInfo *cifsInode = CIFS_I(inode); |
944 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 942 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
945 | struct tcon_link *tlink = NULL; | 943 | struct tcon_link *tlink = NULL; |
946 | struct cifsTconInfo *pTcon; | 944 | struct cifs_tcon *pTcon; |
947 | FILE_BASIC_INFO info_buf; | 945 | FILE_BASIC_INFO info_buf; |
948 | 946 | ||
949 | if (attrs == NULL) | 947 | if (attrs == NULL) |
@@ -1061,7 +1059,7 @@ cifs_rename_pending_delete(char *full_path, struct dentry *dentry, int xid) | |||
1061 | struct cifsInodeInfo *cifsInode = CIFS_I(inode); | 1059 | struct cifsInodeInfo *cifsInode = CIFS_I(inode); |
1062 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 1060 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
1063 | struct tcon_link *tlink; | 1061 | struct tcon_link *tlink; |
1064 | struct cifsTconInfo *tcon; | 1062 | struct cifs_tcon *tcon; |
1065 | __u32 dosattr, origattr; | 1063 | __u32 dosattr, origattr; |
1066 | FILE_BASIC_INFO *info_buf = NULL; | 1064 | FILE_BASIC_INFO *info_buf = NULL; |
1067 | 1065 | ||
@@ -1179,7 +1177,7 @@ int cifs_unlink(struct inode *dir, struct dentry *dentry) | |||
1179 | struct super_block *sb = dir->i_sb; | 1177 | struct super_block *sb = dir->i_sb; |
1180 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); | 1178 | struct cifs_sb_info *cifs_sb = CIFS_SB(sb); |
1181 | struct tcon_link *tlink; | 1179 | struct tcon_link *tlink; |
1182 | struct cifsTconInfo *tcon; | 1180 | struct cifs_tcon *tcon; |
1183 | struct iattr *attrs = NULL; | 1181 | struct iattr *attrs = NULL; |
1184 | __u32 dosattr = 0, origattr = 0; | 1182 | __u32 dosattr = 0, origattr = 0; |
1185 | 1183 | ||
@@ -1277,7 +1275,7 @@ int cifs_mkdir(struct inode *inode, struct dentry *direntry, int mode) | |||
1277 | int xid; | 1275 | int xid; |
1278 | struct cifs_sb_info *cifs_sb; | 1276 | struct cifs_sb_info *cifs_sb; |
1279 | struct tcon_link *tlink; | 1277 | struct tcon_link *tlink; |
1280 | struct cifsTconInfo *pTcon; | 1278 | struct cifs_tcon *pTcon; |
1281 | char *full_path = NULL; | 1279 | char *full_path = NULL; |
1282 | struct inode *newinode = NULL; | 1280 | struct inode *newinode = NULL; |
1283 | struct cifs_fattr fattr; | 1281 | struct cifs_fattr fattr; |
@@ -1455,7 +1453,7 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry) | |||
1455 | int xid; | 1453 | int xid; |
1456 | struct cifs_sb_info *cifs_sb; | 1454 | struct cifs_sb_info *cifs_sb; |
1457 | struct tcon_link *tlink; | 1455 | struct tcon_link *tlink; |
1458 | struct cifsTconInfo *pTcon; | 1456 | struct cifs_tcon *pTcon; |
1459 | char *full_path = NULL; | 1457 | char *full_path = NULL; |
1460 | struct cifsInodeInfo *cifsInode; | 1458 | struct cifsInodeInfo *cifsInode; |
1461 | 1459 | ||
@@ -1512,7 +1510,7 @@ cifs_do_rename(int xid, struct dentry *from_dentry, const char *fromPath, | |||
1512 | { | 1510 | { |
1513 | struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb); | 1511 | struct cifs_sb_info *cifs_sb = CIFS_SB(from_dentry->d_sb); |
1514 | struct tcon_link *tlink; | 1512 | struct tcon_link *tlink; |
1515 | struct cifsTconInfo *pTcon; | 1513 | struct cifs_tcon *pTcon; |
1516 | __u16 srcfid; | 1514 | __u16 srcfid; |
1517 | int oplock, rc; | 1515 | int oplock, rc; |
1518 | 1516 | ||
@@ -1564,7 +1562,7 @@ int cifs_rename(struct inode *source_dir, struct dentry *source_dentry, | |||
1564 | char *toName = NULL; | 1562 | char *toName = NULL; |
1565 | struct cifs_sb_info *cifs_sb; | 1563 | struct cifs_sb_info *cifs_sb; |
1566 | struct tcon_link *tlink; | 1564 | struct tcon_link *tlink; |
1567 | struct cifsTconInfo *tcon; | 1565 | struct cifs_tcon *tcon; |
1568 | FILE_UNIX_BASIC_INFO *info_buf_source = NULL; | 1566 | FILE_UNIX_BASIC_INFO *info_buf_source = NULL; |
1569 | FILE_UNIX_BASIC_INFO *info_buf_target; | 1567 | FILE_UNIX_BASIC_INFO *info_buf_target; |
1570 | int xid, rc, tmprc; | 1568 | int xid, rc, tmprc; |
@@ -1794,7 +1792,7 @@ int cifs_getattr(struct vfsmount *mnt, struct dentry *dentry, | |||
1794 | struct kstat *stat) | 1792 | struct kstat *stat) |
1795 | { | 1793 | { |
1796 | struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb); | 1794 | struct cifs_sb_info *cifs_sb = CIFS_SB(dentry->d_sb); |
1797 | struct cifsTconInfo *tcon = cifs_sb_master_tcon(cifs_sb); | 1795 | struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb); |
1798 | struct inode *inode = dentry->d_inode; | 1796 | struct inode *inode = dentry->d_inode; |
1799 | int rc; | 1797 | int rc; |
1800 | 1798 | ||
@@ -1872,7 +1870,8 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, | |||
1872 | struct cifsInodeInfo *cifsInode = CIFS_I(inode); | 1870 | struct cifsInodeInfo *cifsInode = CIFS_I(inode); |
1873 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 1871 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
1874 | struct tcon_link *tlink = NULL; | 1872 | struct tcon_link *tlink = NULL; |
1875 | struct cifsTconInfo *pTcon = NULL; | 1873 | struct cifs_tcon *pTcon = NULL; |
1874 | struct cifs_io_parms io_parms; | ||
1876 | 1875 | ||
1877 | /* | 1876 | /* |
1878 | * To avoid spurious oplock breaks from server, in the case of | 1877 | * To avoid spurious oplock breaks from server, in the case of |
@@ -1894,8 +1893,14 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, | |||
1894 | cFYI(1, "SetFSize for attrs rc = %d", rc); | 1893 | cFYI(1, "SetFSize for attrs rc = %d", rc); |
1895 | if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { | 1894 | if ((rc == -EINVAL) || (rc == -EOPNOTSUPP)) { |
1896 | unsigned int bytes_written; | 1895 | unsigned int bytes_written; |
1897 | rc = CIFSSMBWrite(xid, pTcon, nfid, 0, attrs->ia_size, | 1896 | |
1898 | &bytes_written, NULL, NULL, 1); | 1897 | io_parms.netfid = nfid; |
1898 | io_parms.pid = npid; | ||
1899 | io_parms.tcon = pTcon; | ||
1900 | io_parms.offset = 0; | ||
1901 | io_parms.length = attrs->ia_size; | ||
1902 | rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, | ||
1903 | NULL, NULL, 1); | ||
1899 | cFYI(1, "Wrt seteof rc %d", rc); | 1904 | cFYI(1, "Wrt seteof rc %d", rc); |
1900 | } | 1905 | } |
1901 | } else | 1906 | } else |
@@ -1930,10 +1935,15 @@ cifs_set_file_size(struct inode *inode, struct iattr *attrs, | |||
1930 | CIFS_MOUNT_MAP_SPECIAL_CHR); | 1935 | CIFS_MOUNT_MAP_SPECIAL_CHR); |
1931 | if (rc == 0) { | 1936 | if (rc == 0) { |
1932 | unsigned int bytes_written; | 1937 | unsigned int bytes_written; |
1933 | rc = CIFSSMBWrite(xid, pTcon, netfid, 0, | 1938 | |
1934 | attrs->ia_size, | 1939 | io_parms.netfid = netfid; |
1935 | &bytes_written, NULL, | 1940 | io_parms.pid = current->tgid; |
1936 | NULL, 1); | 1941 | io_parms.tcon = pTcon; |
1942 | io_parms.offset = 0; | ||
1943 | io_parms.length = attrs->ia_size; | ||
1944 | rc = CIFSSMBWrite(xid, &io_parms, | ||
1945 | &bytes_written, | ||
1946 | NULL, NULL, 1); | ||
1937 | cFYI(1, "wrt seteof rc %d", rc); | 1947 | cFYI(1, "wrt seteof rc %d", rc); |
1938 | CIFSSMBClose(xid, pTcon, netfid); | 1948 | CIFSSMBClose(xid, pTcon, netfid); |
1939 | } | 1949 | } |
@@ -1961,7 +1971,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs) | |||
1961 | struct cifsInodeInfo *cifsInode = CIFS_I(inode); | 1971 | struct cifsInodeInfo *cifsInode = CIFS_I(inode); |
1962 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 1972 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
1963 | struct tcon_link *tlink; | 1973 | struct tcon_link *tlink; |
1964 | struct cifsTconInfo *pTcon; | 1974 | struct cifs_tcon *pTcon; |
1965 | struct cifs_unix_set_info_args *args = NULL; | 1975 | struct cifs_unix_set_info_args *args = NULL; |
1966 | struct cifsFileInfo *open_file; | 1976 | struct cifsFileInfo *open_file; |
1967 | 1977 | ||
@@ -2247,7 +2257,7 @@ cifs_setattr(struct dentry *direntry, struct iattr *attrs) | |||
2247 | { | 2257 | { |
2248 | struct inode *inode = direntry->d_inode; | 2258 | struct inode *inode = direntry->d_inode; |
2249 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 2259 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
2250 | struct cifsTconInfo *pTcon = cifs_sb_master_tcon(cifs_sb); | 2260 | struct cifs_tcon *pTcon = cifs_sb_master_tcon(cifs_sb); |
2251 | 2261 | ||
2252 | if (pTcon->unix_ext) | 2262 | if (pTcon->unix_ext) |
2253 | return cifs_setattr_unix(direntry, attrs); | 2263 | return cifs_setattr_unix(direntry, attrs); |
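In the inode.c hunks above, cifs_build_path_to_root() now takes the parsed mount options and derives the prefix path from vol->prepath rather than cifs_sb->prepath, and cifs_root_iget() queries "" for the root instead of building a full path. A minimal sketch of the new prefix handling, with the DFS prefix logic elided; root_path_from_vol() is an illustrative helper, not the kernel function.

/* Sketch only: build the path to the share root from the parsed mount
 * options (vol->prepath), as the reworked cifs_build_path_to_root()
 * does above; DFS prefix handling is elided. */
static char *root_path_from_vol(struct smb_vol *vol)
{
	int pplen = vol->prepath ? strlen(vol->prepath) : 0;
	char *full_path;

	full_path = kmalloc(pplen + 1, GFP_KERNEL);
	if (full_path == NULL)
		return NULL;

	if (pplen)
		strncpy(full_path, vol->prepath, pplen);
	full_path[pplen] = 0;	/* add trailing null */
	return full_path;
}
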
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 0c98672d0122..4221b5e48a42 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -38,7 +38,7 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg) | |||
38 | struct cifs_sb_info *cifs_sb; | 38 | struct cifs_sb_info *cifs_sb; |
39 | #ifdef CONFIG_CIFS_POSIX | 39 | #ifdef CONFIG_CIFS_POSIX |
40 | struct cifsFileInfo *pSMBFile = filep->private_data; | 40 | struct cifsFileInfo *pSMBFile = filep->private_data; |
41 | struct cifsTconInfo *tcon; | 41 | struct cifs_tcon *tcon; |
42 | __u64 ExtAttrBits = 0; | 42 | __u64 ExtAttrBits = 0; |
43 | __u64 ExtAttrMask = 0; | 43 | __u64 ExtAttrMask = 0; |
44 | __u64 caps; | 44 | __u64 caps; |
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index ce417a9764a3..556b1a0b54de 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -175,7 +175,7 @@ CIFSFormatMFSymlink(u8 *buf, unsigned int buf_len, const char *link_str) | |||
175 | } | 175 | } |
176 | 176 | ||
177 | static int | 177 | static int |
178 | CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon, | 178 | CIFSCreateMFSymLink(const int xid, struct cifs_tcon *tcon, |
179 | const char *fromName, const char *toName, | 179 | const char *fromName, const char *toName, |
180 | const struct nls_table *nls_codepage, int remap) | 180 | const struct nls_table *nls_codepage, int remap) |
181 | { | 181 | { |
@@ -184,6 +184,7 @@ CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon, | |||
184 | __u16 netfid = 0; | 184 | __u16 netfid = 0; |
185 | u8 *buf; | 185 | u8 *buf; |
186 | unsigned int bytes_written = 0; | 186 | unsigned int bytes_written = 0; |
187 | struct cifs_io_parms io_parms; | ||
187 | 188 | ||
188 | buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); | 189 | buf = kmalloc(CIFS_MF_SYMLINK_FILE_SIZE, GFP_KERNEL); |
189 | if (!buf) | 190 | if (!buf) |
@@ -203,10 +204,13 @@ CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon, | |||
203 | return rc; | 204 | return rc; |
204 | } | 205 | } |
205 | 206 | ||
206 | rc = CIFSSMBWrite(xid, tcon, netfid, | 207 | io_parms.netfid = netfid; |
207 | CIFS_MF_SYMLINK_FILE_SIZE /* length */, | 208 | io_parms.pid = current->tgid; |
208 | 0 /* offset */, | 209 | io_parms.tcon = tcon; |
209 | &bytes_written, buf, NULL, 0); | 210 | io_parms.offset = 0; |
211 | io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; | ||
212 | |||
213 | rc = CIFSSMBWrite(xid, &io_parms, &bytes_written, buf, NULL, 0); | ||
210 | CIFSSMBClose(xid, tcon, netfid); | 214 | CIFSSMBClose(xid, tcon, netfid); |
211 | kfree(buf); | 215 | kfree(buf); |
212 | if (rc != 0) | 216 | if (rc != 0) |
@@ -219,7 +223,7 @@ CIFSCreateMFSymLink(const int xid, struct cifsTconInfo *tcon, | |||
219 | } | 223 | } |
220 | 224 | ||
221 | static int | 225 | static int |
222 | CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon, | 226 | CIFSQueryMFSymLink(const int xid, struct cifs_tcon *tcon, |
223 | const unsigned char *searchName, char **symlinkinfo, | 227 | const unsigned char *searchName, char **symlinkinfo, |
224 | const struct nls_table *nls_codepage, int remap) | 228 | const struct nls_table *nls_codepage, int remap) |
225 | { | 229 | { |
@@ -231,6 +235,7 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon, | |||
231 | unsigned int bytes_read = 0; | 235 | unsigned int bytes_read = 0; |
232 | int buf_type = CIFS_NO_BUFFER; | 236 | int buf_type = CIFS_NO_BUFFER; |
233 | unsigned int link_len = 0; | 237 | unsigned int link_len = 0; |
238 | struct cifs_io_parms io_parms; | ||
234 | FILE_ALL_INFO file_info; | 239 | FILE_ALL_INFO file_info; |
235 | 240 | ||
236 | rc = CIFSSMBOpen(xid, tcon, searchName, FILE_OPEN, GENERIC_READ, | 241 | rc = CIFSSMBOpen(xid, tcon, searchName, FILE_OPEN, GENERIC_READ, |
@@ -249,11 +254,13 @@ CIFSQueryMFSymLink(const int xid, struct cifsTconInfo *tcon, | |||
249 | if (!buf) | 254 | if (!buf) |
250 | return -ENOMEM; | 255 | return -ENOMEM; |
251 | pbuf = buf; | 256 | pbuf = buf; |
257 | io_parms.netfid = netfid; | ||
258 | io_parms.pid = current->tgid; | ||
259 | io_parms.tcon = tcon; | ||
260 | io_parms.offset = 0; | ||
261 | io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; | ||
252 | 262 | ||
253 | rc = CIFSSMBRead(xid, tcon, netfid, | 263 | rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type); |
254 | CIFS_MF_SYMLINK_FILE_SIZE /* length */, | ||
255 | 0 /* offset */, | ||
256 | &bytes_read, &pbuf, &buf_type); | ||
257 | CIFSSMBClose(xid, tcon, netfid); | 264 | CIFSSMBClose(xid, tcon, netfid); |
258 | if (rc != 0) { | 265 | if (rc != 0) { |
259 | kfree(buf); | 266 | kfree(buf); |
@@ -291,7 +298,8 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr, | |||
291 | int oplock = 0; | 298 | int oplock = 0; |
292 | __u16 netfid = 0; | 299 | __u16 netfid = 0; |
293 | struct tcon_link *tlink; | 300 | struct tcon_link *tlink; |
294 | struct cifsTconInfo *pTcon; | 301 | struct cifs_tcon *pTcon; |
302 | struct cifs_io_parms io_parms; | ||
295 | u8 *buf; | 303 | u8 *buf; |
296 | char *pbuf; | 304 | char *pbuf; |
297 | unsigned int bytes_read = 0; | 305 | unsigned int bytes_read = 0; |
@@ -328,11 +336,13 @@ CIFSCheckMFSymlink(struct cifs_fattr *fattr, | |||
328 | goto out; | 336 | goto out; |
329 | } | 337 | } |
330 | pbuf = buf; | 338 | pbuf = buf; |
339 | io_parms.netfid = netfid; | ||
340 | io_parms.pid = current->tgid; | ||
341 | io_parms.tcon = pTcon; | ||
342 | io_parms.offset = 0; | ||
343 | io_parms.length = CIFS_MF_SYMLINK_FILE_SIZE; | ||
331 | 344 | ||
332 | rc = CIFSSMBRead(xid, pTcon, netfid, | 345 | rc = CIFSSMBRead(xid, &io_parms, &bytes_read, &pbuf, &buf_type); |
333 | CIFS_MF_SYMLINK_FILE_SIZE /* length */, | ||
334 | 0 /* offset */, | ||
335 | &bytes_read, &pbuf, &buf_type); | ||
336 | CIFSSMBClose(xid, pTcon, netfid); | 346 | CIFSSMBClose(xid, pTcon, netfid); |
337 | if (rc != 0) { | 347 | if (rc != 0) { |
338 | kfree(buf); | 348 | kfree(buf); |
@@ -370,7 +380,7 @@ cifs_hardlink(struct dentry *old_file, struct inode *inode, | |||
370 | char *toName = NULL; | 380 | char *toName = NULL; |
371 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 381 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
372 | struct tcon_link *tlink; | 382 | struct tcon_link *tlink; |
373 | struct cifsTconInfo *pTcon; | 383 | struct cifs_tcon *pTcon; |
374 | struct cifsInodeInfo *cifsInode; | 384 | struct cifsInodeInfo *cifsInode; |
375 | 385 | ||
376 | tlink = cifs_sb_tlink(cifs_sb); | 386 | tlink = cifs_sb_tlink(cifs_sb); |
@@ -445,7 +455,7 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd) | |||
445 | char *target_path = NULL; | 455 | char *target_path = NULL; |
446 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 456 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
447 | struct tcon_link *tlink = NULL; | 457 | struct tcon_link *tlink = NULL; |
448 | struct cifsTconInfo *tcon; | 458 | struct cifs_tcon *tcon; |
449 | 459 | ||
450 | xid = GetXid(); | 460 | xid = GetXid(); |
451 | 461 | ||
@@ -518,7 +528,7 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname) | |||
518 | int xid; | 528 | int xid; |
519 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); | 529 | struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); |
520 | struct tcon_link *tlink; | 530 | struct tcon_link *tlink; |
521 | struct cifsTconInfo *pTcon; | 531 | struct cifs_tcon *pTcon; |
522 | char *full_path = NULL; | 532 | char *full_path = NULL; |
523 | struct inode *newinode = NULL; | 533 | struct inode *newinode = NULL; |
524 | 534 | ||
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 907531ac5888..03a1f491d39b 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -67,12 +67,12 @@ _FreeXid(unsigned int xid) | |||
67 | spin_unlock(&GlobalMid_Lock); | 67 | spin_unlock(&GlobalMid_Lock); |
68 | } | 68 | } |
69 | 69 | ||
70 | struct cifsSesInfo * | 70 | struct cifs_ses * |
71 | sesInfoAlloc(void) | 71 | sesInfoAlloc(void) |
72 | { | 72 | { |
73 | struct cifsSesInfo *ret_buf; | 73 | struct cifs_ses *ret_buf; |
74 | 74 | ||
75 | ret_buf = kzalloc(sizeof(struct cifsSesInfo), GFP_KERNEL); | 75 | ret_buf = kzalloc(sizeof(struct cifs_ses), GFP_KERNEL); |
76 | if (ret_buf) { | 76 | if (ret_buf) { |
77 | atomic_inc(&sesInfoAllocCount); | 77 | atomic_inc(&sesInfoAllocCount); |
78 | ret_buf->status = CifsNew; | 78 | ret_buf->status = CifsNew; |
@@ -85,7 +85,7 @@ sesInfoAlloc(void) | |||
85 | } | 85 | } |
86 | 86 | ||
87 | void | 87 | void |
88 | sesInfoFree(struct cifsSesInfo *buf_to_free) | 88 | sesInfoFree(struct cifs_ses *buf_to_free) |
89 | { | 89 | { |
90 | if (buf_to_free == NULL) { | 90 | if (buf_to_free == NULL) { |
91 | cFYI(1, "Null buffer passed to sesInfoFree"); | 91 | cFYI(1, "Null buffer passed to sesInfoFree"); |
@@ -105,11 +105,11 @@ sesInfoFree(struct cifsSesInfo *buf_to_free) | |||
105 | kfree(buf_to_free); | 105 | kfree(buf_to_free); |
106 | } | 106 | } |
107 | 107 | ||
108 | struct cifsTconInfo * | 108 | struct cifs_tcon * |
109 | tconInfoAlloc(void) | 109 | tconInfoAlloc(void) |
110 | { | 110 | { |
111 | struct cifsTconInfo *ret_buf; | 111 | struct cifs_tcon *ret_buf; |
112 | ret_buf = kzalloc(sizeof(struct cifsTconInfo), GFP_KERNEL); | 112 | ret_buf = kzalloc(sizeof(struct cifs_tcon), GFP_KERNEL); |
113 | if (ret_buf) { | 113 | if (ret_buf) { |
114 | atomic_inc(&tconInfoAllocCount); | 114 | atomic_inc(&tconInfoAllocCount); |
115 | ret_buf->tidStatus = CifsNew; | 115 | ret_buf->tidStatus = CifsNew; |
@@ -124,7 +124,7 @@ tconInfoAlloc(void) | |||
124 | } | 124 | } |
125 | 125 | ||
126 | void | 126 | void |
127 | tconInfoFree(struct cifsTconInfo *buf_to_free) | 127 | tconInfoFree(struct cifs_tcon *buf_to_free) |
128 | { | 128 | { |
129 | if (buf_to_free == NULL) { | 129 | if (buf_to_free == NULL) { |
130 | cFYI(1, "Null buffer passed to tconInfoFree"); | 130 | cFYI(1, "Null buffer passed to tconInfoFree"); |
@@ -295,11 +295,11 @@ __u16 GetNextMid(struct TCP_Server_Info *server) | |||
295 | case it is responsbility of caller to set the mid */ | 295 | case it is responsbility of caller to set the mid */ |
296 | void | 296 | void |
297 | header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , | 297 | header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , |
298 | const struct cifsTconInfo *treeCon, int word_count | 298 | const struct cifs_tcon *treeCon, int word_count |
299 | /* length of fixed section (word count) in two byte units */) | 299 | /* length of fixed section (word count) in two byte units */) |
300 | { | 300 | { |
301 | struct list_head *temp_item; | 301 | struct list_head *temp_item; |
302 | struct cifsSesInfo *ses; | 302 | struct cifs_ses *ses; |
303 | char *temp = (char *) buffer; | 303 | char *temp = (char *) buffer; |
304 | 304 | ||
305 | memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */ | 305 | memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */ |
@@ -359,7 +359,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , | |||
359 | "did not match tcon uid"); | 359 | "did not match tcon uid"); |
360 | spin_lock(&cifs_tcp_ses_lock); | 360 | spin_lock(&cifs_tcp_ses_lock); |
361 | list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) { | 361 | list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) { |
362 | ses = list_entry(temp_item, struct cifsSesInfo, smb_ses_list); | 362 | ses = list_entry(temp_item, struct cifs_ses, smb_ses_list); |
363 | if (ses->linux_uid == current_fsuid()) { | 363 | if (ses->linux_uid == current_fsuid()) { |
364 | if (ses->server == treeCon->ses->server) { | 364 | if (ses->server == treeCon->ses->server) { |
365 | cFYI(1, "found matching uid substitute right smb_uid"); | 365 | cFYI(1, "found matching uid substitute right smb_uid"); |
@@ -380,7 +380,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ , | |||
380 | if (treeCon->nocase) | 380 | if (treeCon->nocase) |
381 | buffer->Flags |= SMBFLG_CASELESS; | 381 | buffer->Flags |= SMBFLG_CASELESS; |
382 | if ((treeCon->ses) && (treeCon->ses->server)) | 382 | if ((treeCon->ses) && (treeCon->ses->server)) |
383 | if (treeCon->ses->server->secMode & | 383 | if (treeCon->ses->server->sec_mode & |
384 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | 384 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) |
385 | buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; | 385 | buffer->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; |
386 | } | 386 | } |
@@ -507,8 +507,8 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) | |||
507 | { | 507 | { |
508 | struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf; | 508 | struct smb_com_lock_req *pSMB = (struct smb_com_lock_req *)buf; |
509 | struct list_head *tmp, *tmp1, *tmp2; | 509 | struct list_head *tmp, *tmp1, *tmp2; |
510 | struct cifsSesInfo *ses; | 510 | struct cifs_ses *ses; |
511 | struct cifsTconInfo *tcon; | 511 | struct cifs_tcon *tcon; |
512 | struct cifsInodeInfo *pCifsInode; | 512 | struct cifsInodeInfo *pCifsInode; |
513 | struct cifsFileInfo *netfile; | 513 | struct cifsFileInfo *netfile; |
514 | 514 | ||
@@ -566,9 +566,9 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv) | |||
566 | /* look up tcon based on tid & uid */ | 566 | /* look up tcon based on tid & uid */ |
567 | spin_lock(&cifs_tcp_ses_lock); | 567 | spin_lock(&cifs_tcp_ses_lock); |
568 | list_for_each(tmp, &srv->smb_ses_list) { | 568 | list_for_each(tmp, &srv->smb_ses_list) { |
569 | ses = list_entry(tmp, struct cifsSesInfo, smb_ses_list); | 569 | ses = list_entry(tmp, struct cifs_ses, smb_ses_list); |
570 | list_for_each(tmp1, &ses->tcon_list) { | 570 | list_for_each(tmp1, &ses->tcon_list) { |
571 | tcon = list_entry(tmp1, struct cifsTconInfo, tcon_list); | 571 | tcon = list_entry(tmp1, struct cifs_tcon, tcon_list); |
572 | if (tcon->tid != buf->Tid) | 572 | if (tcon->tid != buf->Tid) |
573 | continue; | 573 | continue; |
574 | 574 | ||
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 79b71c2c7c9d..73e47e84b61a 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -836,7 +836,7 @@ ntstatus_to_dos(__u32 ntstatus, __u8 *eclass, __u16 *ecode) | |||
836 | } | 836 | } |
837 | 837 | ||
838 | int | 838 | int |
839 | map_smb_to_linux_error(struct smb_hdr *smb, int logErr) | 839 | map_smb_to_linux_error(struct smb_hdr *smb, bool logErr) |
840 | { | 840 | { |
841 | unsigned int i; | 841 | unsigned int i; |
842 | int rc = -EIO; /* if transport error smb error may not be set */ | 842 | int rc = -EIO; /* if transport error smb error may not be set */ |
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index f8e4cd2a7912..6751e745bbc6 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -195,7 +195,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb, | |||
195 | int len; | 195 | int len; |
196 | int oplock = 0; | 196 | int oplock = 0; |
197 | int rc; | 197 | int rc; |
198 | struct cifsTconInfo *ptcon = cifs_sb_tcon(cifs_sb); | 198 | struct cifs_tcon *ptcon = cifs_sb_tcon(cifs_sb); |
199 | char *tmpbuffer; | 199 | char *tmpbuffer; |
200 | 200 | ||
201 | rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ, | 201 | rc = CIFSSMBOpen(xid, ptcon, full_path, FILE_OPEN, GENERIC_READ, |
@@ -223,7 +223,7 @@ static int initiate_cifs_search(const int xid, struct file *file) | |||
223 | struct cifsFileInfo *cifsFile; | 223 | struct cifsFileInfo *cifsFile; |
224 | struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); | 224 | struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); |
225 | struct tcon_link *tlink = NULL; | 225 | struct tcon_link *tlink = NULL; |
226 | struct cifsTconInfo *pTcon; | 226 | struct cifs_tcon *pTcon; |
227 | 227 | ||
228 | if (file->private_data == NULL) { | 228 | if (file->private_data == NULL) { |
229 | tlink = cifs_sb_tlink(cifs_sb); | 229 | tlink = cifs_sb_tlink(cifs_sb); |
@@ -496,7 +496,7 @@ static int cifs_save_resume_key(const char *current_entry, | |||
496 | assume that they are located in the findfirst return buffer.*/ | 496 | assume that they are located in the findfirst return buffer.*/ |
497 | /* We start counting in the buffer with entry 2 and increment for every | 497 | /* We start counting in the buffer with entry 2 and increment for every |
498 | entry (do not increment for . or .. entry) */ | 498 | entry (do not increment for . or .. entry) */ |
499 | static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon, | 499 | static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon, |
500 | struct file *file, char **ppCurrentEntry, int *num_to_ret) | 500 | struct file *file, char **ppCurrentEntry, int *num_to_ret) |
501 | { | 501 | { |
502 | int rc = 0; | 502 | int rc = 0; |
@@ -764,7 +764,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir) | |||
764 | { | 764 | { |
765 | int rc = 0; | 765 | int rc = 0; |
766 | int xid, i; | 766 | int xid, i; |
767 | struct cifsTconInfo *pTcon; | 767 | struct cifs_tcon *pTcon; |
768 | struct cifsFileInfo *cifsFile = NULL; | 768 | struct cifsFileInfo *cifsFile = NULL; |
769 | char *current_entry; | 769 | char *current_entry; |
770 | int num_to_fill = 0; | 770 | int num_to_fill = 0; |
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 7dd462100378..3892ab817a36 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -37,13 +37,13 @@ | |||
37 | * the socket has been reestablished (so we know whether to use vc 0). | 37 | * the socket has been reestablished (so we know whether to use vc 0). |
38 | * Called while holding the cifs_tcp_ses_lock, so do not block | 38 | * Called while holding the cifs_tcp_ses_lock, so do not block |
39 | */ | 39 | */ |
40 | static bool is_first_ses_reconnect(struct cifsSesInfo *ses) | 40 | static bool is_first_ses_reconnect(struct cifs_ses *ses) |
41 | { | 41 | { |
42 | struct list_head *tmp; | 42 | struct list_head *tmp; |
43 | struct cifsSesInfo *tmp_ses; | 43 | struct cifs_ses *tmp_ses; |
44 | 44 | ||
45 | list_for_each(tmp, &ses->server->smb_ses_list) { | 45 | list_for_each(tmp, &ses->server->smb_ses_list) { |
46 | tmp_ses = list_entry(tmp, struct cifsSesInfo, | 46 | tmp_ses = list_entry(tmp, struct cifs_ses, |
47 | smb_ses_list); | 47 | smb_ses_list); |
48 | if (tmp_ses->need_reconnect == false) | 48 | if (tmp_ses->need_reconnect == false) |
49 | return false; | 49 | return false; |
@@ -61,11 +61,11 @@ static bool is_first_ses_reconnect(struct cifsSesInfo *ses) | |||
61 | * any vc but zero (some servers reset the connection on vcnum zero) | 61 | * any vc but zero (some servers reset the connection on vcnum zero) |
62 | * | 62 | * |
63 | */ | 63 | */ |
64 | static __le16 get_next_vcnum(struct cifsSesInfo *ses) | 64 | static __le16 get_next_vcnum(struct cifs_ses *ses) |
65 | { | 65 | { |
66 | __u16 vcnum = 0; | 66 | __u16 vcnum = 0; |
67 | struct list_head *tmp; | 67 | struct list_head *tmp; |
68 | struct cifsSesInfo *tmp_ses; | 68 | struct cifs_ses *tmp_ses; |
69 | __u16 max_vcs = ses->server->max_vcs; | 69 | __u16 max_vcs = ses->server->max_vcs; |
70 | __u16 i; | 70 | __u16 i; |
71 | int free_vc_found = 0; | 71 | int free_vc_found = 0; |
@@ -87,7 +87,7 @@ static __le16 get_next_vcnum(struct cifsSesInfo *ses) | |||
87 | free_vc_found = 1; | 87 | free_vc_found = 1; |
88 | 88 | ||
89 | list_for_each(tmp, &ses->server->smb_ses_list) { | 89 | list_for_each(tmp, &ses->server->smb_ses_list) { |
90 | tmp_ses = list_entry(tmp, struct cifsSesInfo, | 90 | tmp_ses = list_entry(tmp, struct cifs_ses, |
91 | smb_ses_list); | 91 | smb_ses_list); |
92 | if (tmp_ses->vcnum == i) { | 92 | if (tmp_ses->vcnum == i) { |
93 | free_vc_found = 0; | 93 | free_vc_found = 0; |
@@ -114,7 +114,7 @@ get_vc_num_exit: | |||
114 | return cpu_to_le16(vcnum); | 114 | return cpu_to_le16(vcnum); |
115 | } | 115 | } |
116 | 116 | ||
117 | static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB) | 117 | static __u32 cifs_ssetup_hdr(struct cifs_ses *ses, SESSION_SETUP_ANDX *pSMB) |
118 | { | 118 | { |
119 | __u32 capabilities = 0; | 119 | __u32 capabilities = 0; |
120 | 120 | ||
@@ -136,7 +136,7 @@ static __u32 cifs_ssetup_hdr(struct cifsSesInfo *ses, SESSION_SETUP_ANDX *pSMB) | |||
136 | capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS | | 136 | capabilities = CAP_LARGE_FILES | CAP_NT_SMBS | CAP_LEVEL_II_OPLOCKS | |
137 | CAP_LARGE_WRITE_X | CAP_LARGE_READ_X; | 137 | CAP_LARGE_WRITE_X | CAP_LARGE_READ_X; |
138 | 138 | ||
139 | if (ses->server->secMode & | 139 | if (ses->server->sec_mode & |
140 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | 140 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) |
141 | pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; | 141 | pSMB->req.hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE; |
142 | 142 | ||
@@ -181,7 +181,7 @@ unicode_oslm_strings(char **pbcc_area, const struct nls_table *nls_cp) | |||
181 | *pbcc_area = bcc_ptr; | 181 | *pbcc_area = bcc_ptr; |
182 | } | 182 | } |
183 | 183 | ||
184 | static void unicode_domain_string(char **pbcc_area, struct cifsSesInfo *ses, | 184 | static void unicode_domain_string(char **pbcc_area, struct cifs_ses *ses, |
185 | const struct nls_table *nls_cp) | 185 | const struct nls_table *nls_cp) |
186 | { | 186 | { |
187 | char *bcc_ptr = *pbcc_area; | 187 | char *bcc_ptr = *pbcc_area; |
@@ -204,7 +204,7 @@ static void unicode_domain_string(char **pbcc_area, struct cifsSesInfo *ses, | |||
204 | } | 204 | } |
205 | 205 | ||
206 | 206 | ||
207 | static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, | 207 | static void unicode_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, |
208 | const struct nls_table *nls_cp) | 208 | const struct nls_table *nls_cp) |
209 | { | 209 | { |
210 | char *bcc_ptr = *pbcc_area; | 210 | char *bcc_ptr = *pbcc_area; |
@@ -236,7 +236,7 @@ static void unicode_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, | |||
236 | *pbcc_area = bcc_ptr; | 236 | *pbcc_area = bcc_ptr; |
237 | } | 237 | } |
238 | 238 | ||
239 | static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, | 239 | static void ascii_ssetup_strings(char **pbcc_area, struct cifs_ses *ses, |
240 | const struct nls_table *nls_cp) | 240 | const struct nls_table *nls_cp) |
241 | { | 241 | { |
242 | char *bcc_ptr = *pbcc_area; | 242 | char *bcc_ptr = *pbcc_area; |
@@ -276,7 +276,7 @@ static void ascii_ssetup_strings(char **pbcc_area, struct cifsSesInfo *ses, | |||
276 | } | 276 | } |
277 | 277 | ||
278 | static void | 278 | static void |
279 | decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, | 279 | decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifs_ses *ses, |
280 | const struct nls_table *nls_cp) | 280 | const struct nls_table *nls_cp) |
281 | { | 281 | { |
282 | int len; | 282 | int len; |
@@ -310,7 +310,7 @@ decode_unicode_ssetup(char **pbcc_area, int bleft, struct cifsSesInfo *ses, | |||
310 | } | 310 | } |
311 | 311 | ||
312 | static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft, | 312 | static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft, |
313 | struct cifsSesInfo *ses, | 313 | struct cifs_ses *ses, |
314 | const struct nls_table *nls_cp) | 314 | const struct nls_table *nls_cp) |
315 | { | 315 | { |
316 | int rc = 0; | 316 | int rc = 0; |
@@ -364,7 +364,7 @@ static int decode_ascii_ssetup(char **pbcc_area, __u16 bleft, | |||
364 | } | 364 | } |
365 | 365 | ||
366 | static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, | 366 | static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, |
367 | struct cifsSesInfo *ses) | 367 | struct cifs_ses *ses) |
368 | { | 368 | { |
369 | unsigned int tioffset; /* challenge message target info area */ | 369 | unsigned int tioffset; /* challenge message target info area */ |
370 | unsigned int tilen; /* challenge message target info area length */ | 370 | unsigned int tilen; /* challenge message target info area length */ |
@@ -411,7 +411,7 @@ static int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, | |||
411 | /* We do not malloc the blob, it is passed in pbuffer, because | 411 | /* We do not malloc the blob, it is passed in pbuffer, because |
412 | it is fixed size, and small, making this approach cleaner */ | 412 | it is fixed size, and small, making this approach cleaner */ |
413 | static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, | 413 | static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, |
414 | struct cifsSesInfo *ses) | 414 | struct cifs_ses *ses) |
415 | { | 415 | { |
416 | NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer; | 416 | NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer; |
417 | __u32 flags; | 417 | __u32 flags; |
@@ -424,7 +424,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, | |||
424 | flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | | 424 | flags = NTLMSSP_NEGOTIATE_56 | NTLMSSP_REQUEST_TARGET | |
425 | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | | 425 | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | |
426 | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; | 426 | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; |
427 | if (ses->server->secMode & | 427 | if (ses->server->sec_mode & |
428 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { | 428 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { |
429 | flags |= NTLMSSP_NEGOTIATE_SIGN; | 429 | flags |= NTLMSSP_NEGOTIATE_SIGN; |
430 | if (!ses->server->session_estab) | 430 | if (!ses->server->session_estab) |
@@ -449,7 +449,7 @@ static void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, | |||
449 | This function returns the length of the data in the blob */ | 449 | This function returns the length of the data in the blob */ |
450 | static int build_ntlmssp_auth_blob(unsigned char *pbuffer, | 450 | static int build_ntlmssp_auth_blob(unsigned char *pbuffer, |
451 | u16 *buflen, | 451 | u16 *buflen, |
452 | struct cifsSesInfo *ses, | 452 | struct cifs_ses *ses, |
453 | const struct nls_table *nls_cp) | 453 | const struct nls_table *nls_cp) |
454 | { | 454 | { |
455 | int rc; | 455 | int rc; |
@@ -464,10 +464,10 @@ static int build_ntlmssp_auth_blob(unsigned char *pbuffer, | |||
464 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | | 464 | NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO | |
465 | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | | 465 | NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE | |
466 | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; | 466 | NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC; |
467 | if (ses->server->secMode & | 467 | if (ses->server->sec_mode & |
468 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | 468 | (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) |
469 | flags |= NTLMSSP_NEGOTIATE_SIGN; | 469 | flags |= NTLMSSP_NEGOTIATE_SIGN; |
470 | if (ses->server->secMode & SECMODE_SIGN_REQUIRED) | 470 | if (ses->server->sec_mode & SECMODE_SIGN_REQUIRED) |
471 | flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; | 471 | flags |= NTLMSSP_NEGOTIATE_ALWAYS_SIGN; |
472 | 472 | ||
473 | tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE); | 473 | tmp = pbuffer + sizeof(AUTHENTICATE_MESSAGE); |
@@ -551,7 +551,7 @@ setup_ntlmv2_ret: | |||
551 | } | 551 | } |
552 | 552 | ||
553 | int | 553 | int |
554 | CIFS_SessSetup(unsigned int xid, struct cifsSesInfo *ses, | 554 | CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses, |
555 | const struct nls_table *nls_cp) | 555 | const struct nls_table *nls_cp) |
556 | { | 556 | { |
557 | int rc = 0; | 557 | int rc = 0; |
@@ -657,7 +657,7 @@ ssetup_ntlmssp_authenticate: | |||
657 | */ | 657 | */ |
658 | 658 | ||
659 | rc = calc_lanman_hash(ses->password, ses->server->cryptkey, | 659 | rc = calc_lanman_hash(ses->password, ses->server->cryptkey, |
660 | ses->server->secMode & SECMODE_PW_ENCRYPT ? | 660 | ses->server->sec_mode & SECMODE_PW_ENCRYPT ? |
661 | true : false, lnm_session_key); | 661 | true : false, lnm_session_key); |
662 | 662 | ||
663 | ses->flags |= CIFS_SES_LANMAN; | 663 | ses->flags |= CIFS_SES_LANMAN; |
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c index f2513fb8c391..147aa22c3c3a 100644 --- a/fs/cifs/transport.c +++ b/fs/cifs/transport.c | |||
@@ -295,7 +295,7 @@ static int wait_for_free_request(struct TCP_Server_Info *server, | |||
295 | return 0; | 295 | return 0; |
296 | } | 296 | } |
297 | 297 | ||
298 | static int allocate_mid(struct cifsSesInfo *ses, struct smb_hdr *in_buf, | 298 | static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, |
299 | struct mid_q_entry **ppmidQ) | 299 | struct mid_q_entry **ppmidQ) |
300 | { | 300 | { |
301 | if (ses->server->tcpStatus == CifsExiting) { | 301 | if (ses->server->tcpStatus == CifsExiting) { |
@@ -342,22 +342,24 @@ wait_for_response(struct TCP_Server_Info *server, struct mid_q_entry *midQ) | |||
342 | * the result. Caller is responsible for dealing with timeouts. | 342 | * the result. Caller is responsible for dealing with timeouts. |
343 | */ | 343 | */ |
344 | int | 344 | int |
345 | cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf, | 345 | cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov, |
346 | mid_callback_t *callback, void *cbdata) | 346 | unsigned int nvec, mid_callback_t *callback, void *cbdata, |
347 | bool ignore_pend) | ||
347 | { | 348 | { |
348 | int rc; | 349 | int rc; |
349 | struct mid_q_entry *mid; | 350 | struct mid_q_entry *mid; |
351 | struct smb_hdr *hdr = (struct smb_hdr *)iov[0].iov_base; | ||
350 | 352 | ||
351 | rc = wait_for_free_request(server, CIFS_ASYNC_OP); | 353 | rc = wait_for_free_request(server, ignore_pend ? CIFS_ASYNC_OP : 0); |
352 | if (rc) | 354 | if (rc) |
353 | return rc; | 355 | return rc; |
354 | 356 | ||
355 | /* enable signing if server requires it */ | 357 | /* enable signing if server requires it */ |
356 | if (server->secMode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) | 358 | if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) |
357 | in_buf->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; | 359 | hdr->Flags2 |= SMBFLG2_SECURITY_SIGNATURE; |
358 | 360 | ||
359 | mutex_lock(&server->srv_mutex); | 361 | mutex_lock(&server->srv_mutex); |
360 | mid = AllocMidQEntry(in_buf, server); | 362 | mid = AllocMidQEntry(hdr, server); |
361 | if (mid == NULL) { | 363 | if (mid == NULL) { |
362 | mutex_unlock(&server->srv_mutex); | 364 | mutex_unlock(&server->srv_mutex); |
363 | return -ENOMEM; | 365 | return -ENOMEM; |
@@ -368,7 +370,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf, | |||
368 | list_add_tail(&mid->qhead, &server->pending_mid_q); | 370 | list_add_tail(&mid->qhead, &server->pending_mid_q); |
369 | spin_unlock(&GlobalMid_Lock); | 371 | spin_unlock(&GlobalMid_Lock); |
370 | 372 | ||
371 | rc = cifs_sign_smb(in_buf, server, &mid->sequence_number); | 373 | rc = cifs_sign_smb2(iov, nvec, server, &mid->sequence_number); |
372 | if (rc) { | 374 | if (rc) { |
373 | mutex_unlock(&server->srv_mutex); | 375 | mutex_unlock(&server->srv_mutex); |
374 | goto out_err; | 376 | goto out_err; |
@@ -380,7 +382,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct smb_hdr *in_buf, | |||
380 | #ifdef CONFIG_CIFS_STATS2 | 382 | #ifdef CONFIG_CIFS_STATS2 |
381 | atomic_inc(&server->inSend); | 383 | atomic_inc(&server->inSend); |
382 | #endif | 384 | #endif |
383 | rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); | 385 | rc = smb_sendv(server, iov, nvec); |
384 | #ifdef CONFIG_CIFS_STATS2 | 386 | #ifdef CONFIG_CIFS_STATS2 |
385 | atomic_dec(&server->inSend); | 387 | atomic_dec(&server->inSend); |
386 | mid->when_sent = jiffies; | 388 | mid->when_sent = jiffies; |
@@ -407,7 +409,7 @@ out_err: | |||
407 | * | 409 | * |
408 | */ | 410 | */ |
409 | int | 411 | int |
410 | SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses, | 412 | SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses, |
411 | struct smb_hdr *in_buf, int flags) | 413 | struct smb_hdr *in_buf, int flags) |
412 | { | 414 | { |
413 | int rc; | 415 | int rc; |
@@ -424,7 +426,7 @@ SendReceiveNoRsp(const unsigned int xid, struct cifsSesInfo *ses, | |||
424 | } | 426 | } |
425 | 427 | ||
426 | static int | 428 | static int |
427 | sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) | 429 | cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) |
428 | { | 430 | { |
429 | int rc = 0; | 431 | int rc = 0; |
430 | 432 | ||
@@ -432,28 +434,21 @@ sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server) | |||
432 | mid->mid, mid->midState); | 434 | mid->mid, mid->midState); |
433 | 435 | ||
434 | spin_lock(&GlobalMid_Lock); | 436 | spin_lock(&GlobalMid_Lock); |
435 | /* ensure that it's no longer on the pending_mid_q */ | ||
436 | list_del_init(&mid->qhead); | ||
437 | |||
438 | switch (mid->midState) { | 437 | switch (mid->midState) { |
439 | case MID_RESPONSE_RECEIVED: | 438 | case MID_RESPONSE_RECEIVED: |
440 | spin_unlock(&GlobalMid_Lock); | 439 | spin_unlock(&GlobalMid_Lock); |
441 | return rc; | 440 | return rc; |
442 | case MID_REQUEST_SUBMITTED: | ||
443 | /* socket is going down, reject all calls */ | ||
444 | if (server->tcpStatus == CifsExiting) { | ||
445 | cERROR(1, "%s: canceling mid=%d cmd=0x%x state=%d", | ||
446 | __func__, mid->mid, mid->command, mid->midState); | ||
447 | rc = -EHOSTDOWN; | ||
448 | break; | ||
449 | } | ||
450 | case MID_RETRY_NEEDED: | 441 | case MID_RETRY_NEEDED: |
451 | rc = -EAGAIN; | 442 | rc = -EAGAIN; |
452 | break; | 443 | break; |
453 | case MID_RESPONSE_MALFORMED: | 444 | case MID_RESPONSE_MALFORMED: |
454 | rc = -EIO; | 445 | rc = -EIO; |
455 | break; | 446 | break; |
447 | case MID_SHUTDOWN: | ||
448 | rc = -EHOSTDOWN; | ||
449 | break; | ||
456 | default: | 450 | default: |
451 | list_del_init(&mid->qhead); | ||
457 | cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__, | 452 | cERROR(1, "%s: invalid mid state mid=%d state=%d", __func__, |
458 | mid->mid, mid->midState); | 453 | mid->mid, mid->midState); |
459 | rc = -EIO; | 454 | rc = -EIO; |
@@ -502,13 +497,31 @@ send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf, | |||
502 | } | 497 | } |
503 | 498 | ||
504 | int | 499 | int |
505 | SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | 500 | cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server, |
501 | bool log_error) | ||
502 | { | ||
503 | dump_smb(mid->resp_buf, | ||
504 | min_t(u32, 92, be32_to_cpu(mid->resp_buf->smb_buf_length))); | ||
505 | |||
506 | /* convert the length into a more usable form */ | ||
507 | if (server->sec_mode & (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) { | ||
508 | /* FIXME: add code to kill session */ | ||
509 | if (cifs_verify_signature(mid->resp_buf, server, | ||
510 | mid->sequence_number + 1) != 0) | ||
511 | cERROR(1, "Unexpected SMB signature"); | ||
512 | } | ||
513 | |||
514 | /* BB special case reconnect tid and uid here? */ | ||
515 | return map_smb_to_linux_error(mid->resp_buf, log_error); | ||
516 | } | ||
517 | |||
518 | int | ||
519 | SendReceive2(const unsigned int xid, struct cifs_ses *ses, | ||
506 | struct kvec *iov, int n_vec, int *pRespBufType /* ret */, | 520 | struct kvec *iov, int n_vec, int *pRespBufType /* ret */, |
507 | const int flags) | 521 | const int flags) |
508 | { | 522 | { |
509 | int rc = 0; | 523 | int rc = 0; |
510 | int long_op; | 524 | int long_op; |
511 | unsigned int receive_len; | ||
512 | struct mid_q_entry *midQ; | 525 | struct mid_q_entry *midQ; |
513 | struct smb_hdr *in_buf = iov[0].iov_base; | 526 | struct smb_hdr *in_buf = iov[0].iov_base; |
514 | 527 | ||
@@ -598,61 +611,31 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses, | |||
598 | 611 | ||
599 | cifs_small_buf_release(in_buf); | 612 | cifs_small_buf_release(in_buf); |
600 | 613 | ||
601 | rc = sync_mid_result(midQ, ses->server); | 614 | rc = cifs_sync_mid_result(midQ, ses->server); |
602 | if (rc != 0) { | 615 | if (rc != 0) { |
603 | atomic_dec(&ses->server->inFlight); | 616 | atomic_dec(&ses->server->inFlight); |
604 | wake_up(&ses->server->request_q); | 617 | wake_up(&ses->server->request_q); |
605 | return rc; | 618 | return rc; |
606 | } | 619 | } |
607 | 620 | ||
608 | receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length); | 621 | if (!midQ->resp_buf || midQ->midState != MID_RESPONSE_RECEIVED) { |
609 | |||
610 | if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { | ||
611 | cERROR(1, "Frame too large received. Length: %d Xid: %d", | ||
612 | receive_len, xid); | ||
613 | rc = -EIO; | 622 | rc = -EIO; |
623 | cFYI(1, "Bad MID state?"); | ||
614 | goto out; | 624 | goto out; |
615 | } | 625 | } |
616 | 626 | ||
617 | /* rcvd frame is ok */ | 627 | iov[0].iov_base = (char *)midQ->resp_buf; |
618 | 628 | iov[0].iov_len = be32_to_cpu(midQ->resp_buf->smb_buf_length) + 4; | |
619 | if (midQ->resp_buf && | 629 | if (midQ->largeBuf) |
620 | (midQ->midState == MID_RESPONSE_RECEIVED)) { | 630 | *pRespBufType = CIFS_LARGE_BUFFER; |
621 | 631 | else | |
622 | iov[0].iov_base = (char *)midQ->resp_buf; | 632 | *pRespBufType = CIFS_SMALL_BUFFER; |
623 | if (midQ->largeBuf) | ||
624 | *pRespBufType = CIFS_LARGE_BUFFER; | ||
625 | else | ||
626 | *pRespBufType = CIFS_SMALL_BUFFER; | ||
627 | iov[0].iov_len = receive_len + 4; | ||
628 | |||
629 | dump_smb(midQ->resp_buf, 80); | ||
630 | /* convert the length into a more usable form */ | ||
631 | if ((receive_len > 24) && | ||
632 | (ses->server->secMode & (SECMODE_SIGN_REQUIRED | | ||
633 | SECMODE_SIGN_ENABLED))) { | ||
634 | rc = cifs_verify_signature(midQ->resp_buf, | ||
635 | ses->server, | ||
636 | midQ->sequence_number+1); | ||
637 | if (rc) { | ||
638 | cERROR(1, "Unexpected SMB signature"); | ||
639 | /* BB FIXME add code to kill session */ | ||
640 | } | ||
641 | } | ||
642 | |||
643 | /* BB special case reconnect tid and uid here? */ | ||
644 | rc = map_smb_to_linux_error(midQ->resp_buf, | ||
645 | flags & CIFS_LOG_ERROR); | ||
646 | 633 | ||
647 | if ((flags & CIFS_NO_RESP) == 0) | 634 | rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR); |
648 | midQ->resp_buf = NULL; /* mark it so buf will | ||
649 | not be freed by | ||
650 | delete_mid */ | ||
651 | } else { | ||
652 | rc = -EIO; | ||
653 | cFYI(1, "Bad MID state?"); | ||
654 | } | ||
655 | 635 | ||
636 | /* mark it so buf will not be freed by delete_mid */ | ||
637 | if ((flags & CIFS_NO_RESP) == 0) | ||
638 | midQ->resp_buf = NULL; | ||
656 | out: | 639 | out: |
657 | delete_mid(midQ); | 640 | delete_mid(midQ); |
658 | atomic_dec(&ses->server->inFlight); | 641 | atomic_dec(&ses->server->inFlight); |
@@ -662,12 +645,11 @@ out: | |||
662 | } | 645 | } |
663 | 646 | ||
664 | int | 647 | int |
665 | SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | 648 | SendReceive(const unsigned int xid, struct cifs_ses *ses, |
666 | struct smb_hdr *in_buf, struct smb_hdr *out_buf, | 649 | struct smb_hdr *in_buf, struct smb_hdr *out_buf, |
667 | int *pbytes_returned, const int long_op) | 650 | int *pbytes_returned, const int long_op) |
668 | { | 651 | { |
669 | int rc = 0; | 652 | int rc = 0; |
670 | unsigned int receive_len; | ||
671 | struct mid_q_entry *midQ; | 653 | struct mid_q_entry *midQ; |
672 | 654 | ||
673 | if (ses == NULL) { | 655 | if (ses == NULL) { |
@@ -750,54 +732,23 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses, | |||
750 | spin_unlock(&GlobalMid_Lock); | 732 | spin_unlock(&GlobalMid_Lock); |
751 | } | 733 | } |
752 | 734 | ||
753 | rc = sync_mid_result(midQ, ses->server); | 735 | rc = cifs_sync_mid_result(midQ, ses->server); |
754 | if (rc != 0) { | 736 | if (rc != 0) { |
755 | atomic_dec(&ses->server->inFlight); | 737 | atomic_dec(&ses->server->inFlight); |
756 | wake_up(&ses->server->request_q); | 738 | wake_up(&ses->server->request_q); |
757 | return rc; | 739 | return rc; |
758 | } | 740 | } |
759 | 741 | ||
760 | receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length); | 742 | if (!midQ->resp_buf || !out_buf || |
761 | 743 | midQ->midState != MID_RESPONSE_RECEIVED) { | |
762 | if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { | ||
763 | cERROR(1, "Frame too large received. Length: %d Xid: %d", | ||
764 | receive_len, xid); | ||
765 | rc = -EIO; | ||
766 | goto out; | ||
767 | } | ||
768 | |||
769 | /* rcvd frame is ok */ | ||
770 | |||
771 | if (midQ->resp_buf && out_buf | ||
772 | && (midQ->midState == MID_RESPONSE_RECEIVED)) { | ||
773 | out_buf->smb_buf_length = cpu_to_be32(receive_len); | ||
774 | memcpy((char *)out_buf + 4, | ||
775 | (char *)midQ->resp_buf + 4, | ||
776 | receive_len); | ||
777 | |||
778 | dump_smb(out_buf, 92); | ||
779 | /* convert the length into a more usable form */ | ||
780 | if ((receive_len > 24) && | ||
781 | (ses->server->secMode & (SECMODE_SIGN_REQUIRED | | ||
782 | SECMODE_SIGN_ENABLED))) { | ||
783 | rc = cifs_verify_signature(out_buf, | ||
784 | ses->server, | ||
785 | midQ->sequence_number+1); | ||
786 | if (rc) { | ||
787 | cERROR(1, "Unexpected SMB signature"); | ||
788 | /* BB FIXME add code to kill session */ | ||
789 | } | ||
790 | } | ||
791 | |||
792 | *pbytes_returned = be32_to_cpu(out_buf->smb_buf_length); | ||
793 | |||
794 | /* BB special case reconnect tid and uid here? */ | ||
795 | rc = map_smb_to_linux_error(out_buf, 0 /* no log */ ); | ||
796 | } else { | ||
797 | rc = -EIO; | 744 | rc = -EIO; |
798 | cERROR(1, "Bad MID state?"); | 745 | cERROR(1, "Bad MID state?"); |
746 | goto out; | ||
799 | } | 747 | } |
800 | 748 | ||
749 | *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length); | ||
750 | memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); | ||
751 | rc = cifs_check_receive(midQ, ses->server, 0); | ||
801 | out: | 752 | out: |
802 | delete_mid(midQ); | 753 | delete_mid(midQ); |
803 | atomic_dec(&ses->server->inFlight); | 754 | atomic_dec(&ses->server->inFlight); |
@@ -810,12 +761,12 @@ out: | |||
810 | blocking lock to return. */ | 761 | blocking lock to return. */ |
811 | 762 | ||
812 | static int | 763 | static int |
813 | send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon, | 764 | send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon, |
814 | struct smb_hdr *in_buf, | 765 | struct smb_hdr *in_buf, |
815 | struct smb_hdr *out_buf) | 766 | struct smb_hdr *out_buf) |
816 | { | 767 | { |
817 | int bytes_returned; | 768 | int bytes_returned; |
818 | struct cifsSesInfo *ses = tcon->ses; | 769 | struct cifs_ses *ses = tcon->ses; |
819 | LOCK_REQ *pSMB = (LOCK_REQ *)in_buf; | 770 | LOCK_REQ *pSMB = (LOCK_REQ *)in_buf; |
820 | 771 | ||
821 | /* We just modify the current in_buf to change | 772 | /* We just modify the current in_buf to change |
@@ -832,15 +783,14 @@ send_lock_cancel(const unsigned int xid, struct cifsTconInfo *tcon, | |||
832 | } | 783 | } |
833 | 784 | ||
834 | int | 785 | int |
835 | SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, | 786 | SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon, |
836 | struct smb_hdr *in_buf, struct smb_hdr *out_buf, | 787 | struct smb_hdr *in_buf, struct smb_hdr *out_buf, |
837 | int *pbytes_returned) | 788 | int *pbytes_returned) |
838 | { | 789 | { |
839 | int rc = 0; | 790 | int rc = 0; |
840 | int rstart = 0; | 791 | int rstart = 0; |
841 | unsigned int receive_len; | ||
842 | struct mid_q_entry *midQ; | 792 | struct mid_q_entry *midQ; |
843 | struct cifsSesInfo *ses; | 793 | struct cifs_ses *ses; |
844 | 794 | ||
845 | if (tcon == NULL || tcon->ses == NULL) { | 795 | if (tcon == NULL || tcon->ses == NULL) { |
846 | cERROR(1, "Null smb session"); | 796 | cERROR(1, "Null smb session"); |
@@ -957,50 +907,20 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon, | |||
957 | rstart = 1; | 907 | rstart = 1; |
958 | } | 908 | } |
959 | 909 | ||
960 | rc = sync_mid_result(midQ, ses->server); | 910 | rc = cifs_sync_mid_result(midQ, ses->server); |
961 | if (rc != 0) | 911 | if (rc != 0) |
962 | return rc; | 912 | return rc; |
963 | 913 | ||
964 | receive_len = be32_to_cpu(midQ->resp_buf->smb_buf_length); | ||
965 | if (receive_len > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE) { | ||
966 | cERROR(1, "Frame too large received. Length: %d Xid: %d", | ||
967 | receive_len, xid); | ||
968 | rc = -EIO; | ||
969 | goto out; | ||
970 | } | ||
971 | |||
972 | /* rcvd frame is ok */ | 914 | /* rcvd frame is ok */ |
973 | 915 | if (out_buf == NULL || midQ->midState != MID_RESPONSE_RECEIVED) { | |
974 | if ((out_buf == NULL) || (midQ->midState != MID_RESPONSE_RECEIVED)) { | ||
975 | rc = -EIO; | 916 | rc = -EIO; |
976 | cERROR(1, "Bad MID state?"); | 917 | cERROR(1, "Bad MID state?"); |
977 | goto out; | 918 | goto out; |
978 | } | 919 | } |
979 | 920 | ||
980 | out_buf->smb_buf_length = cpu_to_be32(receive_len); | 921 | *pbytes_returned = be32_to_cpu(midQ->resp_buf->smb_buf_length); |
981 | memcpy((char *)out_buf + 4, | 922 | memcpy(out_buf, midQ->resp_buf, *pbytes_returned + 4); |
982 | (char *)midQ->resp_buf + 4, | 923 | rc = cifs_check_receive(midQ, ses->server, 0); |
983 | receive_len); | ||
984 | |||
985 | dump_smb(out_buf, 92); | ||
986 | /* convert the length into a more usable form */ | ||
987 | if ((receive_len > 24) && | ||
988 | (ses->server->secMode & (SECMODE_SIGN_REQUIRED | | ||
989 | SECMODE_SIGN_ENABLED))) { | ||
990 | rc = cifs_verify_signature(out_buf, | ||
991 | ses->server, | ||
992 | midQ->sequence_number+1); | ||
993 | if (rc) { | ||
994 | cERROR(1, "Unexpected SMB signature"); | ||
995 | /* BB FIXME add code to kill session */ | ||
996 | } | ||
997 | } | ||
998 | |||
999 | *pbytes_returned = be32_to_cpu(out_buf->smb_buf_length); | ||
1000 | |||
1001 | /* BB special case reconnect tid and uid here? */ | ||
1002 | rc = map_smb_to_linux_error(out_buf, 0 /* no log */ ); | ||
1003 | |||
1004 | out: | 924 | out: |
1005 | delete_mid(midQ); | 925 | delete_mid(midQ); |
1006 | if (rstart && rc == -EACCES) | 926 | if (rstart && rc == -EACCES) |
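Note on the reworked async path above: cifs_call_async() now takes a kvec array plus a vector count so multi-part requests can be signed with cifs_sign_smb2() and sent with smb_sendv(). A minimal caller sketch for a single-vector request; send_async_example, my_callback and my_cbdata are placeholders, not symbols from this patch:

static int send_async_example(struct TCP_Server_Info *server,
			      struct smb_hdr *in_buf,
			      mid_callback_t *my_callback, void *my_cbdata)
{
	struct kvec iov[1];

	iov[0].iov_base = in_buf;
	/* smb_buf_length excludes the 4-byte RFC1002 header, hence the +4 */
	iov[0].iov_len = be32_to_cpu(in_buf->smb_buf_length) + 4;

	return cifs_call_async(server, iov, 1, my_callback, my_cbdata, true);
}
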
diff --git a/fs/cifs/xattr.c b/fs/cifs/xattr.c index 912995e013ec..2a22fb2989e4 100644 --- a/fs/cifs/xattr.c +++ b/fs/cifs/xattr.c | |||
@@ -49,7 +49,7 @@ int cifs_removexattr(struct dentry *direntry, const char *ea_name) | |||
49 | int xid; | 49 | int xid; |
50 | struct cifs_sb_info *cifs_sb; | 50 | struct cifs_sb_info *cifs_sb; |
51 | struct tcon_link *tlink; | 51 | struct tcon_link *tlink; |
52 | struct cifsTconInfo *pTcon; | 52 | struct cifs_tcon *pTcon; |
53 | struct super_block *sb; | 53 | struct super_block *sb; |
54 | char *full_path = NULL; | 54 | char *full_path = NULL; |
55 | 55 | ||
@@ -109,7 +109,7 @@ int cifs_setxattr(struct dentry *direntry, const char *ea_name, | |||
109 | int xid; | 109 | int xid; |
110 | struct cifs_sb_info *cifs_sb; | 110 | struct cifs_sb_info *cifs_sb; |
111 | struct tcon_link *tlink; | 111 | struct tcon_link *tlink; |
112 | struct cifsTconInfo *pTcon; | 112 | struct cifs_tcon *pTcon; |
113 | struct super_block *sb; | 113 | struct super_block *sb; |
114 | char *full_path; | 114 | char *full_path; |
115 | struct cifs_ntsd *pacl; | 115 | struct cifs_ntsd *pacl; |
@@ -240,7 +240,7 @@ ssize_t cifs_getxattr(struct dentry *direntry, const char *ea_name, | |||
240 | int xid; | 240 | int xid; |
241 | struct cifs_sb_info *cifs_sb; | 241 | struct cifs_sb_info *cifs_sb; |
242 | struct tcon_link *tlink; | 242 | struct tcon_link *tlink; |
243 | struct cifsTconInfo *pTcon; | 243 | struct cifs_tcon *pTcon; |
244 | struct super_block *sb; | 244 | struct super_block *sb; |
245 | char *full_path; | 245 | char *full_path; |
246 | 246 | ||
@@ -372,7 +372,7 @@ ssize_t cifs_listxattr(struct dentry *direntry, char *data, size_t buf_size) | |||
372 | int xid; | 372 | int xid; |
373 | struct cifs_sb_info *cifs_sb; | 373 | struct cifs_sb_info *cifs_sb; |
374 | struct tcon_link *tlink; | 374 | struct tcon_link *tlink; |
375 | struct cifsTconInfo *pTcon; | 375 | struct cifs_tcon *pTcon; |
376 | struct super_block *sb; | 376 | struct super_block *sb; |
377 | char *full_path; | 377 | char *full_path; |
378 | 378 | ||
diff --git a/fs/coda/dir.c b/fs/coda/dir.c index 2b8dae4d121e..a46126fd5735 100644 --- a/fs/coda/dir.c +++ b/fs/coda/dir.c | |||
@@ -336,6 +336,8 @@ static int coda_rmdir(struct inode *dir, struct dentry *de) | |||
336 | int len = de->d_name.len; | 336 | int len = de->d_name.len; |
337 | int error; | 337 | int error; |
338 | 338 | ||
339 | dentry_unhash(de); | ||
340 | |||
339 | error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len); | 341 | error = venus_rmdir(dir->i_sb, coda_i2f(dir), name, len); |
340 | if (!error) { | 342 | if (!error) { |
341 | /* VFS may delete the child */ | 343 | /* VFS may delete the child */ |
@@ -359,6 +361,9 @@ static int coda_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
359 | int new_length = new_dentry->d_name.len; | 361 | int new_length = new_dentry->d_name.len; |
360 | int error; | 362 | int error; |
361 | 363 | ||
364 | if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) | ||
365 | dentry_unhash(new_dentry); | ||
366 | |||
362 | error = venus_rename(old_dir->i_sb, coda_i2f(old_dir), | 367 | error = venus_rename(old_dir->i_sb, coda_i2f(old_dir), |
363 | coda_i2f(new_dir), old_length, new_length, | 368 | coda_i2f(new_dir), old_length, new_length, |
364 | (const char *) old_name, (const char *)new_name); | 369 | (const char *) old_name, (const char *)new_name); |
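The coda hunks above, and the configfs and ecryptfs hunks below, all follow the same shape: dentry_unhash() moves out of the VFS and into each filesystem's own rmdir/rename path, and a successful directory removal clears the victim's link count. A minimal sketch of that pattern, assuming a generic backend; example_rmdir and example_backend_rmdir are placeholders, not functions from this patch:

#include <linux/fs.h>
#include <linux/dcache.h>

/* placeholder for the filesystem's real backend removal operation */
static int example_backend_rmdir(struct inode *dir, struct dentry *dentry)
{
	return 0;
}

static int example_rmdir(struct inode *dir, struct dentry *dentry)
{
	int error;

	dentry_unhash(dentry);		/* previously done by the VFS caller */

	error = example_backend_rmdir(dir, dentry);
	if (!error && dentry->d_inode)
		clear_nlink(dentry->d_inode);	/* as ecryptfs_rmdir() does below */
	return error;
}
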
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c index 9a37a9b6de3a..9d17d350abc5 100644 --- a/fs/configfs/dir.c +++ b/fs/configfs/dir.c | |||
@@ -1359,6 +1359,8 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
1359 | struct module *subsys_owner = NULL, *dead_item_owner = NULL; | 1359 | struct module *subsys_owner = NULL, *dead_item_owner = NULL; |
1360 | int ret; | 1360 | int ret; |
1361 | 1361 | ||
1362 | dentry_unhash(dentry); | ||
1363 | |||
1362 | if (dentry->d_parent == configfs_sb->s_root) | 1364 | if (dentry->d_parent == configfs_sb->s_root) |
1363 | return -EPERM; | 1365 | return -EPERM; |
1364 | 1366 | ||
diff --git a/fs/dlm/main.c b/fs/dlm/main.c index b80e0aa3cfa5..5a59efa0bb46 100644 --- a/fs/dlm/main.c +++ b/fs/dlm/main.c | |||
@@ -50,7 +50,7 @@ static int __init init_dlm(void) | |||
50 | if (error) | 50 | if (error) |
51 | goto out_netlink; | 51 | goto out_netlink; |
52 | 52 | ||
53 | printk("DLM (built %s %s) installed\n", __DATE__, __TIME__); | 53 | printk("DLM installed\n"); |
54 | 54 | ||
55 | return 0; | 55 | return 0; |
56 | 56 | ||
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c index 4d4cc6a90cd5..bc116b9ffcf2 100644 --- a/fs/ecryptfs/inode.c +++ b/fs/ecryptfs/inode.c | |||
@@ -521,12 +521,16 @@ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
521 | struct dentry *lower_dir_dentry; | 521 | struct dentry *lower_dir_dentry; |
522 | int rc; | 522 | int rc; |
523 | 523 | ||
524 | dentry_unhash(dentry); | ||
525 | |||
524 | lower_dentry = ecryptfs_dentry_to_lower(dentry); | 526 | lower_dentry = ecryptfs_dentry_to_lower(dentry); |
525 | dget(dentry); | 527 | dget(dentry); |
526 | lower_dir_dentry = lock_parent(lower_dentry); | 528 | lower_dir_dentry = lock_parent(lower_dentry); |
527 | dget(lower_dentry); | 529 | dget(lower_dentry); |
528 | rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry); | 530 | rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry); |
529 | dput(lower_dentry); | 531 | dput(lower_dentry); |
532 | if (!rc && dentry->d_inode) | ||
533 | clear_nlink(dentry->d_inode); | ||
530 | fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); | 534 | fsstack_copy_attr_times(dir, lower_dir_dentry->d_inode); |
531 | dir->i_nlink = lower_dir_dentry->d_inode->i_nlink; | 535 | dir->i_nlink = lower_dir_dentry->d_inode->i_nlink; |
532 | unlock_dir(lower_dir_dentry); | 536 | unlock_dir(lower_dir_dentry); |
@@ -571,6 +575,9 @@ ecryptfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
571 | struct dentry *lower_new_dir_dentry; | 575 | struct dentry *lower_new_dir_dentry; |
572 | struct dentry *trap = NULL; | 576 | struct dentry *trap = NULL; |
573 | 577 | ||
578 | if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) | ||
579 | dentry_unhash(new_dentry); | ||
580 | |||
574 | lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry); | 581 | lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry); |
575 | lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry); | 582 | lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry); |
576 | dget(lower_old_dentry); | 583 | dget(lower_old_dentry); |
diff --git a/fs/ecryptfs/keystore.c b/fs/ecryptfs/keystore.c index 03e609c45012..27a7fefb83eb 100644 --- a/fs/ecryptfs/keystore.c +++ b/fs/ecryptfs/keystore.c | |||
@@ -599,8 +599,8 @@ struct ecryptfs_write_tag_70_packet_silly_stack { | |||
599 | struct mutex *tfm_mutex; | 599 | struct mutex *tfm_mutex; |
600 | char *block_aligned_filename; | 600 | char *block_aligned_filename; |
601 | struct ecryptfs_auth_tok *auth_tok; | 601 | struct ecryptfs_auth_tok *auth_tok; |
602 | struct scatterlist src_sg; | 602 | struct scatterlist src_sg[2]; |
603 | struct scatterlist dst_sg; | 603 | struct scatterlist dst_sg[2]; |
604 | struct blkcipher_desc desc; | 604 | struct blkcipher_desc desc; |
605 | char iv[ECRYPTFS_MAX_IV_BYTES]; | 605 | char iv[ECRYPTFS_MAX_IV_BYTES]; |
606 | char hash[ECRYPTFS_TAG_70_DIGEST_SIZE]; | 606 | char hash[ECRYPTFS_TAG_70_DIGEST_SIZE]; |
@@ -816,23 +816,21 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes, | |||
816 | memcpy(&s->block_aligned_filename[s->num_rand_bytes], filename, | 816 | memcpy(&s->block_aligned_filename[s->num_rand_bytes], filename, |
817 | filename_size); | 817 | filename_size); |
818 | rc = virt_to_scatterlist(s->block_aligned_filename, | 818 | rc = virt_to_scatterlist(s->block_aligned_filename, |
819 | s->block_aligned_filename_size, &s->src_sg, 1); | 819 | s->block_aligned_filename_size, s->src_sg, 2); |
820 | if (rc != 1) { | 820 | if (rc < 1) { |
821 | printk(KERN_ERR "%s: Internal error whilst attempting to " | 821 | printk(KERN_ERR "%s: Internal error whilst attempting to " |
822 | "convert filename memory to scatterlist; " | 822 | "convert filename memory to scatterlist; rc = [%d]. " |
823 | "expected rc = 1; got rc = [%d]. " | ||
824 | "block_aligned_filename_size = [%zd]\n", __func__, rc, | 823 | "block_aligned_filename_size = [%zd]\n", __func__, rc, |
825 | s->block_aligned_filename_size); | 824 | s->block_aligned_filename_size); |
826 | goto out_release_free_unlock; | 825 | goto out_release_free_unlock; |
827 | } | 826 | } |
828 | rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size, | 827 | rc = virt_to_scatterlist(&dest[s->i], s->block_aligned_filename_size, |
829 | &s->dst_sg, 1); | 828 | s->dst_sg, 2); |
830 | if (rc != 1) { | 829 | if (rc < 1) { |
831 | printk(KERN_ERR "%s: Internal error whilst attempting to " | 830 | printk(KERN_ERR "%s: Internal error whilst attempting to " |
832 | "convert encrypted filename memory to scatterlist; " | 831 | "convert encrypted filename memory to scatterlist; " |
833 | "expected rc = 1; got rc = [%d]. " | 832 | "rc = [%d]. block_aligned_filename_size = [%zd]\n", |
834 | "block_aligned_filename_size = [%zd]\n", __func__, rc, | 833 | __func__, rc, s->block_aligned_filename_size); |
835 | s->block_aligned_filename_size); | ||
836 | goto out_release_free_unlock; | 834 | goto out_release_free_unlock; |
837 | } | 835 | } |
838 | /* The characters in the first block effectively do the job | 836 | /* The characters in the first block effectively do the job |
@@ -855,7 +853,7 @@ ecryptfs_write_tag_70_packet(char *dest, size_t *remaining_bytes, | |||
855 | mount_crypt_stat->global_default_fn_cipher_key_bytes); | 853 | mount_crypt_stat->global_default_fn_cipher_key_bytes); |
856 | goto out_release_free_unlock; | 854 | goto out_release_free_unlock; |
857 | } | 855 | } |
858 | rc = crypto_blkcipher_encrypt_iv(&s->desc, &s->dst_sg, &s->src_sg, | 856 | rc = crypto_blkcipher_encrypt_iv(&s->desc, s->dst_sg, s->src_sg, |
859 | s->block_aligned_filename_size); | 857 | s->block_aligned_filename_size); |
860 | if (rc) { | 858 | if (rc) { |
861 | printk(KERN_ERR "%s: Error attempting to encrypt filename; " | 859 | printk(KERN_ERR "%s: Error attempting to encrypt filename; " |
@@ -891,8 +889,8 @@ struct ecryptfs_parse_tag_70_packet_silly_stack { | |||
891 | struct mutex *tfm_mutex; | 889 | struct mutex *tfm_mutex; |
892 | char *decrypted_filename; | 890 | char *decrypted_filename; |
893 | struct ecryptfs_auth_tok *auth_tok; | 891 | struct ecryptfs_auth_tok *auth_tok; |
894 | struct scatterlist src_sg; | 892 | struct scatterlist src_sg[2]; |
895 | struct scatterlist dst_sg; | 893 | struct scatterlist dst_sg[2]; |
896 | struct blkcipher_desc desc; | 894 | struct blkcipher_desc desc; |
897 | char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1]; | 895 | char fnek_sig_hex[ECRYPTFS_SIG_SIZE_HEX + 1]; |
898 | char iv[ECRYPTFS_MAX_IV_BYTES]; | 896 | char iv[ECRYPTFS_MAX_IV_BYTES]; |
@@ -1008,13 +1006,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size, | |||
1008 | } | 1006 | } |
1009 | mutex_lock(s->tfm_mutex); | 1007 | mutex_lock(s->tfm_mutex); |
1010 | rc = virt_to_scatterlist(&data[(*packet_size)], | 1008 | rc = virt_to_scatterlist(&data[(*packet_size)], |
1011 | s->block_aligned_filename_size, &s->src_sg, 1); | 1009 | s->block_aligned_filename_size, s->src_sg, 2); |
1012 | if (rc != 1) { | 1010 | if (rc < 1) { |
1013 | printk(KERN_ERR "%s: Internal error whilst attempting to " | 1011 | printk(KERN_ERR "%s: Internal error whilst attempting to " |
1014 | "convert encrypted filename memory to scatterlist; " | 1012 | "convert encrypted filename memory to scatterlist; " |
1015 | "expected rc = 1; got rc = [%d]. " | 1013 | "rc = [%d]. block_aligned_filename_size = [%zd]\n", |
1016 | "block_aligned_filename_size = [%zd]\n", __func__, rc, | 1014 | __func__, rc, s->block_aligned_filename_size); |
1017 | s->block_aligned_filename_size); | ||
1018 | goto out_unlock; | 1015 | goto out_unlock; |
1019 | } | 1016 | } |
1020 | (*packet_size) += s->block_aligned_filename_size; | 1017 | (*packet_size) += s->block_aligned_filename_size; |
@@ -1028,13 +1025,12 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size, | |||
1028 | goto out_unlock; | 1025 | goto out_unlock; |
1029 | } | 1026 | } |
1030 | rc = virt_to_scatterlist(s->decrypted_filename, | 1027 | rc = virt_to_scatterlist(s->decrypted_filename, |
1031 | s->block_aligned_filename_size, &s->dst_sg, 1); | 1028 | s->block_aligned_filename_size, s->dst_sg, 2); |
1032 | if (rc != 1) { | 1029 | if (rc < 1) { |
1033 | printk(KERN_ERR "%s: Internal error whilst attempting to " | 1030 | printk(KERN_ERR "%s: Internal error whilst attempting to " |
1034 | "convert decrypted filename memory to scatterlist; " | 1031 | "convert decrypted filename memory to scatterlist; " |
1035 | "expected rc = 1; got rc = [%d]. " | 1032 | "rc = [%d]. block_aligned_filename_size = [%zd]\n", |
1036 | "block_aligned_filename_size = [%zd]\n", __func__, rc, | 1033 | __func__, rc, s->block_aligned_filename_size); |
1037 | s->block_aligned_filename_size); | ||
1038 | goto out_free_unlock; | 1034 | goto out_free_unlock; |
1039 | } | 1035 | } |
1040 | /* The characters in the first block effectively do the job of | 1036 | /* The characters in the first block effectively do the job of |
@@ -1065,7 +1061,7 @@ ecryptfs_parse_tag_70_packet(char **filename, size_t *filename_size, | |||
1065 | mount_crypt_stat->global_default_fn_cipher_key_bytes); | 1061 | mount_crypt_stat->global_default_fn_cipher_key_bytes); |
1066 | goto out_free_unlock; | 1062 | goto out_free_unlock; |
1067 | } | 1063 | } |
1068 | rc = crypto_blkcipher_decrypt_iv(&s->desc, &s->dst_sg, &s->src_sg, | 1064 | rc = crypto_blkcipher_decrypt_iv(&s->desc, s->dst_sg, s->src_sg, |
1069 | s->block_aligned_filename_size); | 1065 | s->block_aligned_filename_size); |
1070 | if (rc) { | 1066 | if (rc) { |
1071 | printk(KERN_ERR "%s: Error attempting to decrypt filename; " | 1067 | printk(KERN_ERR "%s: Error attempting to decrypt filename; " |
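The switch from a single scatterlist to a two-entry array in the keystore hunks above accounts for the block-aligned filename buffer being kmalloc()ed: it is virtually contiguous but may straddle a page boundary, in which case it needs one scatterlist entry per page. A sketch of the idea; map_buf_to_sg is illustrative only, not the actual virt_to_scatterlist() implementation:

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>

static int map_buf_to_sg(void *buf, unsigned int len, struct scatterlist *sg)
{
	unsigned int first = min_t(unsigned int, len,
				   PAGE_SIZE - offset_in_page(buf));
	int nents = (first == len) ? 1 : 2;

	sg_init_table(sg, nents);
	sg_set_buf(&sg[0], buf, first);		/* part within the first page */
	if (nents == 2)
		sg_set_buf(&sg[1], buf + first, len - first);
	return nents;	/* callers above now only require rc >= 1 */
}
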
diff --git a/fs/exec.c b/fs/exec.c --- a/fs/exec.c +++ b/fs/exec.c | |||
@@ -42,7 +42,6 @@ | |||
42 | #include <linux/pid_namespace.h> | 42 | #include <linux/pid_namespace.h> |
43 | #include <linux/module.h> | 43 | #include <linux/module.h> |
44 | #include <linux/namei.h> | 44 | #include <linux/namei.h> |
45 | #include <linux/proc_fs.h> | ||
46 | #include <linux/mount.h> | 45 | #include <linux/mount.h> |
47 | #include <linux/security.h> | 46 | #include <linux/security.h> |
48 | #include <linux/syscalls.h> | 47 | #include <linux/syscalls.h> |
@@ -1624,6 +1623,41 @@ expand_fail: | |||
1624 | return ret; | 1623 | return ret; |
1625 | } | 1624 | } |
1626 | 1625 | ||
1626 | static int cn_print_exe_file(struct core_name *cn) | ||
1627 | { | ||
1628 | struct file *exe_file; | ||
1629 | char *pathbuf, *path, *p; | ||
1630 | int ret; | ||
1631 | |||
1632 | exe_file = get_mm_exe_file(current->mm); | ||
1633 | if (!exe_file) | ||
1634 | return cn_printf(cn, "(unknown)"); | ||
1635 | |||
1636 | pathbuf = kmalloc(PATH_MAX, GFP_TEMPORARY); | ||
1637 | if (!pathbuf) { | ||
1638 | ret = -ENOMEM; | ||
1639 | goto put_exe_file; | ||
1640 | } | ||
1641 | |||
1642 | path = d_path(&exe_file->f_path, pathbuf, PATH_MAX); | ||
1643 | if (IS_ERR(path)) { | ||
1644 | ret = PTR_ERR(path); | ||
1645 | goto free_buf; | ||
1646 | } | ||
1647 | |||
1648 | for (p = path; *p; p++) | ||
1649 | if (*p == '/') | ||
1650 | *p = '!'; | ||
1651 | |||
1652 | ret = cn_printf(cn, "%s", path); | ||
1653 | |||
1654 | free_buf: | ||
1655 | kfree(pathbuf); | ||
1656 | put_exe_file: | ||
1657 | fput(exe_file); | ||
1658 | return ret; | ||
1659 | } | ||
1660 | |||
1627 | /* format_corename will inspect the pattern parameter, and output a | 1661 | /* format_corename will inspect the pattern parameter, and output a |
1628 | * name into corename, which must have space for at least | 1662 | * name into corename, which must have space for at least |
1629 | * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. | 1663 | * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator. |
@@ -1695,6 +1729,9 @@ static int format_corename(struct core_name *cn, long signr) | |||
1695 | case 'e': | 1729 | case 'e': |
1696 | err = cn_printf(cn, "%s", current->comm); | 1730 | err = cn_printf(cn, "%s", current->comm); |
1697 | break; | 1731 | break; |
1732 | case 'E': | ||
1733 | err = cn_print_exe_file(cn); | ||
1734 | break; | ||
1698 | /* core limit size */ | 1735 | /* core limit size */ |
1699 | case 'c': | 1736 | case 'c': |
1700 | err = cn_printf(cn, "%lu", | 1737 | err = cn_printf(cn, "%lu", |
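The fs/exec.c hunks above add a '%E' specifier to core_pattern, expanded by cn_print_exe_file() into the path of the crashing executable with each '/' rewritten as '!'. A small userspace illustration, assuming root and a pattern of "core.%E.%p" (this program is not part of the patch); a crash of /usr/bin/foo with pid 1234 would then produce core.!usr!bin!foo.1234:

#include <stdio.h>

int main(void)
{
	/* writing core_pattern requires root */
	FILE *f = fopen("/proc/sys/kernel/core_pattern", "w");

	if (!f)
		return 1;
	fputs("core.%E.%p\n", f);
	return fclose(f) ? 1 : 0;
}
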
diff --git a/fs/ext3/super.c b/fs/ext3/super.c index 3c6a9e0eadc1..aad153ef6b78 100644 --- a/fs/ext3/super.c +++ b/fs/ext3/super.c | |||
@@ -36,6 +36,7 @@ | |||
36 | #include <linux/quotaops.h> | 36 | #include <linux/quotaops.h> |
37 | #include <linux/seq_file.h> | 37 | #include <linux/seq_file.h> |
38 | #include <linux/log2.h> | 38 | #include <linux/log2.h> |
39 | #include <linux/cleancache.h> | ||
39 | 40 | ||
40 | #include <asm/uaccess.h> | 41 | #include <asm/uaccess.h> |
41 | 42 | ||
@@ -1367,6 +1368,7 @@ static int ext3_setup_super(struct super_block *sb, struct ext3_super_block *es, | |||
1367 | } else { | 1368 | } else { |
1368 | ext3_msg(sb, KERN_INFO, "using internal journal"); | 1369 | ext3_msg(sb, KERN_INFO, "using internal journal"); |
1369 | } | 1370 | } |
1371 | cleancache_init_fs(sb); | ||
1370 | return res; | 1372 | return res; |
1371 | } | 1373 | } |
1372 | 1374 | ||
diff --git a/fs/ext4/Makefile b/fs/ext4/Makefile index c947e36eda6c..04109460ba9e 100644 --- a/fs/ext4/Makefile +++ b/fs/ext4/Makefile | |||
@@ -6,7 +6,8 @@ obj-$(CONFIG_EXT4_FS) += ext4.o | |||
6 | 6 | ||
7 | ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \ | 7 | ext4-y := balloc.o bitmap.o dir.o file.o fsync.o ialloc.o inode.o page-io.o \ |
8 | ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \ | 8 | ioctl.o namei.o super.o symlink.o hash.o resize.o extents.o \ |
9 | ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o | 9 | ext4_jbd2.o migrate.o mballoc.o block_validity.o move_extent.o \ |
10 | mmp.o | ||
10 | 11 | ||
11 | ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o | 12 | ext4-$(CONFIG_EXT4_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o |
12 | ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o | 13 | ext4-$(CONFIG_EXT4_FS_POSIX_ACL) += acl.o |
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c index 1c67139ad4b4..264f6949511e 100644 --- a/fs/ext4/balloc.c +++ b/fs/ext4/balloc.c | |||
@@ -362,130 +362,6 @@ ext4_read_block_bitmap(struct super_block *sb, ext4_group_t block_group) | |||
362 | } | 362 | } |
363 | 363 | ||
364 | /** | 364 | /** |
365 | * ext4_add_groupblocks() -- Add given blocks to an existing group | ||
366 | * @handle: handle to this transaction | ||
367 | * @sb: super block | ||
368 | * @block: start physcial block to add to the block group | ||
369 | * @count: number of blocks to free | ||
370 | * | ||
371 | * This marks the blocks as free in the bitmap. We ask the | ||
372 | * mballoc to reload the buddy after this by setting group | ||
373 | * EXT4_GROUP_INFO_NEED_INIT_BIT flag | ||
374 | */ | ||
375 | void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, | ||
376 | ext4_fsblk_t block, unsigned long count) | ||
377 | { | ||
378 | struct buffer_head *bitmap_bh = NULL; | ||
379 | struct buffer_head *gd_bh; | ||
380 | ext4_group_t block_group; | ||
381 | ext4_grpblk_t bit; | ||
382 | unsigned int i; | ||
383 | struct ext4_group_desc *desc; | ||
384 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
385 | int err = 0, ret, blk_free_count; | ||
386 | ext4_grpblk_t blocks_freed; | ||
387 | struct ext4_group_info *grp; | ||
388 | |||
389 | ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); | ||
390 | |||
391 | ext4_get_group_no_and_offset(sb, block, &block_group, &bit); | ||
392 | grp = ext4_get_group_info(sb, block_group); | ||
393 | /* | ||
394 | * Check to see if we are freeing blocks across a group | ||
395 | * boundary. | ||
396 | */ | ||
397 | if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) { | ||
398 | goto error_return; | ||
399 | } | ||
400 | bitmap_bh = ext4_read_block_bitmap(sb, block_group); | ||
401 | if (!bitmap_bh) | ||
402 | goto error_return; | ||
403 | desc = ext4_get_group_desc(sb, block_group, &gd_bh); | ||
404 | if (!desc) | ||
405 | goto error_return; | ||
406 | |||
407 | if (in_range(ext4_block_bitmap(sb, desc), block, count) || | ||
408 | in_range(ext4_inode_bitmap(sb, desc), block, count) || | ||
409 | in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || | ||
410 | in_range(block + count - 1, ext4_inode_table(sb, desc), | ||
411 | sbi->s_itb_per_group)) { | ||
412 | ext4_error(sb, "Adding blocks in system zones - " | ||
413 | "Block = %llu, count = %lu", | ||
414 | block, count); | ||
415 | goto error_return; | ||
416 | } | ||
417 | |||
418 | /* | ||
419 | * We are about to add blocks to the bitmap, | ||
420 | * so we need undo access. | ||
421 | */ | ||
422 | BUFFER_TRACE(bitmap_bh, "getting undo access"); | ||
423 | err = ext4_journal_get_undo_access(handle, bitmap_bh); | ||
424 | if (err) | ||
425 | goto error_return; | ||
426 | |||
427 | /* | ||
428 | * We are about to modify some metadata. Call the journal APIs | ||
429 | * to unshare ->b_data if a currently-committing transaction is | ||
430 | * using it | ||
431 | */ | ||
432 | BUFFER_TRACE(gd_bh, "get_write_access"); | ||
433 | err = ext4_journal_get_write_access(handle, gd_bh); | ||
434 | if (err) | ||
435 | goto error_return; | ||
436 | /* | ||
437 | * make sure we don't allow a parallel init on other groups in the | ||
438 | * same buddy cache | ||
439 | */ | ||
440 | down_write(&grp->alloc_sem); | ||
441 | for (i = 0, blocks_freed = 0; i < count; i++) { | ||
442 | BUFFER_TRACE(bitmap_bh, "clear bit"); | ||
443 | if (!ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group), | ||
444 | bit + i, bitmap_bh->b_data)) { | ||
445 | ext4_error(sb, "bit already cleared for block %llu", | ||
446 | (ext4_fsblk_t)(block + i)); | ||
447 | BUFFER_TRACE(bitmap_bh, "bit already cleared"); | ||
448 | } else { | ||
449 | blocks_freed++; | ||
450 | } | ||
451 | } | ||
452 | ext4_lock_group(sb, block_group); | ||
453 | blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc); | ||
454 | ext4_free_blks_set(sb, desc, blk_free_count); | ||
455 | desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc); | ||
456 | ext4_unlock_group(sb, block_group); | ||
457 | percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed); | ||
458 | |||
459 | if (sbi->s_log_groups_per_flex) { | ||
460 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); | ||
461 | atomic_add(blocks_freed, | ||
462 | &sbi->s_flex_groups[flex_group].free_blocks); | ||
463 | } | ||
464 | /* | ||
465 | * request to reload the buddy with the | ||
466 | * new bitmap information | ||
467 | */ | ||
468 | set_bit(EXT4_GROUP_INFO_NEED_INIT_BIT, &(grp->bb_state)); | ||
469 | grp->bb_free += blocks_freed; | ||
470 | up_write(&grp->alloc_sem); | ||
471 | |||
472 | /* We dirtied the bitmap block */ | ||
473 | BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); | ||
474 | err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); | ||
475 | |||
476 | /* And the group descriptor block */ | ||
477 | BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); | ||
478 | ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); | ||
479 | if (!err) | ||
480 | err = ret; | ||
481 | |||
482 | error_return: | ||
483 | brelse(bitmap_bh); | ||
484 | ext4_std_error(sb, err); | ||
485 | return; | ||
486 | } | ||
487 | |||
488 | /** | ||
489 | * ext4_has_free_blocks() | 365 | * ext4_has_free_blocks() |
490 | * @sbi: in-core super block structure. | 366 | * @sbi: in-core super block structure. |
491 | * @nblocks: number of needed blocks | 367 | * @nblocks: number of needed blocks |
@@ -493,7 +369,8 @@ error_return: | |||
493 | * Check if filesystem has nblocks free & available for allocation. | 369 | * Check if filesystem has nblocks free & available for allocation. |
494 | * On success return 1, return 0 on failure. | 370 | * On success return 1, return 0 on failure. |
495 | */ | 371 | */ |
496 | static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks) | 372 | static int ext4_has_free_blocks(struct ext4_sb_info *sbi, |
373 | s64 nblocks, unsigned int flags) | ||
497 | { | 374 | { |
498 | s64 free_blocks, dirty_blocks, root_blocks; | 375 | s64 free_blocks, dirty_blocks, root_blocks; |
499 | struct percpu_counter *fbc = &sbi->s_freeblocks_counter; | 376 | struct percpu_counter *fbc = &sbi->s_freeblocks_counter; |
@@ -507,11 +384,6 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks) | |||
507 | EXT4_FREEBLOCKS_WATERMARK) { | 384 | EXT4_FREEBLOCKS_WATERMARK) { |
508 | free_blocks = percpu_counter_sum_positive(fbc); | 385 | free_blocks = percpu_counter_sum_positive(fbc); |
509 | dirty_blocks = percpu_counter_sum_positive(dbc); | 386 | dirty_blocks = percpu_counter_sum_positive(dbc); |
510 | if (dirty_blocks < 0) { | ||
511 | printk(KERN_CRIT "Dirty block accounting " | ||
512 | "went wrong %lld\n", | ||
513 | (long long)dirty_blocks); | ||
514 | } | ||
515 | } | 387 | } |
516 | /* Check whether we have space after | 388 | /* Check whether we have space after |
517 | * accounting for current dirty blocks & root reserved blocks. | 389 | * accounting for current dirty blocks & root reserved blocks. |
@@ -522,7 +394,9 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks) | |||
522 | /* Hm, nope. Are (enough) root reserved blocks available? */ | 394 | /* Hm, nope. Are (enough) root reserved blocks available? */ |
523 | if (sbi->s_resuid == current_fsuid() || | 395 | if (sbi->s_resuid == current_fsuid() || |
524 | ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) || | 396 | ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) || |
525 | capable(CAP_SYS_RESOURCE)) { | 397 | capable(CAP_SYS_RESOURCE) || |
398 | (flags & EXT4_MB_USE_ROOT_BLOCKS)) { | ||
399 | |||
526 | if (free_blocks >= (nblocks + dirty_blocks)) | 400 | if (free_blocks >= (nblocks + dirty_blocks)) |
527 | return 1; | 401 | return 1; |
528 | } | 402 | } |
@@ -531,9 +405,9 @@ static int ext4_has_free_blocks(struct ext4_sb_info *sbi, s64 nblocks) | |||
531 | } | 405 | } |
532 | 406 | ||
533 | int ext4_claim_free_blocks(struct ext4_sb_info *sbi, | 407 | int ext4_claim_free_blocks(struct ext4_sb_info *sbi, |
534 | s64 nblocks) | 408 | s64 nblocks, unsigned int flags) |
535 | { | 409 | { |
536 | if (ext4_has_free_blocks(sbi, nblocks)) { | 410 | if (ext4_has_free_blocks(sbi, nblocks, flags)) { |
537 | percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks); | 411 | percpu_counter_add(&sbi->s_dirtyblocks_counter, nblocks); |
538 | return 0; | 412 | return 0; |
539 | } else | 413 | } else |
@@ -554,7 +428,7 @@ int ext4_claim_free_blocks(struct ext4_sb_info *sbi, | |||
554 | */ | 428 | */ |
555 | int ext4_should_retry_alloc(struct super_block *sb, int *retries) | 429 | int ext4_should_retry_alloc(struct super_block *sb, int *retries) |
556 | { | 430 | { |
557 | if (!ext4_has_free_blocks(EXT4_SB(sb), 1) || | 431 | if (!ext4_has_free_blocks(EXT4_SB(sb), 1, 0) || |
558 | (*retries)++ > 3 || | 432 | (*retries)++ > 3 || |
559 | !EXT4_SB(sb)->s_journal) | 433 | !EXT4_SB(sb)->s_journal) |
560 | return 0; | 434 | return 0; |
@@ -577,7 +451,8 @@ int ext4_should_retry_alloc(struct super_block *sb, int *retries) | |||
577 | * error stores in errp pointer | 451 | * error stores in errp pointer |
578 | */ | 452 | */ |
579 | ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, | 453 | ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, |
580 | ext4_fsblk_t goal, unsigned long *count, int *errp) | 454 | ext4_fsblk_t goal, unsigned int flags, |
455 | unsigned long *count, int *errp) | ||
581 | { | 456 | { |
582 | struct ext4_allocation_request ar; | 457 | struct ext4_allocation_request ar; |
583 | ext4_fsblk_t ret; | 458 | ext4_fsblk_t ret; |
@@ -587,6 +462,7 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, | |||
587 | ar.inode = inode; | 462 | ar.inode = inode; |
588 | ar.goal = goal; | 463 | ar.goal = goal; |
589 | ar.len = count ? *count : 1; | 464 | ar.len = count ? *count : 1; |
465 | ar.flags = flags; | ||
590 | 466 | ||
591 | ret = ext4_mb_new_blocks(handle, &ar, errp); | 467 | ret = ext4_mb_new_blocks(handle, &ar, errp); |
592 | if (count) | 468 | if (count) |
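With ext4_has_free_blocks() and ext4_claim_free_blocks() now taking an allocation-flags argument, callers that may dip into the root-reserved pool pass EXT4_MB_USE_ROOT_BLOCKS (defined in the ext4.h hunk below). A hedged caller sketch; example_claim is a placeholder, not a function from this patch:

#include "ext4.h"	/* fs/ext4/ext4.h */

static int example_claim(struct super_block *sb, s64 nblocks)
{
	/* claim nblocks, allowed to use the root-reserved blocks if needed */
	return ext4_claim_free_blocks(EXT4_SB(sb), nblocks,
				      EXT4_MB_USE_ROOT_BLOCKS);
}
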
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 4daaf2b753f4..a74b89c09f90 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h | |||
@@ -108,7 +108,8 @@ typedef unsigned int ext4_group_t; | |||
108 | #define EXT4_MB_DELALLOC_RESERVED 0x0400 | 108 | #define EXT4_MB_DELALLOC_RESERVED 0x0400 |
109 | /* We are doing stream allocation */ | 109 | /* We are doing stream allocation */ |
110 | #define EXT4_MB_STREAM_ALLOC 0x0800 | 110 | #define EXT4_MB_STREAM_ALLOC 0x0800 |
111 | 111 | /* Use reserved root blocks if needed */ | |
112 | #define EXT4_MB_USE_ROOT_BLOCKS 0x1000 | ||
112 | 113 | ||
113 | struct ext4_allocation_request { | 114 | struct ext4_allocation_request { |
114 | /* target inode for block we're allocating */ | 115 | /* target inode for block we're allocating */ |
@@ -209,6 +210,8 @@ struct ext4_io_submit { | |||
209 | */ | 210 | */ |
210 | #define EXT4_BAD_INO 1 /* Bad blocks inode */ | 211 | #define EXT4_BAD_INO 1 /* Bad blocks inode */ |
211 | #define EXT4_ROOT_INO 2 /* Root inode */ | 212 | #define EXT4_ROOT_INO 2 /* Root inode */ |
213 | #define EXT4_USR_QUOTA_INO 3 /* User quota inode */ | ||
214 | #define EXT4_GRP_QUOTA_INO 4 /* Group quota inode */ | ||
212 | #define EXT4_BOOT_LOADER_INO 5 /* Boot loader inode */ | 215 | #define EXT4_BOOT_LOADER_INO 5 /* Boot loader inode */ |
213 | #define EXT4_UNDEL_DIR_INO 6 /* Undelete directory inode */ | 216 | #define EXT4_UNDEL_DIR_INO 6 /* Undelete directory inode */ |
214 | #define EXT4_RESIZE_INO 7 /* Reserved group descriptors inode */ | 217 | #define EXT4_RESIZE_INO 7 /* Reserved group descriptors inode */ |
@@ -512,6 +515,10 @@ struct ext4_new_group_data { | |||
512 | /* Convert extent to initialized after IO complete */ | 515 | /* Convert extent to initialized after IO complete */ |
513 | #define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\ | 516 | #define EXT4_GET_BLOCKS_IO_CONVERT_EXT (EXT4_GET_BLOCKS_CONVERT|\ |
514 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT) | 517 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT) |
518 | /* Punch out blocks of an extent */ | ||
519 | #define EXT4_GET_BLOCKS_PUNCH_OUT_EXT 0x0020 | ||
520 | /* Don't normalize allocation size (used for fallocate) */ | ||
521 | #define EXT4_GET_BLOCKS_NO_NORMALIZE 0x0040 | ||
515 | 522 | ||
516 | /* | 523 | /* |
517 | * Flags used by ext4_free_blocks | 524 | * Flags used by ext4_free_blocks |
@@ -1028,7 +1035,7 @@ struct ext4_super_block { | |||
1028 | __le16 s_want_extra_isize; /* New inodes should reserve # bytes */ | 1035 | __le16 s_want_extra_isize; /* New inodes should reserve # bytes */ |
1029 | __le32 s_flags; /* Miscellaneous flags */ | 1036 | __le32 s_flags; /* Miscellaneous flags */ |
1030 | __le16 s_raid_stride; /* RAID stride */ | 1037 | __le16 s_raid_stride; /* RAID stride */ |
1031 | __le16 s_mmp_interval; /* # seconds to wait in MMP checking */ | 1038 | __le16 s_mmp_update_interval; /* # seconds to wait in MMP checking */ |
1032 | __le64 s_mmp_block; /* Block for multi-mount protection */ | 1039 | __le64 s_mmp_block; /* Block for multi-mount protection */ |
1033 | __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/ | 1040 | __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/ |
1034 | __u8 s_log_groups_per_flex; /* FLEX_BG group size */ | 1041 | __u8 s_log_groups_per_flex; /* FLEX_BG group size */ |
@@ -1144,6 +1151,9 @@ struct ext4_sb_info { | |||
1144 | unsigned long s_ext_blocks; | 1151 | unsigned long s_ext_blocks; |
1145 | unsigned long s_ext_extents; | 1152 | unsigned long s_ext_extents; |
1146 | #endif | 1153 | #endif |
1154 | /* ext4 extent cache stats */ | ||
1155 | unsigned long extent_cache_hits; | ||
1156 | unsigned long extent_cache_misses; | ||
1147 | 1157 | ||
1148 | /* for buddy allocator */ | 1158 | /* for buddy allocator */ |
1149 | struct ext4_group_info ***s_group_info; | 1159 | struct ext4_group_info ***s_group_info; |
@@ -1201,6 +1211,9 @@ struct ext4_sb_info { | |||
1201 | struct ext4_li_request *s_li_request; | 1211 | struct ext4_li_request *s_li_request; |
1202 | /* Wait multiplier for lazy initialization thread */ | 1212 | /* Wait multiplier for lazy initialization thread */ |
1203 | unsigned int s_li_wait_mult; | 1213 | unsigned int s_li_wait_mult; |
1214 | |||
1215 | /* Kernel thread for multiple mount protection */ | ||
1216 | struct task_struct *s_mmp_tsk; | ||
1204 | }; | 1217 | }; |
1205 | 1218 | ||
1206 | static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) | 1219 | static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) |
@@ -1338,6 +1351,7 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) | |||
1338 | #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010 | 1351 | #define EXT4_FEATURE_RO_COMPAT_GDT_CSUM 0x0010 |
1339 | #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 | 1352 | #define EXT4_FEATURE_RO_COMPAT_DIR_NLINK 0x0020 |
1340 | #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 | 1353 | #define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 |
1354 | #define EXT4_FEATURE_RO_COMPAT_QUOTA 0x0100 | ||
1341 | 1355 | ||
1342 | #define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001 | 1356 | #define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001 |
1343 | #define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002 | 1357 | #define EXT4_FEATURE_INCOMPAT_FILETYPE 0x0002 |
@@ -1351,13 +1365,29 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei) | |||
1351 | #define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400 /* EA in inode */ | 1365 | #define EXT4_FEATURE_INCOMPAT_EA_INODE 0x0400 /* EA in inode */ |
1352 | #define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000 /* data in dirent */ | 1366 | #define EXT4_FEATURE_INCOMPAT_DIRDATA 0x1000 /* data in dirent */ |
1353 | 1367 | ||
1368 | #define EXT2_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR | ||
1369 | #define EXT2_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ | ||
1370 | EXT4_FEATURE_INCOMPAT_META_BG) | ||
1371 | #define EXT2_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ | ||
1372 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ | ||
1373 | EXT4_FEATURE_RO_COMPAT_BTREE_DIR) | ||
1374 | |||
1375 | #define EXT3_FEATURE_COMPAT_SUPP EXT4_FEATURE_COMPAT_EXT_ATTR | ||
1376 | #define EXT3_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ | ||
1377 | EXT4_FEATURE_INCOMPAT_RECOVER| \ | ||
1378 | EXT4_FEATURE_INCOMPAT_META_BG) | ||
1379 | #define EXT3_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ | ||
1380 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ | ||
1381 | EXT4_FEATURE_RO_COMPAT_BTREE_DIR) | ||
1382 | |||
1354 | #define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR | 1383 | #define EXT4_FEATURE_COMPAT_SUPP EXT2_FEATURE_COMPAT_EXT_ATTR |
1355 | #define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ | 1384 | #define EXT4_FEATURE_INCOMPAT_SUPP (EXT4_FEATURE_INCOMPAT_FILETYPE| \ |
1356 | EXT4_FEATURE_INCOMPAT_RECOVER| \ | 1385 | EXT4_FEATURE_INCOMPAT_RECOVER| \ |
1357 | EXT4_FEATURE_INCOMPAT_META_BG| \ | 1386 | EXT4_FEATURE_INCOMPAT_META_BG| \ |
1358 | EXT4_FEATURE_INCOMPAT_EXTENTS| \ | 1387 | EXT4_FEATURE_INCOMPAT_EXTENTS| \ |
1359 | EXT4_FEATURE_INCOMPAT_64BIT| \ | 1388 | EXT4_FEATURE_INCOMPAT_64BIT| \ |
1360 | EXT4_FEATURE_INCOMPAT_FLEX_BG) | 1389 | EXT4_FEATURE_INCOMPAT_FLEX_BG| \ |
1390 | EXT4_FEATURE_INCOMPAT_MMP) | ||
1361 | #define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ | 1391 | #define EXT4_FEATURE_RO_COMPAT_SUPP (EXT4_FEATURE_RO_COMPAT_SPARSE_SUPER| \ |
1362 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ | 1392 | EXT4_FEATURE_RO_COMPAT_LARGE_FILE| \ |
1363 | EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \ | 1393 | EXT4_FEATURE_RO_COMPAT_GDT_CSUM| \ |
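The new EXT2_/EXT3_ masks mirror the EXT4 ones so the ext4 driver can judge whether a given superblock can be handled as ext2 or ext3. A rough sketch of the kind of check they enable (an illustration of how such masks are typically consumed, not code from this patch):

	/* Sketch: refuse an ext2-style mount when the superblock carries an
	 * incompat feature outside EXT2_FEATURE_INCOMPAT_SUPP, and treat
	 * anything outside the RO_COMPAT mask as read-only only. */
	static int ext2_features_ok(struct ext4_super_block *es, int readonly)
	{
		if (le32_to_cpu(es->s_feature_incompat) &
		    ~EXT2_FEATURE_INCOMPAT_SUPP)
			return 0;
		if (!readonly &&
		    (le32_to_cpu(es->s_feature_ro_compat) &
		     ~EXT2_FEATURE_RO_COMPAT_SUPP))
			return 0;
		return 1;
	}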
@@ -1590,12 +1620,6 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr, | |||
1590 | */ | 1620 | */ |
1591 | struct ext4_lazy_init { | 1621 | struct ext4_lazy_init { |
1592 | unsigned long li_state; | 1622 | unsigned long li_state; |
1593 | |||
1594 | wait_queue_head_t li_wait_daemon; | ||
1595 | wait_queue_head_t li_wait_task; | ||
1596 | struct timer_list li_timer; | ||
1597 | struct task_struct *li_task; | ||
1598 | |||
1599 | struct list_head li_request_list; | 1623 | struct list_head li_request_list; |
1600 | struct mutex li_list_mtx; | 1624 | struct mutex li_list_mtx; |
1601 | }; | 1625 | }; |
@@ -1615,6 +1639,67 @@ struct ext4_features { | |||
1615 | }; | 1639 | }; |
1616 | 1640 | ||
1617 | /* | 1641 | /* |
1642 | * This structure will be used for multiple mount protection. It will be | ||
1643 | * written into the block number saved in the s_mmp_block field in the | ||
1644 | * superblock. Programs that check MMP should assume that if | ||
1645 | * SEQ_FSCK (or any unknown code above SEQ_MAX) is present then it is NOT safe | ||
1646 | * to use the filesystem, regardless of how old the timestamp is. | ||
1647 | */ | ||
1648 | #define EXT4_MMP_MAGIC 0x004D4D50U /* ASCII for MMP */ | ||
1649 | #define EXT4_MMP_SEQ_CLEAN 0xFF4D4D50U /* mmp_seq value for clean unmount */ | ||
1650 | #define EXT4_MMP_SEQ_FSCK 0xE24D4D50U /* mmp_seq value when being fscked */ | ||
1651 | #define EXT4_MMP_SEQ_MAX 0xE24D4D4FU /* maximum valid mmp_seq value */ | ||
1652 | |||
1653 | struct mmp_struct { | ||
1654 | __le32 mmp_magic; /* Magic number for MMP */ | ||
1655 | __le32 mmp_seq; /* Sequence no. updated periodically */ | ||
1656 | |||
1657 | /* | ||
1658 | * mmp_time, mmp_nodename & mmp_bdevname are only used for information | ||
1659 | * purposes and do not affect the correctness of the algorithm | ||
1660 | */ | ||
1661 | __le64 mmp_time; /* Time last updated */ | ||
1662 | char mmp_nodename[64]; /* Node which last updated MMP block */ | ||
1663 | char mmp_bdevname[32]; /* Bdev which last updated MMP block */ | ||
1664 | |||
1665 | /* | ||
1666 | * mmp_check_interval is used to verify if the MMP block has been | ||
1667 | * updated on the block device. The value is updated based on the | ||
1668 | * maximum time to write the MMP block during an update cycle. | ||
1669 | */ | ||
1670 | __le16 mmp_check_interval; | ||
1671 | |||
1672 | __le16 mmp_pad1; | ||
1673 | __le32 mmp_pad2[227]; | ||
1674 | }; | ||
1675 | |||
1676 | /* arguments passed to the mmp thread */ | ||
1677 | struct mmpd_data { | ||
1678 | struct buffer_head *bh; /* bh from initial read_mmp_block() */ | ||
1679 | struct super_block *sb; /* super block of the fs */ | ||
1680 | }; | ||
1681 | |||
1682 | /* | ||
1683 | * Check interval multiplier | ||
1684 | * The MMP block is written every update interval and initially checked every | ||
1685 | * update interval x the multiplier (the value is then adapted based on the | ||
1686 | * write latency). The reason is that writes can be delayed under load and we | ||
1687 | * don't want readers to incorrectly assume that the filesystem is no longer | ||
1688 | * in use. | ||
1689 | */ | ||
1690 | #define EXT4_MMP_CHECK_MULT 2UL | ||
1691 | |||
1692 | /* | ||
1693 | * Minimum interval for MMP checking in seconds. | ||
1694 | */ | ||
1695 | #define EXT4_MMP_MIN_CHECK_INTERVAL 5UL | ||
1696 | |||
1697 | /* | ||
1698 | * Maximum interval for MMP checking in seconds. | ||
1699 | */ | ||
1700 | #define EXT4_MMP_MAX_CHECK_INTERVAL 300UL | ||
1701 | |||
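The rule spelled out in the comment above the structure can be reduced to a small decision function. A standalone user-space sketch with invented names, only to make the contract concrete:

	#include <stdint.h>

	/* Illustrative only: classify the mmp_seq value read from the block
	 * pointed to by s_mmp_block. */
	enum mmp_verdict { MMP_CLEAN, MMP_UNSAFE, MMP_MAYBE_IN_USE };

	static enum mmp_verdict classify_mmp_seq(uint32_t seq)
	{
		if (seq == 0xFF4D4D50U)		/* EXT4_MMP_SEQ_CLEAN */
			return MMP_CLEAN;
		if (seq > 0xE24D4D4FU)		/* SEQ_FSCK or unknown > SEQ_MAX */
			return MMP_UNSAFE;
		/* A live node may own the fs: re-read after mmp_check_interval
		 * seconds and refuse to proceed if the sequence changed. */
		return MMP_MAYBE_IN_USE;
	}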
1702 | /* | ||
1618 | * Function prototypes | 1703 | * Function prototypes |
1619 | */ | 1704 | */ |
1620 | 1705 | ||
@@ -1638,10 +1723,12 @@ extern int ext4_bg_has_super(struct super_block *sb, ext4_group_t group); | |||
1638 | extern unsigned long ext4_bg_num_gdb(struct super_block *sb, | 1723 | extern unsigned long ext4_bg_num_gdb(struct super_block *sb, |
1639 | ext4_group_t group); | 1724 | ext4_group_t group); |
1640 | extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, | 1725 | extern ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode, |
1641 | ext4_fsblk_t goal, unsigned long *count, int *errp); | 1726 | ext4_fsblk_t goal, |
1642 | extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi, s64 nblocks); | 1727 | unsigned int flags, |
1643 | extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, | 1728 | unsigned long *count, |
1644 | ext4_fsblk_t block, unsigned long count); | 1729 | int *errp); |
1730 | extern int ext4_claim_free_blocks(struct ext4_sb_info *sbi, | ||
1731 | s64 nblocks, unsigned int flags); | ||
1645 | extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *); | 1732 | extern ext4_fsblk_t ext4_count_free_blocks(struct super_block *); |
1646 | extern void ext4_check_blocks_bitmap(struct super_block *); | 1733 | extern void ext4_check_blocks_bitmap(struct super_block *); |
1647 | extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb, | 1734 | extern struct ext4_group_desc * ext4_get_group_desc(struct super_block * sb, |
@@ -1706,6 +1793,8 @@ extern void ext4_free_blocks(handle_t *handle, struct inode *inode, | |||
1706 | unsigned long count, int flags); | 1793 | unsigned long count, int flags); |
1707 | extern int ext4_mb_add_groupinfo(struct super_block *sb, | 1794 | extern int ext4_mb_add_groupinfo(struct super_block *sb, |
1708 | ext4_group_t i, struct ext4_group_desc *desc); | 1795 | ext4_group_t i, struct ext4_group_desc *desc); |
1796 | extern void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, | ||
1797 | ext4_fsblk_t block, unsigned long count); | ||
1709 | extern int ext4_trim_fs(struct super_block *, struct fstrim_range *); | 1798 | extern int ext4_trim_fs(struct super_block *, struct fstrim_range *); |
1710 | 1799 | ||
1711 | /* inode.c */ | 1800 | /* inode.c */ |
@@ -1729,6 +1818,7 @@ extern int ext4_change_inode_journal_flag(struct inode *, int); | |||
1729 | extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); | 1818 | extern int ext4_get_inode_loc(struct inode *, struct ext4_iloc *); |
1730 | extern int ext4_can_truncate(struct inode *inode); | 1819 | extern int ext4_can_truncate(struct inode *inode); |
1731 | extern void ext4_truncate(struct inode *); | 1820 | extern void ext4_truncate(struct inode *); |
1821 | extern int ext4_punch_hole(struct file *file, loff_t offset, loff_t length); | ||
1732 | extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks); | 1822 | extern int ext4_truncate_restart_trans(handle_t *, struct inode *, int nblocks); |
1733 | extern void ext4_set_inode_flags(struct inode *); | 1823 | extern void ext4_set_inode_flags(struct inode *); |
1734 | extern void ext4_get_inode_flags(struct ext4_inode_info *); | 1824 | extern void ext4_get_inode_flags(struct ext4_inode_info *); |
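ext4_punch_hole() is the generic entry point for de-allocating a byte range inside a file; the extent-specific worker is declared further down. A hedged sketch of the expected dispatch from the fallocate path (the mode test follows the usual FALLOC_FL_PUNCH_HOLE convention and is not part of this hunk):

	/* Sketch only: inside ext4_fallocate(), a punch request would be routed
	 * to the new helper instead of the allocation path. */
	if (mode & FALLOC_FL_PUNCH_HOLE)
		return ext4_punch_hole(file, offset, len);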
@@ -1738,6 +1828,8 @@ extern int ext4_writepage_trans_blocks(struct inode *); | |||
1738 | extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); | 1828 | extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks); |
1739 | extern int ext4_block_truncate_page(handle_t *handle, | 1829 | extern int ext4_block_truncate_page(handle_t *handle, |
1740 | struct address_space *mapping, loff_t from); | 1830 | struct address_space *mapping, loff_t from); |
1831 | extern int ext4_block_zero_page_range(handle_t *handle, | ||
1832 | struct address_space *mapping, loff_t from, loff_t length); | ||
1741 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); | 1833 | extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf); |
1742 | extern qsize_t *ext4_get_reserved_space(struct inode *inode); | 1834 | extern qsize_t *ext4_get_reserved_space(struct inode *inode); |
1743 | extern void ext4_da_update_reserve_space(struct inode *inode, | 1835 | extern void ext4_da_update_reserve_space(struct inode *inode, |
@@ -1788,6 +1880,10 @@ extern void __ext4_warning(struct super_block *, const char *, unsigned int, | |||
1788 | __LINE__, ## message) | 1880 | __LINE__, ## message) |
1789 | extern void ext4_msg(struct super_block *, const char *, const char *, ...) | 1881 | extern void ext4_msg(struct super_block *, const char *, const char *, ...) |
1790 | __attribute__ ((format (printf, 3, 4))); | 1882 | __attribute__ ((format (printf, 3, 4))); |
1883 | extern void __dump_mmp_msg(struct super_block *, struct mmp_struct *mmp, | ||
1884 | const char *, unsigned int, const char *); | ||
1885 | #define dump_mmp_msg(sb, mmp, msg) __dump_mmp_msg(sb, mmp, __func__, \ | ||
1886 | __LINE__, msg) | ||
1791 | extern void __ext4_grp_locked_error(const char *, unsigned int, \ | 1887 | extern void __ext4_grp_locked_error(const char *, unsigned int, \ |
1792 | struct super_block *, ext4_group_t, \ | 1888 | struct super_block *, ext4_group_t, \ |
1793 | unsigned long, ext4_fsblk_t, \ | 1889 | unsigned long, ext4_fsblk_t, \ |
@@ -2064,6 +2160,8 @@ extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks, | |||
2064 | extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | 2160 | extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, |
2065 | struct ext4_map_blocks *map, int flags); | 2161 | struct ext4_map_blocks *map, int flags); |
2066 | extern void ext4_ext_truncate(struct inode *); | 2162 | extern void ext4_ext_truncate(struct inode *); |
2163 | extern int ext4_ext_punch_hole(struct file *file, loff_t offset, | ||
2164 | loff_t length); | ||
2067 | extern void ext4_ext_init(struct super_block *); | 2165 | extern void ext4_ext_init(struct super_block *); |
2068 | extern void ext4_ext_release(struct super_block *); | 2166 | extern void ext4_ext_release(struct super_block *); |
2069 | extern long ext4_fallocate(struct file *file, int mode, loff_t offset, | 2167 | extern long ext4_fallocate(struct file *file, int mode, loff_t offset, |
@@ -2092,6 +2190,9 @@ extern int ext4_bio_write_page(struct ext4_io_submit *io, | |||
2092 | int len, | 2190 | int len, |
2093 | struct writeback_control *wbc); | 2191 | struct writeback_control *wbc); |
2094 | 2192 | ||
2193 | /* mmp.c */ | ||
2194 | extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t); | ||
2195 | |||
2095 | /* BH_Uninit flag: blocks are allocated but uninitialized on disk */ | 2196 | /* BH_Uninit flag: blocks are allocated but uninitialized on disk */ |
2096 | enum ext4_state_bits { | 2197 | enum ext4_state_bits { |
2097 | BH_Uninit /* blocks are allocated but uninitialized on disk */ | 2198 | BH_Uninit /* blocks are allocated but uninitialized on disk */ |
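ext4_multi_mount_protect() is the single entry point mmp.c exports; it verifies the on-disk MMP block and starts the kernel thread later stored in s_mmp_tsk. A rough sketch of its expected mount-time call site (the feature test and error label are assumptions for illustration, not part of this hunk):

	/* Sketch: during a read-write mount, engage MMP when the incompat
	 * feature is present. */
	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) &&
	    !(sb->s_flags & MS_RDONLY))
		if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
			goto failed_mount;	/* label assumed for illustration */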
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c index 6e272ef6ba96..f5240aa15601 100644 --- a/fs/ext4/ext4_jbd2.c +++ b/fs/ext4/ext4_jbd2.c | |||
@@ -6,20 +6,6 @@ | |||
6 | 6 | ||
7 | #include <trace/events/ext4.h> | 7 | #include <trace/events/ext4.h> |
8 | 8 | ||
9 | int __ext4_journal_get_undo_access(const char *where, unsigned int line, | ||
10 | handle_t *handle, struct buffer_head *bh) | ||
11 | { | ||
12 | int err = 0; | ||
13 | |||
14 | if (ext4_handle_valid(handle)) { | ||
15 | err = jbd2_journal_get_undo_access(handle, bh); | ||
16 | if (err) | ||
17 | ext4_journal_abort_handle(where, line, __func__, bh, | ||
18 | handle, err); | ||
19 | } | ||
20 | return err; | ||
21 | } | ||
22 | |||
23 | int __ext4_journal_get_write_access(const char *where, unsigned int line, | 9 | int __ext4_journal_get_write_access(const char *where, unsigned int line, |
24 | handle_t *handle, struct buffer_head *bh) | 10 | handle_t *handle, struct buffer_head *bh) |
25 | { | 11 | { |
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h index d0f53538a57f..bb85757689b6 100644 --- a/fs/ext4/ext4_jbd2.h +++ b/fs/ext4/ext4_jbd2.h | |||
@@ -126,9 +126,6 @@ void ext4_journal_abort_handle(const char *caller, unsigned int line, | |||
126 | const char *err_fn, | 126 | const char *err_fn, |
127 | struct buffer_head *bh, handle_t *handle, int err); | 127 | struct buffer_head *bh, handle_t *handle, int err); |
128 | 128 | ||
129 | int __ext4_journal_get_undo_access(const char *where, unsigned int line, | ||
130 | handle_t *handle, struct buffer_head *bh); | ||
131 | |||
132 | int __ext4_journal_get_write_access(const char *where, unsigned int line, | 129 | int __ext4_journal_get_write_access(const char *where, unsigned int line, |
133 | handle_t *handle, struct buffer_head *bh); | 130 | handle_t *handle, struct buffer_head *bh); |
134 | 131 | ||
@@ -146,8 +143,6 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line, | |||
146 | int __ext4_handle_dirty_super(const char *where, unsigned int line, | 143 | int __ext4_handle_dirty_super(const char *where, unsigned int line, |
147 | handle_t *handle, struct super_block *sb); | 144 | handle_t *handle, struct super_block *sb); |
148 | 145 | ||
149 | #define ext4_journal_get_undo_access(handle, bh) \ | ||
150 | __ext4_journal_get_undo_access(__func__, __LINE__, (handle), (bh)) | ||
151 | #define ext4_journal_get_write_access(handle, bh) \ | 146 | #define ext4_journal_get_write_access(handle, bh) \ |
152 | __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh)) | 147 | __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh)) |
153 | #define ext4_forget(handle, is_metadata, inode, bh, block_nr) \ | 148 | #define ext4_forget(handle, is_metadata, inode, bh, block_nr) \ |
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 4890d6f3ad15..5199bac7fc62 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c | |||
@@ -46,6 +46,13 @@ | |||
46 | 46 | ||
47 | #include <trace/events/ext4.h> | 47 | #include <trace/events/ext4.h> |
48 | 48 | ||
49 | static int ext4_split_extent(handle_t *handle, | ||
50 | struct inode *inode, | ||
51 | struct ext4_ext_path *path, | ||
52 | struct ext4_map_blocks *map, | ||
53 | int split_flag, | ||
54 | int flags); | ||
55 | |||
49 | static int ext4_ext_truncate_extend_restart(handle_t *handle, | 56 | static int ext4_ext_truncate_extend_restart(handle_t *handle, |
50 | struct inode *inode, | 57 | struct inode *inode, |
51 | int needed) | 58 | int needed) |
@@ -192,12 +199,13 @@ static ext4_fsblk_t ext4_ext_find_goal(struct inode *inode, | |||
192 | static ext4_fsblk_t | 199 | static ext4_fsblk_t |
193 | ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, | 200 | ext4_ext_new_meta_block(handle_t *handle, struct inode *inode, |
194 | struct ext4_ext_path *path, | 201 | struct ext4_ext_path *path, |
195 | struct ext4_extent *ex, int *err) | 202 | struct ext4_extent *ex, int *err, unsigned int flags) |
196 | { | 203 | { |
197 | ext4_fsblk_t goal, newblock; | 204 | ext4_fsblk_t goal, newblock; |
198 | 205 | ||
199 | goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); | 206 | goal = ext4_ext_find_goal(inode, path, le32_to_cpu(ex->ee_block)); |
200 | newblock = ext4_new_meta_blocks(handle, inode, goal, NULL, err); | 207 | newblock = ext4_new_meta_blocks(handle, inode, goal, flags, |
208 | NULL, err); | ||
201 | return newblock; | 209 | return newblock; |
202 | } | 210 | } |
203 | 211 | ||
@@ -474,9 +482,43 @@ static void ext4_ext_show_leaf(struct inode *inode, struct ext4_ext_path *path) | |||
474 | } | 482 | } |
475 | ext_debug("\n"); | 483 | ext_debug("\n"); |
476 | } | 484 | } |
485 | |||
486 | static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path, | ||
487 | ext4_fsblk_t newblock, int level) | ||
488 | { | ||
489 | int depth = ext_depth(inode); | ||
490 | struct ext4_extent *ex; | ||
491 | |||
492 | if (depth != level) { | ||
493 | struct ext4_extent_idx *idx; | ||
494 | idx = path[level].p_idx; | ||
495 | while (idx <= EXT_MAX_INDEX(path[level].p_hdr)) { | ||
496 | ext_debug("%d: move %d:%llu in new index %llu\n", level, | ||
497 | le32_to_cpu(idx->ei_block), | ||
498 | ext4_idx_pblock(idx), | ||
499 | newblock); | ||
500 | idx++; | ||
501 | } | ||
502 | |||
503 | return; | ||
504 | } | ||
505 | |||
506 | ex = path[depth].p_ext; | ||
507 | while (ex <= EXT_MAX_EXTENT(path[depth].p_hdr)) { | ||
508 | ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", | ||
509 | le32_to_cpu(ex->ee_block), | ||
510 | ext4_ext_pblock(ex), | ||
511 | ext4_ext_is_uninitialized(ex), | ||
512 | ext4_ext_get_actual_len(ex), | ||
513 | newblock); | ||
514 | ex++; | ||
515 | } | ||
516 | } | ||
517 | |||
477 | #else | 518 | #else |
478 | #define ext4_ext_show_path(inode, path) | 519 | #define ext4_ext_show_path(inode, path) |
479 | #define ext4_ext_show_leaf(inode, path) | 520 | #define ext4_ext_show_leaf(inode, path) |
521 | #define ext4_ext_show_move(inode, path, newblock, level) | ||
480 | #endif | 522 | #endif |
481 | 523 | ||
482 | void ext4_ext_drop_refs(struct ext4_ext_path *path) | 524 | void ext4_ext_drop_refs(struct ext4_ext_path *path) |
@@ -792,14 +834,14 @@ static int ext4_ext_insert_index(handle_t *handle, struct inode *inode, | |||
792 | * - initializes subtree | 834 | * - initializes subtree |
793 | */ | 835 | */ |
794 | static int ext4_ext_split(handle_t *handle, struct inode *inode, | 836 | static int ext4_ext_split(handle_t *handle, struct inode *inode, |
795 | struct ext4_ext_path *path, | 837 | unsigned int flags, |
796 | struct ext4_extent *newext, int at) | 838 | struct ext4_ext_path *path, |
839 | struct ext4_extent *newext, int at) | ||
797 | { | 840 | { |
798 | struct buffer_head *bh = NULL; | 841 | struct buffer_head *bh = NULL; |
799 | int depth = ext_depth(inode); | 842 | int depth = ext_depth(inode); |
800 | struct ext4_extent_header *neh; | 843 | struct ext4_extent_header *neh; |
801 | struct ext4_extent_idx *fidx; | 844 | struct ext4_extent_idx *fidx; |
802 | struct ext4_extent *ex; | ||
803 | int i = at, k, m, a; | 845 | int i = at, k, m, a; |
804 | ext4_fsblk_t newblock, oldblock; | 846 | ext4_fsblk_t newblock, oldblock; |
805 | __le32 border; | 847 | __le32 border; |
@@ -847,7 +889,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
847 | ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); | 889 | ext_debug("allocate %d blocks for indexes/leaf\n", depth - at); |
848 | for (a = 0; a < depth - at; a++) { | 890 | for (a = 0; a < depth - at; a++) { |
849 | newblock = ext4_ext_new_meta_block(handle, inode, path, | 891 | newblock = ext4_ext_new_meta_block(handle, inode, path, |
850 | newext, &err); | 892 | newext, &err, flags); |
851 | if (newblock == 0) | 893 | if (newblock == 0) |
852 | goto cleanup; | 894 | goto cleanup; |
853 | ablocks[a] = newblock; | 895 | ablocks[a] = newblock; |
@@ -876,7 +918,6 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
876 | neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); | 918 | neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); |
877 | neh->eh_magic = EXT4_EXT_MAGIC; | 919 | neh->eh_magic = EXT4_EXT_MAGIC; |
878 | neh->eh_depth = 0; | 920 | neh->eh_depth = 0; |
879 | ex = EXT_FIRST_EXTENT(neh); | ||
880 | 921 | ||
881 | /* move remainder of path[depth] to the new leaf */ | 922 | /* move remainder of path[depth] to the new leaf */ |
882 | if (unlikely(path[depth].p_hdr->eh_entries != | 923 | if (unlikely(path[depth].p_hdr->eh_entries != |
@@ -888,25 +929,12 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
888 | goto cleanup; | 929 | goto cleanup; |
889 | } | 930 | } |
890 | /* start copy from next extent */ | 931 | /* start copy from next extent */ |
891 | /* TODO: we could do it by single memmove */ | 932 | m = EXT_MAX_EXTENT(path[depth].p_hdr) - path[depth].p_ext++; |
892 | m = 0; | 933 | ext4_ext_show_move(inode, path, newblock, depth); |
893 | path[depth].p_ext++; | ||
894 | while (path[depth].p_ext <= | ||
895 | EXT_MAX_EXTENT(path[depth].p_hdr)) { | ||
896 | ext_debug("move %d:%llu:[%d]%d in new leaf %llu\n", | ||
897 | le32_to_cpu(path[depth].p_ext->ee_block), | ||
898 | ext4_ext_pblock(path[depth].p_ext), | ||
899 | ext4_ext_is_uninitialized(path[depth].p_ext), | ||
900 | ext4_ext_get_actual_len(path[depth].p_ext), | ||
901 | newblock); | ||
902 | /*memmove(ex++, path[depth].p_ext++, | ||
903 | sizeof(struct ext4_extent)); | ||
904 | neh->eh_entries++;*/ | ||
905 | path[depth].p_ext++; | ||
906 | m++; | ||
907 | } | ||
908 | if (m) { | 934 | if (m) { |
909 | memmove(ex, path[depth].p_ext-m, sizeof(struct ext4_extent)*m); | 935 | struct ext4_extent *ex; |
936 | ex = EXT_FIRST_EXTENT(neh); | ||
937 | memmove(ex, path[depth].p_ext, sizeof(struct ext4_extent) * m); | ||
910 | le16_add_cpu(&neh->eh_entries, m); | 938 | le16_add_cpu(&neh->eh_entries, m); |
911 | } | 939 | } |
912 | 940 | ||
@@ -968,12 +996,8 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
968 | 996 | ||
969 | ext_debug("int.index at %d (block %llu): %u -> %llu\n", | 997 | ext_debug("int.index at %d (block %llu): %u -> %llu\n", |
970 | i, newblock, le32_to_cpu(border), oldblock); | 998 | i, newblock, le32_to_cpu(border), oldblock); |
971 | /* copy indexes */ | ||
972 | m = 0; | ||
973 | path[i].p_idx++; | ||
974 | 999 | ||
975 | ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, | 1000 | /* move remainder of path[i] to the new index block */ |
976 | EXT_MAX_INDEX(path[i].p_hdr)); | ||
977 | if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != | 1001 | if (unlikely(EXT_MAX_INDEX(path[i].p_hdr) != |
978 | EXT_LAST_INDEX(path[i].p_hdr))) { | 1002 | EXT_LAST_INDEX(path[i].p_hdr))) { |
979 | EXT4_ERROR_INODE(inode, | 1003 | EXT4_ERROR_INODE(inode, |
@@ -982,20 +1006,13 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode, | |||
982 | err = -EIO; | 1006 | err = -EIO; |
983 | goto cleanup; | 1007 | goto cleanup; |
984 | } | 1008 | } |
985 | while (path[i].p_idx <= EXT_MAX_INDEX(path[i].p_hdr)) { | 1009 | /* start copy indexes */ |
986 | ext_debug("%d: move %d:%llu in new index %llu\n", i, | 1010 | m = EXT_MAX_INDEX(path[i].p_hdr) - path[i].p_idx++; |
987 | le32_to_cpu(path[i].p_idx->ei_block), | 1011 | ext_debug("cur 0x%p, last 0x%p\n", path[i].p_idx, |
988 | ext4_idx_pblock(path[i].p_idx), | 1012 | EXT_MAX_INDEX(path[i].p_hdr)); |
989 | newblock); | 1013 | ext4_ext_show_move(inode, path, newblock, i); |
990 | /*memmove(++fidx, path[i].p_idx++, | ||
991 | sizeof(struct ext4_extent_idx)); | ||
992 | neh->eh_entries++; | ||
993 | BUG_ON(neh->eh_entries > neh->eh_max);*/ | ||
994 | path[i].p_idx++; | ||
995 | m++; | ||
996 | } | ||
997 | if (m) { | 1014 | if (m) { |
998 | memmove(++fidx, path[i].p_idx - m, | 1015 | memmove(++fidx, path[i].p_idx, |
999 | sizeof(struct ext4_extent_idx) * m); | 1016 | sizeof(struct ext4_extent_idx) * m); |
1000 | le16_add_cpu(&neh->eh_entries, m); | 1017 | le16_add_cpu(&neh->eh_entries, m); |
1001 | } | 1018 | } |
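The rewritten copy replaces the per-element loop with a count computed by pointer arithmetic plus a single memmove(). The same idiom in isolation, as a self-contained user-space program (nothing below is ext4 code):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		int src[8] = {0, 1, 2, 3, 4, 5, 6, 7}, dst[8] = {0};
		int *p = &src[3];	/* the "current" element, like p_ext */
		int *last = &src[7];	/* like EXT_MAX_EXTENT()/EXT_MAX_INDEX() */

		/* elements after the current one, then one bulk move */
		size_t m = last - p++;	/* m == 4; p now points at src[4] */
		memmove(dst, p, m * sizeof(*p));

		for (size_t i = 0; i < m; i++)
			printf("%d ", dst[i]);	/* prints: 4 5 6 7 */
		printf("\n");
		return 0;
	}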
@@ -1056,8 +1073,9 @@ cleanup: | |||
1056 | * just created block | 1073 | * just created block |
1057 | */ | 1074 | */ |
1058 | static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, | 1075 | static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, |
1059 | struct ext4_ext_path *path, | 1076 | unsigned int flags, |
1060 | struct ext4_extent *newext) | 1077 | struct ext4_ext_path *path, |
1078 | struct ext4_extent *newext) | ||
1061 | { | 1079 | { |
1062 | struct ext4_ext_path *curp = path; | 1080 | struct ext4_ext_path *curp = path; |
1063 | struct ext4_extent_header *neh; | 1081 | struct ext4_extent_header *neh; |
@@ -1065,7 +1083,8 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, | |||
1065 | ext4_fsblk_t newblock; | 1083 | ext4_fsblk_t newblock; |
1066 | int err = 0; | 1084 | int err = 0; |
1067 | 1085 | ||
1068 | newblock = ext4_ext_new_meta_block(handle, inode, path, newext, &err); | 1086 | newblock = ext4_ext_new_meta_block(handle, inode, path, |
1087 | newext, &err, flags); | ||
1069 | if (newblock == 0) | 1088 | if (newblock == 0) |
1070 | return err; | 1089 | return err; |
1071 | 1090 | ||
@@ -1140,8 +1159,9 @@ out: | |||
1140 | * if no free index is found, then it requests in-depth growing. | 1159 | * if no free index is found, then it requests in-depth growing. |
1141 | */ | 1160 | */ |
1142 | static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, | 1161 | static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, |
1143 | struct ext4_ext_path *path, | 1162 | unsigned int flags, |
1144 | struct ext4_extent *newext) | 1163 | struct ext4_ext_path *path, |
1164 | struct ext4_extent *newext) | ||
1145 | { | 1165 | { |
1146 | struct ext4_ext_path *curp; | 1166 | struct ext4_ext_path *curp; |
1147 | int depth, i, err = 0; | 1167 | int depth, i, err = 0; |
@@ -1161,7 +1181,7 @@ repeat: | |||
1161 | if (EXT_HAS_FREE_INDEX(curp)) { | 1181 | if (EXT_HAS_FREE_INDEX(curp)) { |
1162 | /* if we found index with free entry, then use that | 1182 | /* if we found index with free entry, then use that |
1163 | * entry: create all needed subtree and add new leaf */ | 1183 | * entry: create all needed subtree and add new leaf */ |
1164 | err = ext4_ext_split(handle, inode, path, newext, i); | 1184 | err = ext4_ext_split(handle, inode, flags, path, newext, i); |
1165 | if (err) | 1185 | if (err) |
1166 | goto out; | 1186 | goto out; |
1167 | 1187 | ||
@@ -1174,7 +1194,8 @@ repeat: | |||
1174 | err = PTR_ERR(path); | 1194 | err = PTR_ERR(path); |
1175 | } else { | 1195 | } else { |
1176 | /* tree is full, time to grow in depth */ | 1196 | /* tree is full, time to grow in depth */ |
1177 | err = ext4_ext_grow_indepth(handle, inode, path, newext); | 1197 | err = ext4_ext_grow_indepth(handle, inode, flags, |
1198 | path, newext); | ||
1178 | if (err) | 1199 | if (err) |
1179 | goto out; | 1200 | goto out; |
1180 | 1201 | ||
@@ -1563,7 +1584,7 @@ ext4_can_extents_be_merged(struct inode *inode, struct ext4_extent *ex1, | |||
1563 | * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns | 1584 | * Returns 0 if the extents (ex and ex+1) were _not_ merged and returns |
1564 | * 1 if they got merged. | 1585 | * 1 if they got merged. |
1565 | */ | 1586 | */ |
1566 | static int ext4_ext_try_to_merge(struct inode *inode, | 1587 | static int ext4_ext_try_to_merge_right(struct inode *inode, |
1567 | struct ext4_ext_path *path, | 1588 | struct ext4_ext_path *path, |
1568 | struct ext4_extent *ex) | 1589 | struct ext4_extent *ex) |
1569 | { | 1590 | { |
@@ -1603,6 +1624,31 @@ static int ext4_ext_try_to_merge(struct inode *inode, | |||
1603 | } | 1624 | } |
1604 | 1625 | ||
1605 | /* | 1626 | /* |
1627 | * This function tries to merge the @ex extent to neighbours in the tree. | ||
1628 | * return 1 if merge left else 0. | ||
1629 | */ | ||
1630 | static int ext4_ext_try_to_merge(struct inode *inode, | ||
1631 | struct ext4_ext_path *path, | ||
1632 | struct ext4_extent *ex) { | ||
1633 | struct ext4_extent_header *eh; | ||
1634 | unsigned int depth; | ||
1635 | int merge_done = 0; | ||
1636 | int ret = 0; | ||
1637 | |||
1638 | depth = ext_depth(inode); | ||
1639 | BUG_ON(path[depth].p_hdr == NULL); | ||
1640 | eh = path[depth].p_hdr; | ||
1641 | |||
1642 | if (ex > EXT_FIRST_EXTENT(eh)) | ||
1643 | merge_done = ext4_ext_try_to_merge_right(inode, path, ex - 1); | ||
1644 | |||
1645 | if (!merge_done) | ||
1646 | ret = ext4_ext_try_to_merge_right(inode, path, ex); | ||
1647 | |||
1648 | return ret; | ||
1649 | } | ||
1650 | |||
1651 | /* | ||
1606 | * check if a portion of the "newext" extent overlaps with an | 1652 | * check if a portion of the "newext" extent overlaps with an |
1607 | * existing extent. | 1653 | * existing extent. |
1608 | * | 1654 | * |
@@ -1668,6 +1714,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, | |||
1668 | int depth, len, err; | 1714 | int depth, len, err; |
1669 | ext4_lblk_t next; | 1715 | ext4_lblk_t next; |
1670 | unsigned uninitialized = 0; | 1716 | unsigned uninitialized = 0; |
1717 | int flags = 0; | ||
1671 | 1718 | ||
1672 | if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { | 1719 | if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { |
1673 | EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); | 1720 | EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); |
@@ -1742,7 +1789,9 @@ repeat: | |||
1742 | * There is no free space in the found leaf. | 1789 | * There is no free space in the found leaf. |
1743 | * We're gonna add a new leaf in the tree. | 1790 | * We're gonna add a new leaf in the tree. |
1744 | */ | 1791 | */ |
1745 | err = ext4_ext_create_new_leaf(handle, inode, path, newext); | 1792 | if (flag & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) |
1793 | flags = EXT4_MB_USE_ROOT_BLOCKS; | ||
1794 | err = ext4_ext_create_new_leaf(handle, inode, flags, path, newext); | ||
1746 | if (err) | 1795 | if (err) |
1747 | goto cleanup; | 1796 | goto cleanup; |
1748 | depth = ext_depth(inode); | 1797 | depth = ext_depth(inode); |
@@ -2003,13 +2052,25 @@ ext4_ext_put_gap_in_cache(struct inode *inode, struct ext4_ext_path *path, | |||
2003 | } | 2052 | } |
2004 | 2053 | ||
2005 | /* | 2054 | /* |
2055 | * ext4_ext_check_cache() | ||
2056 | * Checks to see if the given block is in the cache. | ||
2057 | * If it is, the cached extent is stored in the given | ||
2058 | * cache extent pointer. If the cached extent is a hole, | ||
2059 | * this routine should be used instead of | ||
2060 | * ext4_ext_in_cache if the calling function needs to | ||
2061 | * know the size of the hole. | ||
2062 | * | ||
2063 | * @inode: The file's inode | ||
2064 | * @block: The block to look for in the cache | ||
2065 | * @ex: Pointer where the cached extent will be stored | ||
2066 | * if it contains block | ||
2067 | * | ||
2006 | * Return 0 if cache is invalid; 1 if the cache is valid | 2068 | * Return 0 if cache is invalid; 1 if the cache is valid |
2007 | */ | 2069 | */ |
2008 | static int | 2070 | static int ext4_ext_check_cache(struct inode *inode, ext4_lblk_t block, |
2009 | ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, | 2071 | struct ext4_ext_cache *ex){ |
2010 | struct ext4_extent *ex) | ||
2011 | { | ||
2012 | struct ext4_ext_cache *cex; | 2072 | struct ext4_ext_cache *cex; |
2073 | struct ext4_sb_info *sbi; | ||
2013 | int ret = 0; | 2074 | int ret = 0; |
2014 | 2075 | ||
2015 | /* | 2076 | /* |
@@ -2017,26 +2078,60 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, | |||
2017 | */ | 2078 | */ |
2018 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); | 2079 | spin_lock(&EXT4_I(inode)->i_block_reservation_lock); |
2019 | cex = &EXT4_I(inode)->i_cached_extent; | 2080 | cex = &EXT4_I(inode)->i_cached_extent; |
2081 | sbi = EXT4_SB(inode->i_sb); | ||
2020 | 2082 | ||
2021 | /* has cache valid data? */ | 2083 | /* has cache valid data? */ |
2022 | if (cex->ec_len == 0) | 2084 | if (cex->ec_len == 0) |
2023 | goto errout; | 2085 | goto errout; |
2024 | 2086 | ||
2025 | if (in_range(block, cex->ec_block, cex->ec_len)) { | 2087 | if (in_range(block, cex->ec_block, cex->ec_len)) { |
2026 | ex->ee_block = cpu_to_le32(cex->ec_block); | 2088 | memcpy(ex, cex, sizeof(struct ext4_ext_cache)); |
2027 | ext4_ext_store_pblock(ex, cex->ec_start); | ||
2028 | ex->ee_len = cpu_to_le16(cex->ec_len); | ||
2029 | ext_debug("%u cached by %u:%u:%llu\n", | 2089 | ext_debug("%u cached by %u:%u:%llu\n", |
2030 | block, | 2090 | block, |
2031 | cex->ec_block, cex->ec_len, cex->ec_start); | 2091 | cex->ec_block, cex->ec_len, cex->ec_start); |
2032 | ret = 1; | 2092 | ret = 1; |
2033 | } | 2093 | } |
2034 | errout: | 2094 | errout: |
2095 | if (!ret) | ||
2096 | sbi->extent_cache_misses++; | ||
2097 | else | ||
2098 | sbi->extent_cache_hits++; | ||
2035 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); | 2099 | spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); |
2036 | return ret; | 2100 | return ret; |
2037 | } | 2101 | } |
2038 | 2102 | ||
2039 | /* | 2103 | /* |
2104 | * ext4_ext_in_cache() | ||
2105 | * Checks to see if the given block is in the cache. | ||
2106 | * If it is, the cached extent is stored in the given | ||
2107 | * extent pointer. | ||
2108 | * | ||
2109 | * @inode: The file's inode | ||
2110 | * @block: The block to look for in the cache | ||
2111 | * @ex: Pointer where the cached extent will be stored | ||
2112 | * if it contains block | ||
2113 | * | ||
2114 | * Return 0 if cache is invalid; 1 if the cache is valid | ||
2115 | */ | ||
2116 | static int | ||
2117 | ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block, | ||
2118 | struct ext4_extent *ex) | ||
2119 | { | ||
2120 | struct ext4_ext_cache cex; | ||
2121 | int ret = 0; | ||
2122 | |||
2123 | if (ext4_ext_check_cache(inode, block, &cex)) { | ||
2124 | ex->ee_block = cpu_to_le32(cex.ec_block); | ||
2125 | ext4_ext_store_pblock(ex, cex.ec_start); | ||
2126 | ex->ee_len = cpu_to_le16(cex.ec_len); | ||
2127 | ret = 1; | ||
2128 | } | ||
2129 | |||
2130 | return ret; | ||
2131 | } | ||
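A caller that wants the cached geometry itself, rather than an on-disk style ext4_extent, goes through ext4_ext_check_cache() directly. A hedged sketch of such a caller; the helper is invented for illustration:

	/* Illustrative only: how many logical blocks are cached starting at
	 * 'block', whether the cached range is an extent or a hole. */
	static ext4_lblk_t cached_len_from(struct inode *inode, ext4_lblk_t block)
	{
		struct ext4_ext_cache cex;

		if (!ext4_ext_check_cache(inode, block, &cex))
			return 0;		/* nothing cached for this block */
		return cex.ec_block + cex.ec_len - block;
	}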
2132 | |||
2133 | |||
2134 | /* | ||
2040 | * ext4_ext_rm_idx: | 2135 | * ext4_ext_rm_idx: |
2041 | * removes index from the index block. | 2136 | * removes index from the index block. |
2042 | * It's used in truncate case only, thus all requests are for | 2137 | * It's used in truncate case only, thus all requests are for |
@@ -2163,8 +2258,16 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, | |||
2163 | ext4_free_blocks(handle, inode, NULL, start, num, flags); | 2258 | ext4_free_blocks(handle, inode, NULL, start, num, flags); |
2164 | } else if (from == le32_to_cpu(ex->ee_block) | 2259 | } else if (from == le32_to_cpu(ex->ee_block) |
2165 | && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { | 2260 | && to <= le32_to_cpu(ex->ee_block) + ee_len - 1) { |
2166 | printk(KERN_INFO "strange request: removal %u-%u from %u:%u\n", | 2261 | /* head removal */ |
2167 | from, to, le32_to_cpu(ex->ee_block), ee_len); | 2262 | ext4_lblk_t num; |
2263 | ext4_fsblk_t start; | ||
2264 | |||
2265 | num = to - from; | ||
2266 | start = ext4_ext_pblock(ex); | ||
2267 | |||
2268 | ext_debug("free first %u blocks starting %llu\n", num, start); | ||
2269 | ext4_free_blocks(handle, inode, 0, start, num, flags); | ||
2270 | |||
2168 | } else { | 2271 | } else { |
2169 | printk(KERN_INFO "strange request: removal(2) " | 2272 | printk(KERN_INFO "strange request: removal(2) " |
2170 | "%u-%u from %u:%u\n", | 2273 | "%u-%u from %u:%u\n", |
@@ -2173,9 +2276,22 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, | |||
2173 | return 0; | 2276 | return 0; |
2174 | } | 2277 | } |
2175 | 2278 | ||
2279 | |||
2280 | /* | ||
2281 | * ext4_ext_rm_leaf() Removes the extents associated with the | ||
2282 | * blocks appearing between "start" and "end", and splits the extents | ||
2283 | * if "start" and "end" appear in the same extent | ||
2284 | * | ||
2285 | * @handle: The journal handle | ||
2286 | * @inode: The files inode | ||
2287 | * @path: The path to the leaf | ||
2288 | * @start: The first block to remove | ||
2289 | * @end: The last block to remove | ||
2290 | */ | ||
2176 | static int | 2291 | static int |
2177 | ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | 2292 | ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, |
2178 | struct ext4_ext_path *path, ext4_lblk_t start) | 2293 | struct ext4_ext_path *path, ext4_lblk_t start, |
2294 | ext4_lblk_t end) | ||
2179 | { | 2295 | { |
2180 | int err = 0, correct_index = 0; | 2296 | int err = 0, correct_index = 0; |
2181 | int depth = ext_depth(inode), credits; | 2297 | int depth = ext_depth(inode), credits; |
@@ -2186,6 +2302,7 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | |||
2186 | unsigned short ex_ee_len; | 2302 | unsigned short ex_ee_len; |
2187 | unsigned uninitialized = 0; | 2303 | unsigned uninitialized = 0; |
2188 | struct ext4_extent *ex; | 2304 | struct ext4_extent *ex; |
2305 | struct ext4_map_blocks map; | ||
2189 | 2306 | ||
2190 | /* the header must be checked already in ext4_ext_remove_space() */ | 2307 | /* the header must be checked already in ext4_ext_remove_space() */ |
2191 | ext_debug("truncate since %u in leaf\n", start); | 2308 | ext_debug("truncate since %u in leaf\n", start); |
@@ -2215,31 +2332,95 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | |||
2215 | path[depth].p_ext = ex; | 2332 | path[depth].p_ext = ex; |
2216 | 2333 | ||
2217 | a = ex_ee_block > start ? ex_ee_block : start; | 2334 | a = ex_ee_block > start ? ex_ee_block : start; |
2218 | b = ex_ee_block + ex_ee_len - 1 < EXT_MAX_BLOCK ? | 2335 | b = ex_ee_block+ex_ee_len - 1 < end ? |
2219 | ex_ee_block + ex_ee_len - 1 : EXT_MAX_BLOCK; | 2336 | ex_ee_block+ex_ee_len - 1 : end; |
2220 | 2337 | ||
2221 | ext_debug(" border %u:%u\n", a, b); | 2338 | ext_debug(" border %u:%u\n", a, b); |
2222 | 2339 | ||
2223 | if (a != ex_ee_block && b != ex_ee_block + ex_ee_len - 1) { | 2340 | /* If this extent is beyond the end of the hole, skip it */ |
2224 | block = 0; | 2341 | if (end <= ex_ee_block) { |
2225 | num = 0; | 2342 | ex--; |
2226 | BUG(); | 2343 | ex_ee_block = le32_to_cpu(ex->ee_block); |
2344 | ex_ee_len = ext4_ext_get_actual_len(ex); | ||
2345 | continue; | ||
2346 | } else if (a != ex_ee_block && | ||
2347 | b != ex_ee_block + ex_ee_len - 1) { | ||
2348 | /* | ||
2349 | * If this is a truncate, then this condition should | ||
2350 | * never happen because at least one of the end points | ||
2351 | * needs to be on the edge of the extent. | ||
2352 | */ | ||
2353 | if (end == EXT_MAX_BLOCK) { | ||
2354 | ext_debug(" bad truncate %u:%u\n", | ||
2355 | start, end); | ||
2356 | block = 0; | ||
2357 | num = 0; | ||
2358 | err = -EIO; | ||
2359 | goto out; | ||
2360 | } | ||
2361 | /* | ||
2362 | * else this is a hole punch, so the extent needs to | ||
2363 | * be split since neither edge of the hole is on the | ||
2364 | * extent edge | ||
2365 | */ | ||
2366 | else { | ||
2367 | map.m_pblk = ext4_ext_pblock(ex); | ||
2368 | map.m_lblk = ex_ee_block; | ||
2369 | map.m_len = b - ex_ee_block; | ||
2370 | |||
2371 | err = ext4_split_extent(handle, | ||
2372 | inode, path, &map, 0, | ||
2373 | EXT4_GET_BLOCKS_PUNCH_OUT_EXT | | ||
2374 | EXT4_GET_BLOCKS_PRE_IO); | ||
2375 | |||
2376 | if (err < 0) | ||
2377 | goto out; | ||
2378 | |||
2379 | ex_ee_len = ext4_ext_get_actual_len(ex); | ||
2380 | |||
2381 | b = ex_ee_block+ex_ee_len - 1 < end ? | ||
2382 | ex_ee_block+ex_ee_len - 1 : end; | ||
2383 | |||
2384 | /* Then remove tail of this extent */ | ||
2385 | block = ex_ee_block; | ||
2386 | num = a - block; | ||
2387 | } | ||
2227 | } else if (a != ex_ee_block) { | 2388 | } else if (a != ex_ee_block) { |
2228 | /* remove tail of the extent */ | 2389 | /* remove tail of the extent */ |
2229 | block = ex_ee_block; | 2390 | block = ex_ee_block; |
2230 | num = a - block; | 2391 | num = a - block; |
2231 | } else if (b != ex_ee_block + ex_ee_len - 1) { | 2392 | } else if (b != ex_ee_block + ex_ee_len - 1) { |
2232 | /* remove head of the extent */ | 2393 | /* remove head of the extent */ |
2233 | block = a; | 2394 | block = b; |
2234 | num = b - a; | 2395 | num = ex_ee_block + ex_ee_len - b; |
2235 | /* there is no "make a hole" API yet */ | 2396 | |
2236 | BUG(); | 2397 | /* |
2398 | * If this is a truncate, this condition | ||
2399 | * should never happen | ||
2400 | */ | ||
2401 | if (end == EXT_MAX_BLOCK) { | ||
2402 | ext_debug(" bad truncate %u:%u\n", | ||
2403 | start, end); | ||
2404 | err = -EIO; | ||
2405 | goto out; | ||
2406 | } | ||
2237 | } else { | 2407 | } else { |
2238 | /* remove whole extent: excellent! */ | 2408 | /* remove whole extent: excellent! */ |
2239 | block = ex_ee_block; | 2409 | block = ex_ee_block; |
2240 | num = 0; | 2410 | num = 0; |
2241 | BUG_ON(a != ex_ee_block); | 2411 | if (a != ex_ee_block) { |
2242 | BUG_ON(b != ex_ee_block + ex_ee_len - 1); | 2412 | ext_debug(" bad truncate %u:%u\n", |
2413 | start, end); | ||
2414 | err = -EIO; | ||
2415 | goto out; | ||
2416 | } | ||
2417 | |||
2418 | if (b != ex_ee_block + ex_ee_len - 1) { | ||
2419 | ext_debug(" bad truncate %u:%u\n", | ||
2420 | start, end); | ||
2421 | err = -EIO; | ||
2422 | goto out; | ||
2423 | } | ||
2243 | } | 2424 | } |
2244 | 2425 | ||
2245 | /* | 2426 | /* |
@@ -2270,7 +2451,13 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | |||
2270 | if (num == 0) { | 2451 | if (num == 0) { |
2271 | /* this extent is removed; mark slot entirely unused */ | 2452 | /* this extent is removed; mark slot entirely unused */ |
2272 | ext4_ext_store_pblock(ex, 0); | 2453 | ext4_ext_store_pblock(ex, 0); |
2273 | le16_add_cpu(&eh->eh_entries, -1); | 2454 | } else if (block != ex_ee_block) { |
2455 | /* | ||
2456 | * If this was a head removal, then we need to update | ||
2457 | * the physical block since it is now at a different | ||
2458 | * location | ||
2459 | */ | ||
2460 | ext4_ext_store_pblock(ex, ext4_ext_pblock(ex) + (b-a)); | ||
2274 | } | 2461 | } |
2275 | 2462 | ||
2276 | ex->ee_block = cpu_to_le32(block); | 2463 | ex->ee_block = cpu_to_le32(block); |
@@ -2286,6 +2473,27 @@ ext4_ext_rm_leaf(handle_t *handle, struct inode *inode, | |||
2286 | if (err) | 2473 | if (err) |
2287 | goto out; | 2474 | goto out; |
2288 | 2475 | ||
2476 | /* | ||
2477 | * If the extent was completely released, | ||
2478 | * we need to remove it from the leaf | ||
2479 | */ | ||
2480 | if (num == 0) { | ||
2481 | if (end != EXT_MAX_BLOCK) { | ||
2482 | /* | ||
2483 | * For hole punching, we need to scoot all the | ||
2484 | * extents up when an extent is removed so that | ||
2486 | * we don't have blank extents in the middle | ||
2486 | */ | ||
2487 | memmove(ex, ex+1, (EXT_LAST_EXTENT(eh) - ex) * | ||
2488 | sizeof(struct ext4_extent)); | ||
2489 | |||
2490 | /* Now get rid of the one at the end */ | ||
2491 | memset(EXT_LAST_EXTENT(eh), 0, | ||
2492 | sizeof(struct ext4_extent)); | ||
2493 | } | ||
2494 | le16_add_cpu(&eh->eh_entries, -1); | ||
2495 | } | ||
2496 | |||
2289 | ext_debug("new extent: %u:%u:%llu\n", block, num, | 2497 | ext_debug("new extent: %u:%u:%llu\n", block, num, |
2290 | ext4_ext_pblock(ex)); | 2498 | ext4_ext_pblock(ex)); |
2291 | ex--; | 2499 | ex--; |
@@ -2326,7 +2534,8 @@ ext4_ext_more_to_rm(struct ext4_ext_path *path) | |||
2326 | return 1; | 2534 | return 1; |
2327 | } | 2535 | } |
2328 | 2536 | ||
2329 | static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start) | 2537 | static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start, |
2538 | ext4_lblk_t end) | ||
2330 | { | 2539 | { |
2331 | struct super_block *sb = inode->i_sb; | 2540 | struct super_block *sb = inode->i_sb; |
2332 | int depth = ext_depth(inode); | 2541 | int depth = ext_depth(inode); |
@@ -2365,7 +2574,8 @@ again: | |||
2365 | while (i >= 0 && err == 0) { | 2574 | while (i >= 0 && err == 0) { |
2366 | if (i == depth) { | 2575 | if (i == depth) { |
2367 | /* this is leaf block */ | 2576 | /* this is leaf block */ |
2368 | err = ext4_ext_rm_leaf(handle, inode, path, start); | 2577 | err = ext4_ext_rm_leaf(handle, inode, path, |
2578 | start, end); | ||
2369 | /* root level has p_bh == NULL, brelse() eats this */ | 2579 | /* root level has p_bh == NULL, brelse() eats this */ |
2370 | brelse(path[i].p_bh); | 2580 | brelse(path[i].p_bh); |
2371 | path[i].p_bh = NULL; | 2581 | path[i].p_bh = NULL; |
@@ -2529,6 +2739,195 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex) | |||
2529 | return ret; | 2739 | return ret; |
2530 | } | 2740 | } |
2531 | 2741 | ||
2742 | /* | ||
2743 | * used by extent splitting. | ||
2744 | */ | ||
2745 | #define EXT4_EXT_MAY_ZEROOUT 0x1 /* safe to zeroout if split fails \ | ||
2746 | due to ENOSPC */ | ||
2747 | #define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */ | ||
2748 | #define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */ | ||
2749 | |||
2750 | /* | ||
2751 | * ext4_split_extent_at() splits an extent at given block. | ||
2752 | * | ||
2753 | * @handle: the journal handle | ||
2754 | * @inode: the file inode | ||
2755 | * @path: the path to the extent | ||
2756 | * @split: the logical block where the extent is split. | ||
2757 | * @split_flag: indicates whether the extent may be zeroed out if the split | ||
2758 | * fails, and the states (init or uninit) of the new extents. | ||
2759 | * @flags: flags used to insert new extent to extent tree. | ||
2760 | * | ||
2761 | * | ||
2762 | * Splits extent [a, b] into two extents [a, @split) and [@split, b], the | ||
2763 | * states of which are determined by split_flag. | ||
2764 | * | ||
2765 | * There are two cases: | ||
2766 | * a> the extent is split into two extents. | ||
2767 | * b> no split is needed; the extent is just marked. | ||
2768 | * | ||
2769 | * return 0 on success. | ||
2770 | */ | ||
2771 | static int ext4_split_extent_at(handle_t *handle, | ||
2772 | struct inode *inode, | ||
2773 | struct ext4_ext_path *path, | ||
2774 | ext4_lblk_t split, | ||
2775 | int split_flag, | ||
2776 | int flags) | ||
2777 | { | ||
2778 | ext4_fsblk_t newblock; | ||
2779 | ext4_lblk_t ee_block; | ||
2780 | struct ext4_extent *ex, newex, orig_ex; | ||
2781 | struct ext4_extent *ex2 = NULL; | ||
2782 | unsigned int ee_len, depth; | ||
2783 | int err = 0; | ||
2784 | |||
2785 | ext_debug("ext4_split_extents_at: inode %lu, logical" | ||
2786 | "block %llu\n", inode->i_ino, (unsigned long long)split); | ||
2787 | |||
2788 | ext4_ext_show_leaf(inode, path); | ||
2789 | |||
2790 | depth = ext_depth(inode); | ||
2791 | ex = path[depth].p_ext; | ||
2792 | ee_block = le32_to_cpu(ex->ee_block); | ||
2793 | ee_len = ext4_ext_get_actual_len(ex); | ||
2794 | newblock = split - ee_block + ext4_ext_pblock(ex); | ||
2795 | |||
2796 | BUG_ON(split < ee_block || split >= (ee_block + ee_len)); | ||
2797 | |||
2798 | err = ext4_ext_get_access(handle, inode, path + depth); | ||
2799 | if (err) | ||
2800 | goto out; | ||
2801 | |||
2802 | if (split == ee_block) { | ||
2803 | /* | ||
2804 | * case b: block @split is the block that the extent begins with | ||
2805 | * then we just change the state of the extent, and splitting | ||
2806 | * is not needed. | ||
2807 | */ | ||
2808 | if (split_flag & EXT4_EXT_MARK_UNINIT2) | ||
2809 | ext4_ext_mark_uninitialized(ex); | ||
2810 | else | ||
2811 | ext4_ext_mark_initialized(ex); | ||
2812 | |||
2813 | if (!(flags & EXT4_GET_BLOCKS_PRE_IO)) | ||
2814 | ext4_ext_try_to_merge(inode, path, ex); | ||
2815 | |||
2816 | err = ext4_ext_dirty(handle, inode, path + depth); | ||
2817 | goto out; | ||
2818 | } | ||
2819 | |||
2820 | /* case a */ | ||
2821 | memcpy(&orig_ex, ex, sizeof(orig_ex)); | ||
2822 | ex->ee_len = cpu_to_le16(split - ee_block); | ||
2823 | if (split_flag & EXT4_EXT_MARK_UNINIT1) | ||
2824 | ext4_ext_mark_uninitialized(ex); | ||
2825 | |||
2826 | /* | ||
2827 | * path may lead to new leaf, not to original leaf any more | ||
2828 | * after ext4_ext_insert_extent() returns, | ||
2829 | */ | ||
2830 | err = ext4_ext_dirty(handle, inode, path + depth); | ||
2831 | if (err) | ||
2832 | goto fix_extent_len; | ||
2833 | |||
2834 | ex2 = &newex; | ||
2835 | ex2->ee_block = cpu_to_le32(split); | ||
2836 | ex2->ee_len = cpu_to_le16(ee_len - (split - ee_block)); | ||
2837 | ext4_ext_store_pblock(ex2, newblock); | ||
2838 | if (split_flag & EXT4_EXT_MARK_UNINIT2) | ||
2839 | ext4_ext_mark_uninitialized(ex2); | ||
2840 | |||
2841 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); | ||
2842 | if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { | ||
2843 | err = ext4_ext_zeroout(inode, &orig_ex); | ||
2844 | if (err) | ||
2845 | goto fix_extent_len; | ||
2846 | /* update the extent length and mark as initialized */ | ||
2847 | ex->ee_len = cpu_to_le16(ee_len); | ||
2848 | ext4_ext_try_to_merge(inode, path, ex); | ||
2849 | err = ext4_ext_dirty(handle, inode, path + depth); | ||
2850 | goto out; | ||
2851 | } else if (err) | ||
2852 | goto fix_extent_len; | ||
2853 | |||
2854 | out: | ||
2855 | ext4_ext_show_leaf(inode, path); | ||
2856 | return err; | ||
2857 | |||
2858 | fix_extent_len: | ||
2859 | ex->ee_len = orig_ex.ee_len; | ||
2860 | ext4_ext_dirty(handle, inode, path + depth); | ||
2861 | return err; | ||
2862 | } | ||
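To make the two cases concrete, a short worked example (editorial illustration, not part of the patch):

	/*
	 * Suppose ex covers logical blocks [100, 116) and is uninitialized.
	 *
	 *   split = 108, split_flag = EXT4_EXT_MARK_UNINIT1|EXT4_EXT_MARK_UNINIT2
	 *     -> case a: ex becomes [100, 108), newex becomes [108, 116),
	 *        both still uninitialized.
	 *
	 *   split = 108, split_flag = EXT4_EXT_MARK_UNINIT1
	 *     -> case a: [100, 108) stays uninitialized, [108, 116) is
	 *        inserted as an initialized extent.
	 *
	 *   split = 100 (== ee_block)
	 *     -> case b: nothing is inserted; ex itself is marked
	 *        uninitialized or initialized depending on EXT4_EXT_MARK_UNINIT2.
	 */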
2863 | |||
2864 | /* | ||
2865 | * ext4_split_extent() splits an extent and marks the part covered by @map | ||
2866 | * as @split_flag indicates. | ||
2867 | * | ||
2868 | * It may result in splitting the extent into multiple extents (up to three). | ||
2869 | * There are three possibilities: | ||
2870 | * a> There is no split required | ||
2871 | * b> Splits into two extents: the split happens at either end of the extent | ||
2872 | * c> Splits into three extents: someone is splitting in the middle of the extent | ||
2873 | * | ||
2874 | */ | ||
2875 | static int ext4_split_extent(handle_t *handle, | ||
2876 | struct inode *inode, | ||
2877 | struct ext4_ext_path *path, | ||
2878 | struct ext4_map_blocks *map, | ||
2879 | int split_flag, | ||
2880 | int flags) | ||
2881 | { | ||
2882 | ext4_lblk_t ee_block; | ||
2883 | struct ext4_extent *ex; | ||
2884 | unsigned int ee_len, depth; | ||
2885 | int err = 0; | ||
2886 | int uninitialized; | ||
2887 | int split_flag1, flags1; | ||
2888 | |||
2889 | depth = ext_depth(inode); | ||
2890 | ex = path[depth].p_ext; | ||
2891 | ee_block = le32_to_cpu(ex->ee_block); | ||
2892 | ee_len = ext4_ext_get_actual_len(ex); | ||
2893 | uninitialized = ext4_ext_is_uninitialized(ex); | ||
2894 | |||
2895 | if (map->m_lblk + map->m_len < ee_block + ee_len) { | ||
2896 | split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? | ||
2897 | EXT4_EXT_MAY_ZEROOUT : 0; | ||
2898 | flags1 = flags | EXT4_GET_BLOCKS_PRE_IO; | ||
2899 | if (uninitialized) | ||
2900 | split_flag1 |= EXT4_EXT_MARK_UNINIT1 | | ||
2901 | EXT4_EXT_MARK_UNINIT2; | ||
2902 | err = ext4_split_extent_at(handle, inode, path, | ||
2903 | map->m_lblk + map->m_len, split_flag1, flags1); | ||
2904 | if (err) | ||
2905 | goto out; | ||
2906 | } | ||
2907 | |||
2908 | ext4_ext_drop_refs(path); | ||
2909 | path = ext4_ext_find_extent(inode, map->m_lblk, path); | ||
2910 | if (IS_ERR(path)) | ||
2911 | return PTR_ERR(path); | ||
2912 | |||
2913 | if (map->m_lblk >= ee_block) { | ||
2914 | split_flag1 = split_flag & EXT4_EXT_MAY_ZEROOUT ? | ||
2915 | EXT4_EXT_MAY_ZEROOUT : 0; | ||
2916 | if (uninitialized) | ||
2917 | split_flag1 |= EXT4_EXT_MARK_UNINIT1; | ||
2918 | if (split_flag & EXT4_EXT_MARK_UNINIT2) | ||
2919 | split_flag1 |= EXT4_EXT_MARK_UNINIT2; | ||
2920 | err = ext4_split_extent_at(handle, inode, path, | ||
2921 | map->m_lblk, split_flag1, flags); | ||
2922 | if (err) | ||
2923 | goto out; | ||
2924 | } | ||
2925 | |||
2926 | ext4_ext_show_leaf(inode, path); | ||
2927 | out: | ||
2928 | return err ? err : map->m_len; | ||
2929 | } | ||
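And the three-way case: when @map lies strictly inside the extent, ext4_split_extent_at() runs twice, once on each boundary (editorial illustration only):

	/*
	 * Suppose ex covers [100, 132), map->m_lblk = 110, map->m_len = 8.
	 *
	 *   1st call splits at 118 (m_lblk + m_len): [100, 118) and [118, 132)
	 *   2nd call, on the refreshed path, splits at 110: [100, 110) and
	 *   [110, 118)
	 *
	 * leaving three extents, with the middle one, [110, 118), marked as
	 * split_flag requests.
	 */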
2930 | |||
2532 | #define EXT4_EXT_ZERO_LEN 7 | 2931 | #define EXT4_EXT_ZERO_LEN 7 |
2533 | /* | 2932 | /* |
2534 | * This function is called by ext4_ext_map_blocks() if someone tries to write | 2933 | * This function is called by ext4_ext_map_blocks() if someone tries to write |
@@ -2545,17 +2944,13 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2545 | struct ext4_map_blocks *map, | 2944 | struct ext4_map_blocks *map, |
2546 | struct ext4_ext_path *path) | 2945 | struct ext4_ext_path *path) |
2547 | { | 2946 | { |
2548 | struct ext4_extent *ex, newex, orig_ex; | 2947 | struct ext4_map_blocks split_map; |
2549 | struct ext4_extent *ex1 = NULL; | 2948 | struct ext4_extent zero_ex; |
2550 | struct ext4_extent *ex2 = NULL; | 2949 | struct ext4_extent *ex; |
2551 | struct ext4_extent *ex3 = NULL; | ||
2552 | struct ext4_extent_header *eh; | ||
2553 | ext4_lblk_t ee_block, eof_block; | 2950 | ext4_lblk_t ee_block, eof_block; |
2554 | unsigned int allocated, ee_len, depth; | 2951 | unsigned int allocated, ee_len, depth; |
2555 | ext4_fsblk_t newblock; | ||
2556 | int err = 0; | 2952 | int err = 0; |
2557 | int ret = 0; | 2953 | int split_flag = 0; |
2558 | int may_zeroout; | ||
2559 | 2954 | ||
2560 | ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" | 2955 | ext_debug("ext4_ext_convert_to_initialized: inode %lu, logical" |
2561 | "block %llu, max_blocks %u\n", inode->i_ino, | 2956 | "block %llu, max_blocks %u\n", inode->i_ino, |
@@ -2567,280 +2962,86 @@ static int ext4_ext_convert_to_initialized(handle_t *handle, | |||
2567 | eof_block = map->m_lblk + map->m_len; | 2962 | eof_block = map->m_lblk + map->m_len; |
2568 | 2963 | ||
2569 | depth = ext_depth(inode); | 2964 | depth = ext_depth(inode); |
2570 | eh = path[depth].p_hdr; | ||
2571 | ex = path[depth].p_ext; | 2965 | ex = path[depth].p_ext; |
2572 | ee_block = le32_to_cpu(ex->ee_block); | 2966 | ee_block = le32_to_cpu(ex->ee_block); |
2573 | ee_len = ext4_ext_get_actual_len(ex); | 2967 | ee_len = ext4_ext_get_actual_len(ex); |
2574 | allocated = ee_len - (map->m_lblk - ee_block); | 2968 | allocated = ee_len - (map->m_lblk - ee_block); |
2575 | newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex); | ||
2576 | |||
2577 | ex2 = ex; | ||
2578 | orig_ex.ee_block = ex->ee_block; | ||
2579 | orig_ex.ee_len = cpu_to_le16(ee_len); | ||
2580 | ext4_ext_store_pblock(&orig_ex, ext4_ext_pblock(ex)); | ||
2581 | 2969 | ||
2970 | WARN_ON(map->m_lblk < ee_block); | ||
2582 | /* | 2971 | /* |
2583 | * It is safe to convert extent to initialized via explicit | 2972 | * It is safe to convert extent to initialized via explicit |
2584 | * zeroout only if extent is fully inside i_size or new_size. | 2973 | * zeroout only if extent is fully inside i_size or new_size. |
2585 | */ | 2974 | */ |
2586 | may_zeroout = ee_block + ee_len <= eof_block; | 2975 | split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; |
2587 | 2976 | ||
2588 | err = ext4_ext_get_access(handle, inode, path + depth); | ||
2589 | if (err) | ||
2590 | goto out; | ||
2591 | /* If extent has less than 2*EXT4_EXT_ZERO_LEN zeroout directly */ | 2977 | /* If extent has less than 2*EXT4_EXT_ZERO_LEN zeroout directly */ |
2592 | if (ee_len <= 2*EXT4_EXT_ZERO_LEN && may_zeroout) { | 2978 | if (ee_len <= 2*EXT4_EXT_ZERO_LEN && |
2593 | err = ext4_ext_zeroout(inode, &orig_ex); | 2979 | (EXT4_EXT_MAY_ZEROOUT & split_flag)) { |
2980 | err = ext4_ext_zeroout(inode, ex); | ||
2594 | if (err) | 2981 | if (err) |
2595 | goto fix_extent_len; | ||
2596 | /* update the extent length and mark as initialized */ | ||
2597 | ex->ee_block = orig_ex.ee_block; | ||
2598 | ex->ee_len = orig_ex.ee_len; | ||
2599 | ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex)); | ||
2600 | ext4_ext_dirty(handle, inode, path + depth); | ||
2601 | /* zeroed the full extent */ | ||
2602 | return allocated; | ||
2603 | } | ||
2604 | |||
2605 | /* ex1: ee_block to map->m_lblk - 1 : uninitialized */ | ||
2606 | if (map->m_lblk > ee_block) { | ||
2607 | ex1 = ex; | ||
2608 | ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); | ||
2609 | ext4_ext_mark_uninitialized(ex1); | ||
2610 | ex2 = &newex; | ||
2611 | } | ||
2612 | /* | ||
2613 | * for sanity, update the length of the ex2 extent before | ||
2614 | * we insert ex3, if ex1 is NULL. This is to avoid temporary | ||
2615 | * overlap of blocks. | ||
2616 | */ | ||
2617 | if (!ex1 && allocated > map->m_len) | ||
2618 | ex2->ee_len = cpu_to_le16(map->m_len); | ||
2619 | /* ex3: to ee_block + ee_len : uninitialised */ | ||
2620 | if (allocated > map->m_len) { | ||
2621 | unsigned int newdepth; | ||
2622 | /* If extent has less than EXT4_EXT_ZERO_LEN zerout directly */ | ||
2623 | if (allocated <= EXT4_EXT_ZERO_LEN && may_zeroout) { | ||
2624 | /* | ||
2625 | * map->m_lblk == ee_block is handled by the zerouout | ||
2626 | * at the beginning. | ||
2627 | * Mark first half uninitialized. | ||
2628 | * Mark second half initialized and zero out the | ||
2629 | * initialized extent | ||
2630 | */ | ||
2631 | ex->ee_block = orig_ex.ee_block; | ||
2632 | ex->ee_len = cpu_to_le16(ee_len - allocated); | ||
2633 | ext4_ext_mark_uninitialized(ex); | ||
2634 | ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex)); | ||
2635 | ext4_ext_dirty(handle, inode, path + depth); | ||
2636 | |||
2637 | ex3 = &newex; | ||
2638 | ex3->ee_block = cpu_to_le32(map->m_lblk); | ||
2639 | ext4_ext_store_pblock(ex3, newblock); | ||
2640 | ex3->ee_len = cpu_to_le16(allocated); | ||
2641 | err = ext4_ext_insert_extent(handle, inode, path, | ||
2642 | ex3, 0); | ||
2643 | if (err == -ENOSPC) { | ||
2644 | err = ext4_ext_zeroout(inode, &orig_ex); | ||
2645 | if (err) | ||
2646 | goto fix_extent_len; | ||
2647 | ex->ee_block = orig_ex.ee_block; | ||
2648 | ex->ee_len = orig_ex.ee_len; | ||
2649 | ext4_ext_store_pblock(ex, | ||
2650 | ext4_ext_pblock(&orig_ex)); | ||
2651 | ext4_ext_dirty(handle, inode, path + depth); | ||
2652 | /* blocks available from map->m_lblk */ | ||
2653 | return allocated; | ||
2654 | |||
2655 | } else if (err) | ||
2656 | goto fix_extent_len; | ||
2657 | |||
2658 | /* | ||
2659 | * We need to zero out the second half because | ||
2660 | * an fallocate request can update file size and | ||
2661 | * converting the second half to initialized extent | ||
2662 | * implies that we can leak some junk data to user | ||
2663 | * space. | ||
2664 | */ | ||
2665 | err = ext4_ext_zeroout(inode, ex3); | ||
2666 | if (err) { | ||
2667 | /* | ||
2668 | * We should actually mark the | ||
2669 | * second half as uninit and return error | ||
2670 | * Insert would have changed the extent | ||
2671 | */ | ||
2672 | depth = ext_depth(inode); | ||
2673 | ext4_ext_drop_refs(path); | ||
2674 | path = ext4_ext_find_extent(inode, map->m_lblk, | ||
2675 | path); | ||
2676 | if (IS_ERR(path)) { | ||
2677 | err = PTR_ERR(path); | ||
2678 | return err; | ||
2679 | } | ||
2680 | /* get the second half extent details */ | ||
2681 | ex = path[depth].p_ext; | ||
2682 | err = ext4_ext_get_access(handle, inode, | ||
2683 | path + depth); | ||
2684 | if (err) | ||
2685 | return err; | ||
2686 | ext4_ext_mark_uninitialized(ex); | ||
2687 | ext4_ext_dirty(handle, inode, path + depth); | ||
2688 | return err; | ||
2689 | } | ||
2690 | |||
2691 | /* zeroed the second half */ | ||
2692 | return allocated; | ||
2693 | } | ||
2694 | ex3 = &newex; | ||
2695 | ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len); | ||
2696 | ext4_ext_store_pblock(ex3, newblock + map->m_len); | ||
2697 | ex3->ee_len = cpu_to_le16(allocated - map->m_len); | ||
2698 | ext4_ext_mark_uninitialized(ex3); | ||
2699 | err = ext4_ext_insert_extent(handle, inode, path, ex3, 0); | ||
2700 | if (err == -ENOSPC && may_zeroout) { | ||
2701 | err = ext4_ext_zeroout(inode, &orig_ex); | ||
2702 | if (err) | ||
2703 | goto fix_extent_len; | ||
2704 | /* update the extent length and mark as initialized */ | ||
2705 | ex->ee_block = orig_ex.ee_block; | ||
2706 | ex->ee_len = orig_ex.ee_len; | ||
2707 | ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex)); | ||
2708 | ext4_ext_dirty(handle, inode, path + depth); | ||
2709 | /* zeroed the full extent */ | ||
2710 | /* blocks available from map->m_lblk */ | ||
2711 | return allocated; | ||
2712 | |||
2713 | } else if (err) | ||
2714 | goto fix_extent_len; | ||
2715 | /* | ||
2716 | * The depth, and hence eh & ex might change | ||
2717 | * as part of the insert above. | ||
2718 | */ | ||
2719 | newdepth = ext_depth(inode); | ||
2720 | /* | ||
2721 | * update the extent length after successful insert of the | ||
2722 | * split extent | ||
2723 | */ | ||
2724 | ee_len -= ext4_ext_get_actual_len(ex3); | ||
2725 | orig_ex.ee_len = cpu_to_le16(ee_len); | ||
2726 | may_zeroout = ee_block + ee_len <= eof_block; | ||
2727 | |||
2728 | depth = newdepth; | ||
2729 | ext4_ext_drop_refs(path); | ||
2730 | path = ext4_ext_find_extent(inode, map->m_lblk, path); | ||
2731 | if (IS_ERR(path)) { | ||
2732 | err = PTR_ERR(path); | ||
2733 | goto out; | 2982 | goto out; |
2734 | } | ||
2735 | eh = path[depth].p_hdr; | ||
2736 | ex = path[depth].p_ext; | ||
2737 | if (ex2 != &newex) | ||
2738 | ex2 = ex; | ||
2739 | 2983 | ||
2740 | err = ext4_ext_get_access(handle, inode, path + depth); | 2984 | err = ext4_ext_get_access(handle, inode, path + depth); |
2741 | if (err) | 2985 | if (err) |
2742 | goto out; | 2986 | goto out; |
2743 | 2987 | ext4_ext_mark_initialized(ex); | |
2744 | allocated = map->m_len; | 2988 | ext4_ext_try_to_merge(inode, path, ex); |
2745 | 2989 | err = ext4_ext_dirty(handle, inode, path + depth); | |
2746 | /* If extent has less than EXT4_EXT_ZERO_LEN and we are trying | 2990 | goto out; |
2747 | * to insert a extent in the middle zerout directly | ||
2748 | * otherwise give the extent a chance to merge to left | ||
2749 | */ | ||
2750 | if (le16_to_cpu(orig_ex.ee_len) <= EXT4_EXT_ZERO_LEN && | ||
2751 | map->m_lblk != ee_block && may_zeroout) { | ||
2752 | err = ext4_ext_zeroout(inode, &orig_ex); | ||
2753 | if (err) | ||
2754 | goto fix_extent_len; | ||
2755 | /* update the extent length and mark as initialized */ | ||
2756 | ex->ee_block = orig_ex.ee_block; | ||
2757 | ex->ee_len = orig_ex.ee_len; | ||
2758 | ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex)); | ||
2759 | ext4_ext_dirty(handle, inode, path + depth); | ||
2760 | /* zero out the first half */ | ||
2761 | /* blocks available from map->m_lblk */ | ||
2762 | return allocated; | ||
2763 | } | ||
2764 | } | ||
2765 | /* | ||
2766 | * If there was a change of depth as part of the | ||
2767 | * insertion of ex3 above, we need to update the length | ||
2768 | * of the ex1 extent again here | ||
2769 | */ | ||
2770 | if (ex1 && ex1 != ex) { | ||
2771 | ex1 = ex; | ||
2772 | ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); | ||
2773 | ext4_ext_mark_uninitialized(ex1); | ||
2774 | ex2 = &newex; | ||
2775 | } | ||
2776 | /* ex2: map->m_lblk to map->m_lblk + maxblocks-1 : initialised */ | ||
2777 | ex2->ee_block = cpu_to_le32(map->m_lblk); | ||
2778 | ext4_ext_store_pblock(ex2, newblock); | ||
2779 | ex2->ee_len = cpu_to_le16(allocated); | ||
2780 | if (ex2 != ex) | ||
2781 | goto insert; | ||
2782 | /* | ||
2783 | * New (initialized) extent starts from the first block | ||
2784 | * in the current extent. i.e., ex2 == ex | ||
2785 | * We have to see if it can be merged with the extent | ||
2786 | * on the left. | ||
2787 | */ | ||
2788 | if (ex2 > EXT_FIRST_EXTENT(eh)) { | ||
2789 | /* | ||
2790 | * To merge left, pass "ex2 - 1" to try_to_merge(), | ||
2791 | * since it merges towards right _only_. | ||
2792 | */ | ||
2793 | ret = ext4_ext_try_to_merge(inode, path, ex2 - 1); | ||
2794 | if (ret) { | ||
2795 | err = ext4_ext_correct_indexes(handle, inode, path); | ||
2796 | if (err) | ||
2797 | goto out; | ||
2798 | depth = ext_depth(inode); | ||
2799 | ex2--; | ||
2800 | } | ||
2801 | } | 2991 | } |
2992 | |||
2802 | /* | 2993 | /* |
2803 | * Try to Merge towards right. This might be required | 2994 | * four cases: |
2804 | * only when the whole extent is being written to. | 2995 | * 1. split the extent into three extents. |
2805 | * i.e. ex2 == ex and ex3 == NULL. | 2996 | * 2. split the extent into two extents, zeroout the first half. |
2997 | * 3. split the extent into two extents, zeroout the second half. | ||
2998 | * 4. split the extent into two extents without zeroout. | ||
2806 | */ | 2999 | */ |
2807 | if (!ex3) { | 3000 | split_map.m_lblk = map->m_lblk; |
2808 | ret = ext4_ext_try_to_merge(inode, path, ex2); | 3001 | split_map.m_len = map->m_len; |
2809 | if (ret) { | 3002 | |
2810 | err = ext4_ext_correct_indexes(handle, inode, path); | 3003 | if (allocated > map->m_len) { |
3004 | if (allocated <= EXT4_EXT_ZERO_LEN && | ||
3005 | (EXT4_EXT_MAY_ZEROOUT & split_flag)) { | ||
3006 | /* case 3 */ | ||
3007 | zero_ex.ee_block = | ||
3008 | cpu_to_le32(map->m_lblk); | ||
3009 | zero_ex.ee_len = cpu_to_le16(allocated); | ||
3010 | ext4_ext_store_pblock(&zero_ex, | ||
3011 | ext4_ext_pblock(ex) + map->m_lblk - ee_block); | ||
3012 | err = ext4_ext_zeroout(inode, &zero_ex); | ||
2811 | if (err) | 3013 | if (err) |
2812 | goto out; | 3014 | goto out; |
3015 | split_map.m_lblk = map->m_lblk; | ||
3016 | split_map.m_len = allocated; | ||
3017 | } else if ((map->m_lblk - ee_block + map->m_len < | ||
3018 | EXT4_EXT_ZERO_LEN) && | ||
3019 | (EXT4_EXT_MAY_ZEROOUT & split_flag)) { | ||
3020 | /* case 2 */ | ||
3021 | if (map->m_lblk != ee_block) { | ||
3022 | zero_ex.ee_block = ex->ee_block; | ||
3023 | zero_ex.ee_len = cpu_to_le16(map->m_lblk - | ||
3024 | ee_block); | ||
3025 | ext4_ext_store_pblock(&zero_ex, | ||
3026 | ext4_ext_pblock(ex)); | ||
3027 | err = ext4_ext_zeroout(inode, &zero_ex); | ||
3028 | if (err) | ||
3029 | goto out; | ||
3030 | } | ||
3031 | |||
3032 | split_map.m_lblk = ee_block; | ||
3033 | split_map.m_len = map->m_lblk - ee_block + map->m_len; | ||
3034 | allocated = map->m_len; | ||
2813 | } | 3035 | } |
2814 | } | 3036 | } |
2815 | /* Mark modified extent as dirty */ | 3037 | |
2816 | err = ext4_ext_dirty(handle, inode, path + depth); | 3038 | allocated = ext4_split_extent(handle, inode, path, |
2817 | goto out; | 3039 | &split_map, split_flag, 0); |
2818 | insert: | 3040 | if (allocated < 0) |
2819 | err = ext4_ext_insert_extent(handle, inode, path, &newex, 0); | 3041 | err = allocated; |
2820 | if (err == -ENOSPC && may_zeroout) { | 3042 | |
2821 | err = ext4_ext_zeroout(inode, &orig_ex); | ||
2822 | if (err) | ||
2823 | goto fix_extent_len; | ||
2824 | /* update the extent length and mark as initialized */ | ||
2825 | ex->ee_block = orig_ex.ee_block; | ||
2826 | ex->ee_len = orig_ex.ee_len; | ||
2827 | ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex)); | ||
2828 | ext4_ext_dirty(handle, inode, path + depth); | ||
2829 | /* zero out the first half */ | ||
2830 | return allocated; | ||
2831 | } else if (err) | ||
2832 | goto fix_extent_len; | ||
2833 | out: | 3043 | out: |
2834 | ext4_ext_show_leaf(inode, path); | ||
2835 | return err ? err : allocated; | 3044 | return err ? err : allocated; |
2836 | |||
2837 | fix_extent_len: | ||
2838 | ex->ee_block = orig_ex.ee_block; | ||
2839 | ex->ee_len = orig_ex.ee_len; | ||
2840 | ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex)); | ||
2841 | ext4_ext_mark_uninitialized(ex); | ||
2842 | ext4_ext_dirty(handle, inode, path + depth); | ||
2843 | return err; | ||
2844 | } | 3045 | } |
2845 | 3046 | ||
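The rewritten ext4_ext_convert_to_initialized() above no longer juggles ex1/ex2/ex3 by hand: it either zeroes the whole extent when that is cheap and safe (at most 2*EXT4_EXT_ZERO_LEN blocks and fully inside i_size), or it fills in a single split_map describing the range handed to ext4_split_extent(), optionally zeroing a head or tail piece via zero_ex first. The standalone sketch below models only that case selection; pick_split(), struct range and the zero_len/may_zeroout parameters are simplifications of mine, not kernel code.

#include <stdio.h>

struct range { unsigned int lblk, len; };

/*
 * Model of the case selection in ext4_ext_convert_to_initialized():
 * returns 0 when the whole extent can simply be zeroed, otherwise the
 * case number from the patch comment, and fills in the range that
 * would be passed to the splitter (mirroring split_map).
 */
static int pick_split(unsigned int ee_block, unsigned int ee_len,
		      unsigned int m_lblk, unsigned int m_len,
		      unsigned int zero_len, int may_zeroout,
		      struct range *split)
{
	unsigned int allocated = ee_len - (m_lblk - ee_block);

	split->lblk = m_lblk;
	split->len = m_len;

	if (ee_len <= 2 * zero_len && may_zeroout)
		return 0;			/* zero out the full extent */

	if (allocated > m_len) {
		if (allocated <= zero_len && may_zeroout) {
			split->len = allocated;	/* case 3: zero out the tail */
			return 3;
		}
		if (m_lblk - ee_block + m_len < zero_len && may_zeroout) {
			split->lblk = ee_block;	/* case 2: zero out the head */
			split->len = m_lblk - ee_block + m_len;
			return 2;
		}
		return 1;			/* case 1: three-way split */
	}
	return 4;				/* case 4: plain split, no zeroout */
}

int main(void)
{
	struct range r;
	int c = pick_split(100, 20, 110, 4, 7, 1, &r);

	printf("case %d: split at %u, len %u\n", c, r.lblk, r.len);
	return 0;
}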
2846 | /* | 3047 | /* |
@@ -2871,15 +3072,11 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
2871 | struct ext4_ext_path *path, | 3072 | struct ext4_ext_path *path, |
2872 | int flags) | 3073 | int flags) |
2873 | { | 3074 | { |
2874 | struct ext4_extent *ex, newex, orig_ex; | 3075 | ext4_lblk_t eof_block; |
2875 | struct ext4_extent *ex1 = NULL; | 3076 | ext4_lblk_t ee_block; |
2876 | struct ext4_extent *ex2 = NULL; | 3077 | struct ext4_extent *ex; |
2877 | struct ext4_extent *ex3 = NULL; | 3078 | unsigned int ee_len; |
2878 | ext4_lblk_t ee_block, eof_block; | 3079 | int split_flag = 0, depth; |
2879 | unsigned int allocated, ee_len, depth; | ||
2880 | ext4_fsblk_t newblock; | ||
2881 | int err = 0; | ||
2882 | int may_zeroout; | ||
2883 | 3080 | ||
2884 | ext_debug("ext4_split_unwritten_extents: inode %lu, logical" | 3081 | ext_debug("ext4_split_unwritten_extents: inode %lu, logical" |
2885 | "block %llu, max_blocks %u\n", inode->i_ino, | 3082 | "block %llu, max_blocks %u\n", inode->i_ino, |
@@ -2889,156 +3086,22 @@ static int ext4_split_unwritten_extents(handle_t *handle, | |||
2889 | inode->i_sb->s_blocksize_bits; | 3086 | inode->i_sb->s_blocksize_bits; |
2890 | if (eof_block < map->m_lblk + map->m_len) | 3087 | if (eof_block < map->m_lblk + map->m_len) |
2891 | eof_block = map->m_lblk + map->m_len; | 3088 | eof_block = map->m_lblk + map->m_len; |
2892 | |||
2893 | depth = ext_depth(inode); | ||
2894 | ex = path[depth].p_ext; | ||
2895 | ee_block = le32_to_cpu(ex->ee_block); | ||
2896 | ee_len = ext4_ext_get_actual_len(ex); | ||
2897 | allocated = ee_len - (map->m_lblk - ee_block); | ||
2898 | newblock = map->m_lblk - ee_block + ext4_ext_pblock(ex); | ||
2899 | |||
2900 | ex2 = ex; | ||
2901 | orig_ex.ee_block = ex->ee_block; | ||
2902 | orig_ex.ee_len = cpu_to_le16(ee_len); | ||
2903 | ext4_ext_store_pblock(&orig_ex, ext4_ext_pblock(ex)); | ||
2904 | |||
2905 | /* | 3089 | /* |
2906 | * It is safe to convert extent to initialized via explicit | 3090 | * It is safe to convert extent to initialized via explicit |
2907 | * zeroout only if extent is fully inside i_size or new_size. | 3091 | * zeroout only if extent is fully inside i_size or new_size. |
2908 | */ | 3092 | */ |
2909 | may_zeroout = ee_block + ee_len <= eof_block; | 3093 | depth = ext_depth(inode); |
2910 | 3094 | ex = path[depth].p_ext; | |
2911 | /* | 3095 | ee_block = le32_to_cpu(ex->ee_block); |
2912 | * If the uninitialized extent begins at the same logical | 3096 | ee_len = ext4_ext_get_actual_len(ex); |
2913 | * block where the write begins, and the write completely | ||
2914 | * covers the extent, then we don't need to split it. | ||
2915 | */ | ||
2916 | if ((map->m_lblk == ee_block) && (allocated <= map->m_len)) | ||
2917 | return allocated; | ||
2918 | |||
2919 | err = ext4_ext_get_access(handle, inode, path + depth); | ||
2920 | if (err) | ||
2921 | goto out; | ||
2922 | /* ex1: ee_block to map->m_lblk - 1 : uninitialized */ | ||
2923 | if (map->m_lblk > ee_block) { | ||
2924 | ex1 = ex; | ||
2925 | ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); | ||
2926 | ext4_ext_mark_uninitialized(ex1); | ||
2927 | ex2 = &newex; | ||
2928 | } | ||
2929 | /* | ||
2930 | * for sanity, update the length of the ex2 extent before | ||
2931 | * we insert ex3, if ex1 is NULL. This is to avoid temporary | ||
2932 | * overlap of blocks. | ||
2933 | */ | ||
2934 | if (!ex1 && allocated > map->m_len) | ||
2935 | ex2->ee_len = cpu_to_le16(map->m_len); | ||
2936 | /* ex3: to ee_block + ee_len : uninitialised */ | ||
2937 | if (allocated > map->m_len) { | ||
2938 | unsigned int newdepth; | ||
2939 | ex3 = &newex; | ||
2940 | ex3->ee_block = cpu_to_le32(map->m_lblk + map->m_len); | ||
2941 | ext4_ext_store_pblock(ex3, newblock + map->m_len); | ||
2942 | ex3->ee_len = cpu_to_le16(allocated - map->m_len); | ||
2943 | ext4_ext_mark_uninitialized(ex3); | ||
2944 | err = ext4_ext_insert_extent(handle, inode, path, ex3, flags); | ||
2945 | if (err == -ENOSPC && may_zeroout) { | ||
2946 | err = ext4_ext_zeroout(inode, &orig_ex); | ||
2947 | if (err) | ||
2948 | goto fix_extent_len; | ||
2949 | /* update the extent length and mark as initialized */ | ||
2950 | ex->ee_block = orig_ex.ee_block; | ||
2951 | ex->ee_len = orig_ex.ee_len; | ||
2952 | ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex)); | ||
2953 | ext4_ext_dirty(handle, inode, path + depth); | ||
2954 | /* zeroed the full extent */ | ||
2955 | /* blocks available from map->m_lblk */ | ||
2956 | return allocated; | ||
2957 | |||
2958 | } else if (err) | ||
2959 | goto fix_extent_len; | ||
2960 | /* | ||
2961 | * The depth, and hence eh & ex might change | ||
2962 | * as part of the insert above. | ||
2963 | */ | ||
2964 | newdepth = ext_depth(inode); | ||
2965 | /* | ||
2966 | * update the extent length after successful insert of the | ||
2967 | * split extent | ||
2968 | */ | ||
2969 | ee_len -= ext4_ext_get_actual_len(ex3); | ||
2970 | orig_ex.ee_len = cpu_to_le16(ee_len); | ||
2971 | may_zeroout = ee_block + ee_len <= eof_block; | ||
2972 | |||
2973 | depth = newdepth; | ||
2974 | ext4_ext_drop_refs(path); | ||
2975 | path = ext4_ext_find_extent(inode, map->m_lblk, path); | ||
2976 | if (IS_ERR(path)) { | ||
2977 | err = PTR_ERR(path); | ||
2978 | goto out; | ||
2979 | } | ||
2980 | ex = path[depth].p_ext; | ||
2981 | if (ex2 != &newex) | ||
2982 | ex2 = ex; | ||
2983 | 3097 | ||
2984 | err = ext4_ext_get_access(handle, inode, path + depth); | 3098 | split_flag |= ee_block + ee_len <= eof_block ? EXT4_EXT_MAY_ZEROOUT : 0; |
2985 | if (err) | 3099 | split_flag |= EXT4_EXT_MARK_UNINIT2; |
2986 | goto out; | ||
2987 | 3100 | ||
2988 | allocated = map->m_len; | 3101 | flags |= EXT4_GET_BLOCKS_PRE_IO; |
2989 | } | 3102 | return ext4_split_extent(handle, inode, path, map, split_flag, flags); |
2990 | /* | ||
2991 | * If there was a change of depth as part of the | ||
2992 | * insertion of ex3 above, we need to update the length | ||
2993 | * of the ex1 extent again here | ||
2994 | */ | ||
2995 | if (ex1 && ex1 != ex) { | ||
2996 | ex1 = ex; | ||
2997 | ex1->ee_len = cpu_to_le16(map->m_lblk - ee_block); | ||
2998 | ext4_ext_mark_uninitialized(ex1); | ||
2999 | ex2 = &newex; | ||
3000 | } | ||
3001 | /* | ||
3002 | * ex2: map->m_lblk to map->m_lblk + map->m_len-1 : to be written | ||
3003 | * using direct I/O, uninitialised still. | ||
3004 | */ | ||
3005 | ex2->ee_block = cpu_to_le32(map->m_lblk); | ||
3006 | ext4_ext_store_pblock(ex2, newblock); | ||
3007 | ex2->ee_len = cpu_to_le16(allocated); | ||
3008 | ext4_ext_mark_uninitialized(ex2); | ||
3009 | if (ex2 != ex) | ||
3010 | goto insert; | ||
3011 | /* Mark modified extent as dirty */ | ||
3012 | err = ext4_ext_dirty(handle, inode, path + depth); | ||
3013 | ext_debug("out here\n"); | ||
3014 | goto out; | ||
3015 | insert: | ||
3016 | err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); | ||
3017 | if (err == -ENOSPC && may_zeroout) { | ||
3018 | err = ext4_ext_zeroout(inode, &orig_ex); | ||
3019 | if (err) | ||
3020 | goto fix_extent_len; | ||
3021 | /* update the extent length and mark as initialized */ | ||
3022 | ex->ee_block = orig_ex.ee_block; | ||
3023 | ex->ee_len = orig_ex.ee_len; | ||
3024 | ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex)); | ||
3025 | ext4_ext_dirty(handle, inode, path + depth); | ||
3026 | /* zero out the first half */ | ||
3027 | return allocated; | ||
3028 | } else if (err) | ||
3029 | goto fix_extent_len; | ||
3030 | out: | ||
3031 | ext4_ext_show_leaf(inode, path); | ||
3032 | return err ? err : allocated; | ||
3033 | |||
3034 | fix_extent_len: | ||
3035 | ex->ee_block = orig_ex.ee_block; | ||
3036 | ex->ee_len = orig_ex.ee_len; | ||
3037 | ext4_ext_store_pblock(ex, ext4_ext_pblock(&orig_ex)); | ||
3038 | ext4_ext_mark_uninitialized(ex); | ||
3039 | ext4_ext_dirty(handle, inode, path + depth); | ||
3040 | return err; | ||
3041 | } | 3103 | } |
3104 | |||
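After the same refactoring, ext4_split_unwritten_extents() stops duplicating the split machinery for the direct-I/O path and only decides the split flags before deferring to ext4_split_extent(). A minimal sketch of that flag wiring follows; the flag names are taken from the patch, but the numeric values and the helper name are illustrative only.

/* Illustrative values -- the real definitions live in the ext4 sources. */
#define EXT4_EXT_MAY_ZEROOUT	0x1	/* extent is fully inside i_size/new_size */
#define EXT4_EXT_MARK_UNINIT2	0x2	/* keep the second half uninitialized */

static int dio_split_flags(unsigned int ee_block, unsigned int ee_len,
			   unsigned int eof_block)
{
	int split_flag = 0;

	if (ee_block + ee_len <= eof_block)
		split_flag |= EXT4_EXT_MAY_ZEROOUT;
	/* the range being written by DIO stays unwritten until end_io */
	split_flag |= EXT4_EXT_MARK_UNINIT2;
	return split_flag;
}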
3042 | static int ext4_convert_unwritten_extents_endio(handle_t *handle, | 3105 | static int ext4_convert_unwritten_extents_endio(handle_t *handle, |
3043 | struct inode *inode, | 3106 | struct inode *inode, |
3044 | struct ext4_ext_path *path) | 3107 | struct ext4_ext_path *path) |
@@ -3047,46 +3110,27 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle, | |||
3047 | struct ext4_extent_header *eh; | 3110 | struct ext4_extent_header *eh; |
3048 | int depth; | 3111 | int depth; |
3049 | int err = 0; | 3112 | int err = 0; |
3050 | int ret = 0; | ||
3051 | 3113 | ||
3052 | depth = ext_depth(inode); | 3114 | depth = ext_depth(inode); |
3053 | eh = path[depth].p_hdr; | 3115 | eh = path[depth].p_hdr; |
3054 | ex = path[depth].p_ext; | 3116 | ex = path[depth].p_ext; |
3055 | 3117 | ||
3118 | ext_debug("ext4_convert_unwritten_extents_endio: inode %lu, logical" | ||
3119 | "block %llu, max_blocks %u\n", inode->i_ino, | ||
3120 | (unsigned long long)le32_to_cpu(ex->ee_block), | ||
3121 | ext4_ext_get_actual_len(ex)); | ||
3122 | |||
3056 | err = ext4_ext_get_access(handle, inode, path + depth); | 3123 | err = ext4_ext_get_access(handle, inode, path + depth); |
3057 | if (err) | 3124 | if (err) |
3058 | goto out; | 3125 | goto out; |
3059 | /* first mark the extent as initialized */ | 3126 | /* first mark the extent as initialized */ |
3060 | ext4_ext_mark_initialized(ex); | 3127 | ext4_ext_mark_initialized(ex); |
3061 | 3128 | ||
3062 | /* | 3129 | /* note: ext4_ext_correct_indexes() isn't needed here because |
3063 | * We have to see if it can be merged with the extent | 3130 | * borders are not changed |
3064 | * on the left. | ||
3065 | */ | ||
3066 | if (ex > EXT_FIRST_EXTENT(eh)) { | ||
3067 | /* | ||
3068 | * To merge left, pass "ex - 1" to try_to_merge(), | ||
3069 | * since it merges towards right _only_. | ||
3070 | */ | ||
3071 | ret = ext4_ext_try_to_merge(inode, path, ex - 1); | ||
3072 | if (ret) { | ||
3073 | err = ext4_ext_correct_indexes(handle, inode, path); | ||
3074 | if (err) | ||
3075 | goto out; | ||
3076 | depth = ext_depth(inode); | ||
3077 | ex--; | ||
3078 | } | ||
3079 | } | ||
3080 | /* | ||
3081 | * Try to Merge towards right. | ||
3082 | */ | 3131 | */ |
3083 | ret = ext4_ext_try_to_merge(inode, path, ex); | 3132 | ext4_ext_try_to_merge(inode, path, ex); |
3084 | if (ret) { | 3133 | |
3085 | err = ext4_ext_correct_indexes(handle, inode, path); | ||
3086 | if (err) | ||
3087 | goto out; | ||
3088 | depth = ext_depth(inode); | ||
3089 | } | ||
3090 | /* Mark modified extent as dirty */ | 3134 | /* Mark modified extent as dirty */ |
3091 | err = ext4_ext_dirty(handle, inode, path + depth); | 3135 | err = ext4_ext_dirty(handle, inode, path + depth); |
3092 | out: | 3136 | out: |
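The end_io conversion above also loses its open-coded left merge and the ext4_ext_correct_indexes() calls: marking an existing extent initialized never moves the leaf borders, so a single ext4_ext_try_to_merge() is enough. The sketch below only models the contiguity rule such a merge depends on; the types are simplified and the uninitialized-length encoding is ignored, so this is not the kernel implementation.

struct sext {
	unsigned int lblk;		/* first logical block  */
	unsigned long long pblk;	/* first physical block */
	unsigned int len;		/* number of blocks     */
};

/* Fold b into a when a ends exactly where b begins, logically and physically. */
static int try_merge_right(struct sext *a, const struct sext *b)
{
	if (a->lblk + a->len == b->lblk && a->pblk + a->len == b->pblk) {
		a->len += b->len;
		return 1;
	}
	return 0;
}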
@@ -3302,15 +3346,19 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
3302 | ext4_fsblk_t newblock = 0; | 3346 | ext4_fsblk_t newblock = 0; |
3303 | int err = 0, depth, ret; | 3347 | int err = 0, depth, ret; |
3304 | unsigned int allocated = 0; | 3348 | unsigned int allocated = 0; |
3349 | unsigned int punched_out = 0; | ||
3350 | unsigned int result = 0; | ||
3305 | struct ext4_allocation_request ar; | 3351 | struct ext4_allocation_request ar; |
3306 | ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; | 3352 | ext4_io_end_t *io = EXT4_I(inode)->cur_aio_dio; |
3353 | struct ext4_map_blocks punch_map; | ||
3307 | 3354 | ||
3308 | ext_debug("blocks %u/%u requested for inode %lu\n", | 3355 | ext_debug("blocks %u/%u requested for inode %lu\n", |
3309 | map->m_lblk, map->m_len, inode->i_ino); | 3356 | map->m_lblk, map->m_len, inode->i_ino); |
3310 | trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); | 3357 | trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); |
3311 | 3358 | ||
3312 | /* check in cache */ | 3359 | /* check in cache */ |
3313 | if (ext4_ext_in_cache(inode, map->m_lblk, &newex)) { | 3360 | if (ext4_ext_in_cache(inode, map->m_lblk, &newex) && |
3361 | ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0)) { | ||
3314 | if (!newex.ee_start_lo && !newex.ee_start_hi) { | 3362 | if (!newex.ee_start_lo && !newex.ee_start_hi) { |
3315 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { | 3363 | if ((flags & EXT4_GET_BLOCKS_CREATE) == 0) { |
3316 | /* | 3364 | /* |
@@ -3375,16 +3423,84 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
3375 | ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, | 3423 | ext_debug("%u fit into %u:%d -> %llu\n", map->m_lblk, |
3376 | ee_block, ee_len, newblock); | 3424 | ee_block, ee_len, newblock); |
3377 | 3425 | ||
3378 | /* Do not put uninitialized extent in the cache */ | 3426 | if ((flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) == 0) { |
3379 | if (!ext4_ext_is_uninitialized(ex)) { | 3427 | /* |
3380 | ext4_ext_put_in_cache(inode, ee_block, | 3428 | * Do not put uninitialized extent |
3381 | ee_len, ee_start); | 3429 | * in the cache |
3382 | goto out; | 3430 | */ |
3431 | if (!ext4_ext_is_uninitialized(ex)) { | ||
3432 | ext4_ext_put_in_cache(inode, ee_block, | ||
3433 | ee_len, ee_start); | ||
3434 | goto out; | ||
3435 | } | ||
3436 | ret = ext4_ext_handle_uninitialized_extents( | ||
3437 | handle, inode, map, path, flags, | ||
3438 | allocated, newblock); | ||
3439 | return ret; | ||
3383 | } | 3440 | } |
3384 | ret = ext4_ext_handle_uninitialized_extents(handle, | 3441 | |
3385 | inode, map, path, flags, allocated, | 3442 | /* |
3386 | newblock); | 3443 | * Punch out the map length, but only to the |
3387 | return ret; | 3444 | * end of the extent |
3445 | */ | ||
3446 | punched_out = allocated < map->m_len ? | ||
3447 | allocated : map->m_len; | ||
3448 | |||
3449 | /* | ||
3450 | * Since extents need to be converted to | ||
3451 | * uninitialized, they must fit in an | ||
3452 | * uninitialized extent | ||
3453 | */ | ||
3454 | if (punched_out > EXT_UNINIT_MAX_LEN) | ||
3455 | punched_out = EXT_UNINIT_MAX_LEN; | ||
3456 | |||
3457 | punch_map.m_lblk = map->m_lblk; | ||
3458 | punch_map.m_pblk = newblock; | ||
3459 | punch_map.m_len = punched_out; | ||
3460 | punch_map.m_flags = 0; | ||
3461 | |||
3462 | /* Check to see if the extent needs to be split */ | ||
3463 | if (punch_map.m_len != ee_len || | ||
3464 | punch_map.m_lblk != ee_block) { | ||
3465 | |||
3466 | ret = ext4_split_extent(handle, inode, | ||
3467 | path, &punch_map, 0, | ||
3468 | EXT4_GET_BLOCKS_PUNCH_OUT_EXT | | ||
3469 | EXT4_GET_BLOCKS_PRE_IO); | ||
3470 | |||
3471 | if (ret < 0) { | ||
3472 | err = ret; | ||
3473 | goto out2; | ||
3474 | } | ||
3475 | /* | ||
3476 | * find extent for the block at | ||
3477 | * the start of the hole | ||
3478 | */ | ||
3479 | ext4_ext_drop_refs(path); | ||
3480 | kfree(path); | ||
3481 | |||
3482 | path = ext4_ext_find_extent(inode, | ||
3483 | map->m_lblk, NULL); | ||
3484 | if (IS_ERR(path)) { | ||
3485 | err = PTR_ERR(path); | ||
3486 | path = NULL; | ||
3487 | goto out2; | ||
3488 | } | ||
3489 | |||
3490 | depth = ext_depth(inode); | ||
3491 | ex = path[depth].p_ext; | ||
3492 | ee_len = ext4_ext_get_actual_len(ex); | ||
3493 | ee_block = le32_to_cpu(ex->ee_block); | ||
3494 | ee_start = ext4_ext_pblock(ex); | ||
3495 | |||
3496 | } | ||
3497 | |||
3498 | ext4_ext_mark_uninitialized(ex); | ||
3499 | |||
3500 | err = ext4_ext_remove_space(inode, map->m_lblk, | ||
3501 | map->m_lblk + punched_out); | ||
3502 | |||
3503 | goto out2; | ||
3388 | } | 3504 | } |
3389 | } | 3505 | } |
3390 | 3506 | ||
@@ -3446,6 +3562,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, | |||
3446 | else | 3562 | else |
3447 | /* disable in-core preallocation for non-regular files */ | 3563 | /* disable in-core preallocation for non-regular files */ |
3448 | ar.flags = 0; | 3564 | ar.flags = 0; |
3565 | if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) | ||
3566 | ar.flags |= EXT4_MB_HINT_NOPREALLOC; | ||
3449 | newblock = ext4_mb_new_blocks(handle, &ar, &err); | 3567 | newblock = ext4_mb_new_blocks(handle, &ar, &err); |
3450 | if (!newblock) | 3568 | if (!newblock) |
3451 | goto out2; | 3569 | goto out2; |
@@ -3529,7 +3647,11 @@ out2: | |||
3529 | } | 3647 | } |
3530 | trace_ext4_ext_map_blocks_exit(inode, map->m_lblk, | 3648 | trace_ext4_ext_map_blocks_exit(inode, map->m_lblk, |
3531 | newblock, map->m_len, err ? err : allocated); | 3649 | newblock, map->m_len, err ? err : allocated); |
3532 | return err ? err : allocated; | 3650 | |
3651 | result = (flags & EXT4_GET_BLOCKS_PUNCH_OUT_EXT) ? | ||
3652 | punched_out : allocated; | ||
3653 | |||
3654 | return err ? err : result; | ||
3533 | } | 3655 | } |
3534 | 3656 | ||
3535 | void ext4_ext_truncate(struct inode *inode) | 3657 | void ext4_ext_truncate(struct inode *inode) |
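The EXT4_GET_BLOCKS_PUNCH_OUT_EXT handling added to ext4_ext_map_blocks() skips the extent-cache fast path, clamps the punch to the end of the extent it finds (and to EXT_UNINIT_MAX_LEN, because the surviving remainder is marked uninitialized), splits the extent when the punch does not cover it exactly, and finally calls ext4_ext_remove_space(); the function then returns the number of blocks punched instead of the number allocated. The clamp is simple enough to show in isolation -- a sketch with my own parameter names, not the kernel code:

/*
 * How many blocks one EXT4_GET_BLOCKS_PUNCH_OUT_EXT call removes: no
 * more than the caller asked for, no more than the extent that was
 * found, and never longer than an uninitialized extent may be
 * (uninit_max stands in for EXT_UNINIT_MAX_LEN here).
 */
static unsigned int punch_len(unsigned int allocated, unsigned int m_len,
			      unsigned int uninit_max)
{
	unsigned int punched_out = allocated < m_len ? allocated : m_len;

	if (punched_out > uninit_max)
		punched_out = uninit_max;
	return punched_out;
}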
@@ -3577,7 +3699,7 @@ void ext4_ext_truncate(struct inode *inode) | |||
3577 | 3699 | ||
3578 | last_block = (inode->i_size + sb->s_blocksize - 1) | 3700 | last_block = (inode->i_size + sb->s_blocksize - 1) |
3579 | >> EXT4_BLOCK_SIZE_BITS(sb); | 3701 | >> EXT4_BLOCK_SIZE_BITS(sb); |
3580 | err = ext4_ext_remove_space(inode, last_block); | 3702 | err = ext4_ext_remove_space(inode, last_block, EXT_MAX_BLOCK); |
3581 | 3703 | ||
3582 | /* In a multi-transaction truncate, we only make the final | 3704 | /* In a multi-transaction truncate, we only make the final |
3583 | * transaction synchronous. | 3705 | * transaction synchronous. |
@@ -3585,8 +3707,9 @@ void ext4_ext_truncate(struct inode *inode) | |||
3585 | if (IS_SYNC(inode)) | 3707 | if (IS_SYNC(inode)) |
3586 | ext4_handle_sync(handle); | 3708 | ext4_handle_sync(handle); |
3587 | 3709 | ||
3588 | out_stop: | ||
3589 | up_write(&EXT4_I(inode)->i_data_sem); | 3710 | up_write(&EXT4_I(inode)->i_data_sem); |
3711 | |||
3712 | out_stop: | ||
3590 | /* | 3713 | /* |
3591 | * If this was a simple ftruncate() and the file will remain alive, | 3714 | * If this was a simple ftruncate() and the file will remain alive, |
3592 | * then we need to clear up the orphan record which we created above. | 3715 | * then we need to clear up the orphan record which we created above. |
@@ -3651,10 +3774,6 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) | |||
3651 | struct ext4_map_blocks map; | 3774 | struct ext4_map_blocks map; |
3652 | unsigned int credits, blkbits = inode->i_blkbits; | 3775 | unsigned int credits, blkbits = inode->i_blkbits; |
3653 | 3776 | ||
3654 | /* We only support the FALLOC_FL_KEEP_SIZE mode */ | ||
3655 | if (mode & ~FALLOC_FL_KEEP_SIZE) | ||
3656 | return -EOPNOTSUPP; | ||
3657 | |||
3658 | /* | 3777 | /* |
3659 | * currently supporting (pre)allocate mode for extent-based | 3778 | * currently supporting (pre)allocate mode for extent-based |
3660 | * files _only_ | 3779 | * files _only_ |
@@ -3662,6 +3781,13 @@ long ext4_fallocate(struct file *file, int mode, loff_t offset, loff_t len) | |||
3662 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) | 3781 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
3663 | return -EOPNOTSUPP; | 3782 | return -EOPNOTSUPP; |
3664 | 3783 | ||
3784 | /* Return error if mode is not supported */ | ||
3785 | if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE)) | ||
3786 | return -EOPNOTSUPP; | ||
3787 | |||
3788 | if (mode & FALLOC_FL_PUNCH_HOLE) | ||
3789 | return ext4_punch_hole(file, offset, len); | ||
3790 | |||
3665 | trace_ext4_fallocate_enter(inode, offset, len, mode); | 3791 | trace_ext4_fallocate_enter(inode, offset, len, mode); |
3666 | map.m_lblk = offset >> blkbits; | 3792 | map.m_lblk = offset >> blkbits; |
3667 | /* | 3793 | /* |
@@ -3691,7 +3817,8 @@ retry: | |||
3691 | break; | 3817 | break; |
3692 | } | 3818 | } |
3693 | ret = ext4_map_blocks(handle, inode, &map, | 3819 | ret = ext4_map_blocks(handle, inode, &map, |
3694 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT); | 3820 | EXT4_GET_BLOCKS_CREATE_UNINIT_EXT | |
3821 | EXT4_GET_BLOCKS_NO_NORMALIZE); | ||
3695 | if (ret <= 0) { | 3822 | if (ret <= 0) { |
3696 | #ifdef EXT4FS_DEBUG | 3823 | #ifdef EXT4FS_DEBUG |
3697 | WARN_ON(ret <= 0); | 3824 | WARN_ON(ret <= 0); |
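With FALLOC_FL_PUNCH_HOLE accepted by ext4_fallocate() and routed to ext4_punch_hole(), and with the new EXT4_GET_BLOCKS_NO_NORMALIZE flag (mapped to EXT4_MB_HINT_NOPREALLOC above) keeping the allocator from preallocating past the requested range, userspace can now deallocate a range in the middle of an extent-mapped file. Below is a minimal user-space demonstration; it assumes a kernel with this support and a libc that exposes fallocate(2), and it combines the flag with FALLOC_FL_KEEP_SIZE so i_size is left untouched.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(int argc, char **argv)
{
	if (argc != 4) {
		fprintf(stderr, "usage: %s <file> <offset> <length>\n", argv[0]);
		return 1;
	}

	int fd = open(argv[1], O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	off_t off = atoll(argv[2]);
	off_t len = atoll(argv[3]);

	/* Deallocate the byte range; KEEP_SIZE leaves the file size alone. */
	if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		      off, len) < 0)
		perror("fallocate(FALLOC_FL_PUNCH_HOLE)");

	close(fd);
	return 0;
}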
@@ -3822,6 +3949,7 @@ static int ext4_ext_fiemap_cb(struct inode *inode, struct ext4_ext_path *path, | |||
3822 | pgoff_t last_offset; | 3949 | pgoff_t last_offset; |
3823 | pgoff_t offset; | 3950 | pgoff_t offset; |
3824 | pgoff_t index; | 3951 | pgoff_t index; |
3952 | pgoff_t start_index = 0; | ||
3825 | struct page **pages = NULL; | 3953 | struct page **pages = NULL; |
3826 | struct buffer_head *bh = NULL; | 3954 | struct buffer_head *bh = NULL; |
3827 | struct buffer_head *head = NULL; | 3955 | struct buffer_head *head = NULL; |
@@ -3848,39 +3976,57 @@ out: | |||
3848 | kfree(pages); | 3976 | kfree(pages); |
3849 | return EXT_CONTINUE; | 3977 | return EXT_CONTINUE; |
3850 | } | 3978 | } |
3979 | index = 0; | ||
3851 | 3980 | ||
3981 | next_page: | ||
3852 | /* Try to find the 1st mapped buffer. */ | 3982 | /* Try to find the 1st mapped buffer. */ |
3853 | end = ((__u64)pages[0]->index << PAGE_SHIFT) >> | 3983 | end = ((__u64)pages[index]->index << PAGE_SHIFT) >> |
3854 | blksize_bits; | 3984 | blksize_bits; |
3855 | if (!page_has_buffers(pages[0])) | 3985 | if (!page_has_buffers(pages[index])) |
3856 | goto out; | 3986 | goto out; |
3857 | head = page_buffers(pages[0]); | 3987 | head = page_buffers(pages[index]); |
3858 | if (!head) | 3988 | if (!head) |
3859 | goto out; | 3989 | goto out; |
3860 | 3990 | ||
3991 | index++; | ||
3861 | bh = head; | 3992 | bh = head; |
3862 | do { | 3993 | do { |
3863 | if (buffer_mapped(bh)) { | 3994 | if (end >= newex->ec_block + |
3995 | newex->ec_len) | ||
3996 | /* The buffer is out of | ||
3997 | * the request range. | ||
3998 | */ | ||
3999 | goto out; | ||
4000 | |||
4001 | if (buffer_mapped(bh) && | ||
4002 | end >= newex->ec_block) { | ||
4003 | start_index = index - 1; | ||
3864 | /* get the 1st mapped buffer. */ | 4004 | /* get the 1st mapped buffer. */ |
3865 | if (end > newex->ec_block + | ||
3866 | newex->ec_len) | ||
3867 | /* The buffer is out of | ||
3868 | * the request range. | ||
3869 | */ | ||
3870 | goto out; | ||
3871 | goto found_mapped_buffer; | 4005 | goto found_mapped_buffer; |
3872 | } | 4006 | } |
4007 | |||
3873 | bh = bh->b_this_page; | 4008 | bh = bh->b_this_page; |
3874 | end++; | 4009 | end++; |
3875 | } while (bh != head); | 4010 | } while (bh != head); |
3876 | 4011 | ||
3877 | /* No mapped buffer found. */ | 4012 | /* No mapped buffer in the range found in this page, |
3878 | goto out; | 4013 | * We need to look up next page. |
4014 | */ | ||
4015 | if (index >= ret) { | ||
4016 | /* There is no page left, but we need to limit | ||
4017 | * newex->ec_len. | ||
4018 | */ | ||
4019 | newex->ec_len = end - newex->ec_block; | ||
4020 | goto out; | ||
4021 | } | ||
4022 | goto next_page; | ||
3879 | } else { | 4023 | } else { |
3880 | /*Find contiguous delayed buffers. */ | 4024 | /*Find contiguous delayed buffers. */ |
3881 | if (ret > 0 && pages[0]->index == last_offset) | 4025 | if (ret > 0 && pages[0]->index == last_offset) |
3882 | head = page_buffers(pages[0]); | 4026 | head = page_buffers(pages[0]); |
3883 | bh = head; | 4027 | bh = head; |
4028 | index = 1; | ||
4029 | start_index = 0; | ||
3884 | } | 4030 | } |
3885 | 4031 | ||
3886 | found_mapped_buffer: | 4032 | found_mapped_buffer: |
@@ -3903,7 +4049,7 @@ found_mapped_buffer: | |||
3903 | end++; | 4049 | end++; |
3904 | } while (bh != head); | 4050 | } while (bh != head); |
3905 | 4051 | ||
3906 | for (index = 1; index < ret; index++) { | 4052 | for (; index < ret; index++) { |
3907 | if (!page_has_buffers(pages[index])) { | 4053 | if (!page_has_buffers(pages[index])) { |
3908 | bh = NULL; | 4054 | bh = NULL; |
3909 | break; | 4055 | break; |
@@ -3913,8 +4059,10 @@ found_mapped_buffer: | |||
3913 | bh = NULL; | 4059 | bh = NULL; |
3914 | break; | 4060 | break; |
3915 | } | 4061 | } |
4062 | |||
3916 | if (pages[index]->index != | 4063 | if (pages[index]->index != |
3917 | pages[0]->index + index) { | 4064 | pages[start_index]->index + index |
4065 | - start_index) { | ||
3918 | /* Blocks are not contiguous. */ | 4066 | /* Blocks are not contiguous. */ |
3919 | bh = NULL; | 4067 | bh = NULL; |
3920 | break; | 4068 | break; |
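The fiemap delalloc scan above can now cross page boundaries: when the first page returned by the lookup has no mapped buffer inside the cached extent, the loop jumps to next_page, and start_index records the page where the mapped run actually began so the later contiguity test compares against the right base page. That test reduces to the one-liner below (a standalone sketch with my own names, not the kernel code):

/*
 * Pages pages[start_index..index] form one contiguous run iff each
 * page's index in the mapping advances by exactly one per step.
 */
static int run_is_contiguous(const unsigned long *page_index,
			     unsigned int start_index, unsigned int index)
{
	return page_index[index] ==
	       page_index[start_index] + (index - start_index);
}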
@@ -4006,6 +4154,177 @@ static int ext4_xattr_fiemap(struct inode *inode, | |||
4006 | return (error < 0 ? error : 0); | 4154 | return (error < 0 ? error : 0); |
4007 | } | 4155 | } |
4008 | 4156 | ||
4157 | /* | ||
4158 | * ext4_ext_punch_hole | ||
4159 | * | ||
4160 | * Punches a hole of "length" bytes in a file starting | ||
4161 | * at byte "offset" | ||
4162 | * | ||
4163 | * @inode: The inode of the file to punch a hole in | ||
4164 | * @offset: The starting byte offset of the hole | ||
4165 | * @length: The length of the hole | ||
4166 | * | ||
4167 | * Returns the number of blocks removed or negative on error | ||
4168 | */ | ||
4169 | int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length) | ||
4170 | { | ||
4171 | struct inode *inode = file->f_path.dentry->d_inode; | ||
4172 | struct super_block *sb = inode->i_sb; | ||
4173 | struct ext4_ext_cache cache_ex; | ||
4174 | ext4_lblk_t first_block, last_block, num_blocks, iblock, max_blocks; | ||
4175 | struct address_space *mapping = inode->i_mapping; | ||
4176 | struct ext4_map_blocks map; | ||
4177 | handle_t *handle; | ||
4178 | loff_t first_block_offset, last_block_offset, block_len; | ||
4179 | loff_t first_page, last_page, first_page_offset, last_page_offset; | ||
4180 | int ret, credits, blocks_released, err = 0; | ||
4181 | |||
4182 | first_block = (offset + sb->s_blocksize - 1) >> | ||
4183 | EXT4_BLOCK_SIZE_BITS(sb); | ||
4184 | last_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb); | ||
4185 | |||
4186 | first_block_offset = first_block << EXT4_BLOCK_SIZE_BITS(sb); | ||
4187 | last_block_offset = last_block << EXT4_BLOCK_SIZE_BITS(sb); | ||
4188 | |||
4189 | first_page = (offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | ||
4190 | last_page = (offset + length) >> PAGE_CACHE_SHIFT; | ||
4191 | |||
4192 | first_page_offset = first_page << PAGE_CACHE_SHIFT; | ||
4193 | last_page_offset = last_page << PAGE_CACHE_SHIFT; | ||
4194 | |||
4195 | /* | ||
4196 | * Write out all dirty pages to avoid race conditions | ||
4197 | * Then release them. | ||
4198 | */ | ||
4199 | if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) { | ||
4200 | err = filemap_write_and_wait_range(mapping, | ||
4201 | first_page_offset == 0 ? 0 : first_page_offset-1, | ||
4202 | last_page_offset); | ||
4203 | |||
4204 | if (err) | ||
4205 | return err; | ||
4206 | } | ||
4207 | |||
4208 | /* Now release the pages */ | ||
4209 | if (last_page_offset > first_page_offset) { | ||
4210 | truncate_inode_pages_range(mapping, first_page_offset, | ||
4211 | last_page_offset-1); | ||
4212 | } | ||
4213 | |||
4214 | /* finish any pending end_io work */ | ||
4215 | ext4_flush_completed_IO(inode); | ||
4216 | |||
4217 | credits = ext4_writepage_trans_blocks(inode); | ||
4218 | handle = ext4_journal_start(inode, credits); | ||
4219 | if (IS_ERR(handle)) | ||
4220 | return PTR_ERR(handle); | ||
4221 | |||
4222 | err = ext4_orphan_add(handle, inode); | ||
4223 | if (err) | ||
4224 | goto out; | ||
4225 | |||
4226 | /* | ||
4227 | * Now we need to zero out the non-block-aligned data. | ||
4228 | * If the file is smaller than a block, just | ||
4229 | * zero out the middle | ||
4230 | */ | ||
4231 | if (first_block > last_block) | ||
4232 | ext4_block_zero_page_range(handle, mapping, offset, length); | ||
4233 | else { | ||
4234 | /* zero out the head of the hole before the first block */ | ||
4235 | block_len = first_block_offset - offset; | ||
4236 | if (block_len > 0) | ||
4237 | ext4_block_zero_page_range(handle, mapping, | ||
4238 | offset, block_len); | ||
4239 | |||
4240 | /* zero out the tail of the hole after the last block */ | ||
4241 | block_len = offset + length - last_block_offset; | ||
4242 | if (block_len > 0) { | ||
4243 | ext4_block_zero_page_range(handle, mapping, | ||
4244 | last_block_offset, block_len); | ||
4245 | } | ||
4246 | } | ||
4247 | |||
4248 | /* If there are no blocks to remove, return now */ | ||
4249 | if (first_block >= last_block) | ||
4250 | goto out; | ||
4251 | |||
4252 | down_write(&EXT4_I(inode)->i_data_sem); | ||
4253 | ext4_ext_invalidate_cache(inode); | ||
4254 | ext4_discard_preallocations(inode); | ||
4255 | |||
4256 | /* | ||
4257 | * Loop over all the blocks and identify blocks | ||
4258 | * that need to be punched out | ||
4259 | */ | ||
4260 | iblock = first_block; | ||
4261 | blocks_released = 0; | ||
4262 | while (iblock < last_block) { | ||
4263 | max_blocks = last_block - iblock; | ||
4264 | num_blocks = 1; | ||
4265 | memset(&map, 0, sizeof(map)); | ||
4266 | map.m_lblk = iblock; | ||
4267 | map.m_len = max_blocks; | ||
4268 | ret = ext4_ext_map_blocks(handle, inode, &map, | ||
4269 | EXT4_GET_BLOCKS_PUNCH_OUT_EXT); | ||
4270 | |||
4271 | if (ret > 0) { | ||
4272 | blocks_released += ret; | ||
4273 | num_blocks = ret; | ||
4274 | } else if (ret == 0) { | ||
4275 | /* | ||
4276 | * If map blocks could not find the block, | ||
4277 | * then it is in a hole. If the hole was | ||
4278 | * not already cached, then map blocks should | ||
4279 | * put it in the cache. So we can get the hole | ||
4280 | * out of the cache | ||
4281 | */ | ||
4282 | memset(&cache_ex, 0, sizeof(cache_ex)); | ||
4283 | if ((ext4_ext_check_cache(inode, iblock, &cache_ex)) && | ||
4284 | !cache_ex.ec_start) { | ||
4285 | |||
4286 | /* The hole is cached */ | ||
4287 | num_blocks = cache_ex.ec_block + | ||
4288 | cache_ex.ec_len - iblock; | ||
4289 | |||
4290 | } else { | ||
4291 | /* The block could not be identified */ | ||
4292 | err = -EIO; | ||
4293 | break; | ||
4294 | } | ||
4295 | } else { | ||
4296 | /* Map blocks error */ | ||
4297 | err = ret; | ||
4298 | break; | ||
4299 | } | ||
4300 | |||
4301 | if (num_blocks == 0) { | ||
4302 | /* This condition should never happen */ | ||
4303 | ext_debug("Block lookup failed"); | ||
4304 | err = -EIO; | ||
4305 | break; | ||
4306 | } | ||
4307 | |||
4308 | iblock += num_blocks; | ||
4309 | } | ||
4310 | |||
4311 | if (blocks_released > 0) { | ||
4312 | ext4_ext_invalidate_cache(inode); | ||
4313 | ext4_discard_preallocations(inode); | ||
4314 | } | ||
4315 | |||
4316 | if (IS_SYNC(inode)) | ||
4317 | ext4_handle_sync(handle); | ||
4318 | |||
4319 | up_write(&EXT4_I(inode)->i_data_sem); | ||
4320 | |||
4321 | out: | ||
4322 | ext4_orphan_del(handle, inode); | ||
4323 | inode->i_mtime = inode->i_ctime = ext4_current_time(inode); | ||
4324 | ext4_mark_inode_dirty(handle, inode); | ||
4325 | ext4_journal_stop(handle); | ||
4326 | return err; | ||
4327 | } | ||
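The new ext4_ext_punch_hole() above works in three stages: it writes back and drops the page cache over the requested range and flushes pending end_io work, it zeroes the partial blocks at the head and tail of the range inside a journal handle (with the inode temporarily on the orphan list), and it then walks the block-aligned middle, calling ext4_ext_map_blocks() with EXT4_GET_BLOCKS_PUNCH_OUT_EXT and using the extent cache to skip over ranges that are already holes. Only whole blocks strictly inside the range are deallocated; the boundary arithmetic is shown standalone below (plain C, illustrative names, not the kernel routine).

#include <stdio.h>

/*
 * Block-boundary arithmetic used when punching [offset, offset+length):
 * round the start up and the end down, so only whole blocks inside the
 * range are freed while the partial head and tail are merely zeroed.
 */
static void punch_bounds(unsigned long long offset, unsigned long long length,
			 unsigned int blocksize,
			 unsigned long long *first_block,
			 unsigned long long *last_block)
{
	*first_block = (offset + blocksize - 1) / blocksize;	/* round up   */
	*last_block = (offset + length) / blocksize;		/* round down */
}

int main(void)
{
	unsigned long long first, last;

	/* punch 10000 bytes at offset 5000 with 4096-byte blocks */
	punch_bounds(5000, 10000, 4096, &first, &last);
	printf("zero up to block %llu, free blocks [%llu, %llu), "
	       "zero from block %llu to the end of the range\n",
	       first, first, last, last);
	return 0;
}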
4009 | int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | 4328 | int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, |
4010 | __u64 start, __u64 len) | 4329 | __u64 start, __u64 len) |
4011 | { | 4330 | { |
@@ -4042,4 +4361,3 @@ int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, | |||
4042 | 4361 | ||
4043 | return error; | 4362 | return error; |
4044 | } | 4363 | } |
4045 | |||
diff --git a/fs/ext4/file.c b/fs/ext4/file.c index 7b80d543b89e..2c0972322009 100644 --- a/fs/ext4/file.c +++ b/fs/ext4/file.c | |||
@@ -272,7 +272,6 @@ const struct file_operations ext4_file_operations = { | |||
272 | }; | 272 | }; |
273 | 273 | ||
274 | const struct inode_operations ext4_file_inode_operations = { | 274 | const struct inode_operations ext4_file_inode_operations = { |
275 | .truncate = ext4_truncate, | ||
276 | .setattr = ext4_setattr, | 275 | .setattr = ext4_setattr, |
277 | .getattr = ext4_getattr, | 276 | .getattr = ext4_getattr, |
278 | #ifdef CONFIG_EXT4_FS_XATTR | 277 | #ifdef CONFIG_EXT4_FS_XATTR |
diff --git a/fs/ext4/fsync.c b/fs/ext4/fsync.c index e9473cbe80df..ce66d2fe826c 100644 --- a/fs/ext4/fsync.c +++ b/fs/ext4/fsync.c | |||
@@ -36,7 +36,7 @@ | |||
36 | 36 | ||
37 | static void dump_completed_IO(struct inode * inode) | 37 | static void dump_completed_IO(struct inode * inode) |
38 | { | 38 | { |
39 | #ifdef EXT4_DEBUG | 39 | #ifdef EXT4FS_DEBUG |
40 | struct list_head *cur, *before, *after; | 40 | struct list_head *cur, *before, *after; |
41 | ext4_io_end_t *io, *io0, *io1; | 41 | ext4_io_end_t *io, *io0, *io1; |
42 | unsigned long flags; | 42 | unsigned long flags; |
@@ -172,6 +172,7 @@ int ext4_sync_file(struct file *file, int datasync) | |||
172 | journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; | 172 | journal_t *journal = EXT4_SB(inode->i_sb)->s_journal; |
173 | int ret; | 173 | int ret; |
174 | tid_t commit_tid; | 174 | tid_t commit_tid; |
175 | bool needs_barrier = false; | ||
175 | 176 | ||
176 | J_ASSERT(ext4_journal_current_handle() == NULL); | 177 | J_ASSERT(ext4_journal_current_handle() == NULL); |
177 | 178 | ||
@@ -211,22 +212,12 @@ int ext4_sync_file(struct file *file, int datasync) | |||
211 | } | 212 | } |
212 | 213 | ||
213 | commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid; | 214 | commit_tid = datasync ? ei->i_datasync_tid : ei->i_sync_tid; |
214 | if (jbd2_log_start_commit(journal, commit_tid)) { | 215 | if (journal->j_flags & JBD2_BARRIER && |
215 | /* | 216 | !jbd2_trans_will_send_data_barrier(journal, commit_tid)) |
216 | * When the journal is on a different device than the | 217 | needs_barrier = true; |
217 | * fs data disk, we need to issue the barrier in | 218 | jbd2_log_start_commit(journal, commit_tid); |
218 | * writeback mode. (In ordered mode, the jbd2 layer | 219 | ret = jbd2_log_wait_commit(journal, commit_tid); |
219 | * will take care of issuing the barrier. In | 220 | if (needs_barrier) |
220 | * data=journal, all of the data blocks are written to | ||
221 | * the journal device.) | ||
222 | */ | ||
223 | if (ext4_should_writeback_data(inode) && | ||
224 | (journal->j_fs_dev != journal->j_dev) && | ||
225 | (journal->j_flags & JBD2_BARRIER)) | ||
226 | blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, | ||
227 | NULL); | ||
228 | ret = jbd2_log_wait_commit(journal, commit_tid); | ||
229 | } else if (journal->j_flags & JBD2_BARRIER) | ||
230 | blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); | 221 | blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL); |
231 | out: | 222 | out: |
232 | trace_ext4_sync_file_exit(inode, ret); | 223 | trace_ext4_sync_file_exit(inode, ret); |
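The fsync change replaces the old "journal on a separate device in writeback mode" heuristic with a direct question to jbd2: if barriers are enabled but jbd2_trans_will_send_data_barrier() reports that the commit of this transaction will not already issue one, ext4 flushes the data device itself after waiting for the commit. Reduced to a predicate (simplified types, not the kernel code):

/*
 * An explicit cache flush of the data device is only needed when
 * barriers are enabled and the jbd2 commit for this tid will not
 * already send one.
 */
static int fsync_needs_extra_flush(int barriers_enabled,
				   int commit_sends_barrier)
{
	return barriers_enabled && !commit_sends_barrier;
}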
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index f2fa5e8a582c..50d0e9c64584 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c | |||
@@ -639,8 +639,8 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode, | |||
639 | while (target > 0) { | 639 | while (target > 0) { |
640 | count = target; | 640 | count = target; |
641 | /* allocating blocks for indirect blocks and direct blocks */ | 641 | /* allocating blocks for indirect blocks and direct blocks */ |
642 | current_block = ext4_new_meta_blocks(handle, inode, | 642 | current_block = ext4_new_meta_blocks(handle, inode, goal, |
643 | goal, &count, err); | 643 | 0, &count, err); |
644 | if (*err) | 644 | if (*err) |
645 | goto failed_out; | 645 | goto failed_out; |
646 | 646 | ||
@@ -1930,7 +1930,7 @@ repeat: | |||
1930 | * We do still charge estimated metadata to the sb though; | 1930 | * We do still charge estimated metadata to the sb though; |
1931 | * we cannot afford to run out of free blocks. | 1931 | * we cannot afford to run out of free blocks. |
1932 | */ | 1932 | */ |
1933 | if (ext4_claim_free_blocks(sbi, md_needed + 1)) { | 1933 | if (ext4_claim_free_blocks(sbi, md_needed + 1, 0)) { |
1934 | dquot_release_reservation_block(inode, 1); | 1934 | dquot_release_reservation_block(inode, 1); |
1935 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { | 1935 | if (ext4_should_retry_alloc(inode->i_sb, &retries)) { |
1936 | yield(); | 1936 | yield(); |
@@ -2796,9 +2796,7 @@ static int write_cache_pages_da(struct address_space *mapping, | |||
2796 | continue; | 2796 | continue; |
2797 | } | 2797 | } |
2798 | 2798 | ||
2799 | if (PageWriteback(page)) | 2799 | wait_on_page_writeback(page); |
2800 | wait_on_page_writeback(page); | ||
2801 | |||
2802 | BUG_ON(PageWriteback(page)); | 2800 | BUG_ON(PageWriteback(page)); |
2803 | 2801 | ||
2804 | if (mpd->next_page != page->index) | 2802 | if (mpd->next_page != page->index) |
@@ -3513,7 +3511,7 @@ retry: | |||
3513 | loff_t end = offset + iov_length(iov, nr_segs); | 3511 | loff_t end = offset + iov_length(iov, nr_segs); |
3514 | 3512 | ||
3515 | if (end > isize) | 3513 | if (end > isize) |
3516 | vmtruncate(inode, isize); | 3514 | ext4_truncate_failed_write(inode); |
3517 | } | 3515 | } |
3518 | } | 3516 | } |
3519 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) | 3517 | if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) |
@@ -3916,9 +3914,30 @@ void ext4_set_aops(struct inode *inode) | |||
3916 | int ext4_block_truncate_page(handle_t *handle, | 3914 | int ext4_block_truncate_page(handle_t *handle, |
3917 | struct address_space *mapping, loff_t from) | 3915 | struct address_space *mapping, loff_t from) |
3918 | { | 3916 | { |
3917 | unsigned offset = from & (PAGE_CACHE_SIZE-1); | ||
3918 | unsigned length; | ||
3919 | unsigned blocksize; | ||
3920 | struct inode *inode = mapping->host; | ||
3921 | |||
3922 | blocksize = inode->i_sb->s_blocksize; | ||
3923 | length = blocksize - (offset & (blocksize - 1)); | ||
3924 | |||
3925 | return ext4_block_zero_page_range(handle, mapping, from, length); | ||
3926 | } | ||
3927 | |||
3928 | /* | ||
3929 | * ext4_block_zero_page_range() zeros out a mapping of length 'length' | ||
3930 | * starting from file offset 'from'. The range to be zero'd must | ||
3931 | * be contained within one block. If the specified range exceeds | ||
3932 | * the end of the block, it will be shortened to the end of the block | ||
3933 | * that corresponds to 'from'. | ||
3934 | */ | ||
3935 | int ext4_block_zero_page_range(handle_t *handle, | ||
3936 | struct address_space *mapping, loff_t from, loff_t length) | ||
3937 | { | ||
3919 | ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; | 3938 | ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT; |
3920 | unsigned offset = from & (PAGE_CACHE_SIZE-1); | 3939 | unsigned offset = from & (PAGE_CACHE_SIZE-1); |
3921 | unsigned blocksize, length, pos; | 3940 | unsigned blocksize, max, pos; |
3922 | ext4_lblk_t iblock; | 3941 | ext4_lblk_t iblock; |
3923 | struct inode *inode = mapping->host; | 3942 | struct inode *inode = mapping->host; |
3924 | struct buffer_head *bh; | 3943 | struct buffer_head *bh; |
@@ -3931,7 +3950,15 @@ int ext4_block_truncate_page(handle_t *handle, | |||
3931 | return -EINVAL; | 3950 | return -EINVAL; |
3932 | 3951 | ||
3933 | blocksize = inode->i_sb->s_blocksize; | 3952 | blocksize = inode->i_sb->s_blocksize; |
3934 | length = blocksize - (offset & (blocksize - 1)); | 3953 | max = blocksize - (offset & (blocksize - 1)); |
3954 | |||
3955 | /* | ||
3956 | * correct length if it does not fall between | ||
3957 | * 'from' and the end of the block | ||
3958 | */ | ||
3959 | if (length > max || length < 0) | ||
3960 | length = max; | ||
3961 | |||
3935 | iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); | 3962 | iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits); |
3936 | 3963 | ||
3937 | if (!page_has_buffers(page)) | 3964 | if (!page_has_buffers(page)) |
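ext4_block_truncate_page() is now a thin wrapper: it computes the distance from 'from' to the end of its block and passes that as the length to the new ext4_block_zero_page_range(), which in turn clamps any caller-supplied length to the same block boundary. The shared arithmetic is shown standalone below (plain C, illustrative names; assumes blocksize is a power of two, as it is in ext4).

/*
 * Length handling shared by the truncate wrapper and the range helper:
 * the zeroed range must never cross the block containing 'from'.
 */
static unsigned int zero_len_in_block(unsigned long long from,
				      unsigned int blocksize,
				      long long requested)
{
	unsigned int max = blocksize - (from & (blocksize - 1));

	if (requested < 0 || (unsigned long long)requested > max)
		return max;	/* what the truncate wrapper effectively uses */
	return (unsigned int)requested;
}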
@@ -4380,8 +4407,6 @@ static void ext4_free_branches(handle_t *handle, struct inode *inode, | |||
4380 | 4407 | ||
4381 | int ext4_can_truncate(struct inode *inode) | 4408 | int ext4_can_truncate(struct inode *inode) |
4382 | { | 4409 | { |
4383 | if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) | ||
4384 | return 0; | ||
4385 | if (S_ISREG(inode->i_mode)) | 4410 | if (S_ISREG(inode->i_mode)) |
4386 | return 1; | 4411 | return 1; |
4387 | if (S_ISDIR(inode->i_mode)) | 4412 | if (S_ISDIR(inode->i_mode)) |
@@ -4392,6 +4417,31 @@ int ext4_can_truncate(struct inode *inode) | |||
4392 | } | 4417 | } |
4393 | 4418 | ||
4394 | /* | 4419 | /* |
4420 | * ext4_punch_hole: punches a hole in a file by releasing the blocks | ||
4421 | * associated with the given offset and length | ||
4422 | * | ||
4423 | * @inode: File inode | ||
4424 | * @offset: The offset where the hole will begin | ||
4425 | * @len: The length of the hole | ||
4426 | * | ||
4427 | * Returns: 0 on success or negative on failure | ||
4428 | */ | ||
4429 | |||
4430 | int ext4_punch_hole(struct file *file, loff_t offset, loff_t length) | ||
4431 | { | ||
4432 | struct inode *inode = file->f_path.dentry->d_inode; | ||
4433 | if (!S_ISREG(inode->i_mode)) | ||
4434 | return -ENOTSUPP; | ||
4435 | |||
4436 | if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) { | ||
4437 | /* TODO: Add support for non extent hole punching */ | ||
4438 | return -ENOTSUPP; | ||
4439 | } | ||
4440 | |||
4441 | return ext4_ext_punch_hole(file, offset, length); | ||
4442 | } | ||
4443 | |||
4444 | /* | ||
4395 | * ext4_truncate() | 4445 | * ext4_truncate() |
4396 | * | 4446 | * |
4397 | * We block out ext4_get_block() block instantiations across the entire | 4447 | * We block out ext4_get_block() block instantiations across the entire |
@@ -4617,7 +4667,7 @@ static int __ext4_get_inode_loc(struct inode *inode, | |||
4617 | /* | 4667 | /* |
4618 | * Figure out the offset within the block group inode table | 4668 | * Figure out the offset within the block group inode table |
4619 | */ | 4669 | */ |
4620 | inodes_per_block = (EXT4_BLOCK_SIZE(sb) / EXT4_INODE_SIZE(sb)); | 4670 | inodes_per_block = EXT4_SB(sb)->s_inodes_per_block; |
4621 | inode_offset = ((inode->i_ino - 1) % | 4671 | inode_offset = ((inode->i_ino - 1) % |
4622 | EXT4_INODES_PER_GROUP(sb)); | 4672 | EXT4_INODES_PER_GROUP(sb)); |
4623 | block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); | 4673 | block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block); |
@@ -5311,8 +5361,7 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) | |||
5311 | 5361 | ||
5312 | if (S_ISREG(inode->i_mode) && | 5362 | if (S_ISREG(inode->i_mode) && |
5313 | attr->ia_valid & ATTR_SIZE && | 5363 | attr->ia_valid & ATTR_SIZE && |
5314 | (attr->ia_size < inode->i_size || | 5364 | (attr->ia_size < inode->i_size)) { |
5315 | (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)))) { | ||
5316 | handle_t *handle; | 5365 | handle_t *handle; |
5317 | 5366 | ||
5318 | handle = ext4_journal_start(inode, 3); | 5367 | handle = ext4_journal_start(inode, 3); |
@@ -5346,14 +5395,15 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr) | |||
5346 | goto err_out; | 5395 | goto err_out; |
5347 | } | 5396 | } |
5348 | } | 5397 | } |
5349 | /* ext4_truncate will clear the flag */ | ||
5350 | if ((ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS))) | ||
5351 | ext4_truncate(inode); | ||
5352 | } | 5398 | } |
5353 | 5399 | ||
5354 | if ((attr->ia_valid & ATTR_SIZE) && | 5400 | if (attr->ia_valid & ATTR_SIZE) { |
5355 | attr->ia_size != i_size_read(inode)) | 5401 | if (attr->ia_size != i_size_read(inode)) { |
5356 | rc = vmtruncate(inode, attr->ia_size); | 5402 | truncate_setsize(inode, attr->ia_size); |
5403 | ext4_truncate(inode); | ||
5404 | } else if (ext4_test_inode_flag(inode, EXT4_INODE_EOFBLOCKS)) | ||
5405 | ext4_truncate(inode); | ||
5406 | } | ||
5357 | 5407 | ||
5358 | if (!rc) { | 5408 | if (!rc) { |
5359 | setattr_copy(inode, attr); | 5409 | setattr_copy(inode, attr); |
@@ -5811,15 +5861,19 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
5811 | goto out_unlock; | 5861 | goto out_unlock; |
5812 | } | 5862 | } |
5813 | ret = 0; | 5863 | ret = 0; |
5814 | if (PageMappedToDisk(page)) | 5864 | |
5815 | goto out_unlock; | 5865 | lock_page(page); |
5866 | wait_on_page_writeback(page); | ||
5867 | if (PageMappedToDisk(page)) { | ||
5868 | up_read(&inode->i_alloc_sem); | ||
5869 | return VM_FAULT_LOCKED; | ||
5870 | } | ||
5816 | 5871 | ||
5817 | if (page->index == size >> PAGE_CACHE_SHIFT) | 5872 | if (page->index == size >> PAGE_CACHE_SHIFT) |
5818 | len = size & ~PAGE_CACHE_MASK; | 5873 | len = size & ~PAGE_CACHE_MASK; |
5819 | else | 5874 | else |
5820 | len = PAGE_CACHE_SIZE; | 5875 | len = PAGE_CACHE_SIZE; |
5821 | 5876 | ||
5822 | lock_page(page); | ||
5823 | /* | 5877 | /* |
5824 | * return if we have all the buffers mapped. This avoid | 5878 | * return if we have all the buffers mapped. This avoid |
5825 | * the need to call write_begin/write_end which does a | 5879 | * the need to call write_begin/write_end which does a |
@@ -5829,8 +5883,8 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
5829 | if (page_has_buffers(page)) { | 5883 | if (page_has_buffers(page)) { |
5830 | if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, | 5884 | if (!walk_page_buffers(NULL, page_buffers(page), 0, len, NULL, |
5831 | ext4_bh_unmapped)) { | 5885 | ext4_bh_unmapped)) { |
5832 | unlock_page(page); | 5886 | up_read(&inode->i_alloc_sem); |
5833 | goto out_unlock; | 5887 | return VM_FAULT_LOCKED; |
5834 | } | 5888 | } |
5835 | } | 5889 | } |
5836 | unlock_page(page); | 5890 | unlock_page(page); |
@@ -5850,6 +5904,16 @@ int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
5850 | if (ret < 0) | 5904 | if (ret < 0) |
5851 | goto out_unlock; | 5905 | goto out_unlock; |
5852 | ret = 0; | 5906 | ret = 0; |
5907 | |||
5908 | /* | ||
5909 | * write_begin/end might have created a dirty page and someone | ||
5910 | * could wander in and start the IO. Make sure that hasn't | ||
5911 | * happened. | ||
5912 | */ | ||
5913 | lock_page(page); | ||
5914 | wait_on_page_writeback(page); | ||
5915 | up_read(&inode->i_alloc_sem); | ||
5916 | return VM_FAULT_LOCKED; | ||
5853 | out_unlock: | 5917 | out_unlock: |
5854 | if (ret) | 5918 | if (ret) |
5855 | ret = VM_FAULT_SIGBUS; | 5919 | ret = VM_FAULT_SIGBUS; |
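The reworked ext4_page_mkwrite() hunk above keeps the existing rule for how much of the faulting page is valid: only the page containing EOF is partial, every earlier page is covered in full. A minimal user-space sketch of that arithmetic, assuming 4 KiB pages (the kernel uses PAGE_CACHE_SHIFT/PAGE_CACHE_MASK instead of the local macros below):

#include <stdio.h>

#define PAGE_SHIFT 12UL                         /* assumed 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* mirrors: if (page->index == size >> PAGE_CACHE_SHIFT)
 *                  len = size & ~PAGE_CACHE_MASK;
 *          else    len = PAGE_CACHE_SIZE;                            */
static unsigned long valid_len(unsigned long long size, unsigned long index)
{
        if (index == size >> PAGE_SHIFT)        /* page holding EOF */
                return size & (PAGE_SIZE - 1);  /* bytes before EOF */
        return PAGE_SIZE;                       /* fully valid page */
}

int main(void)
{
        /* 10000-byte file: pages 0 and 1 are full, page 2 holds 1808 bytes */
        printf("%lu %lu %lu\n",
               valid_len(10000, 0), valid_len(10000, 1), valid_len(10000, 2));
        return 0;
}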
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index d8a16eecf1d5..859f2ae8864e 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c | |||
@@ -787,6 +787,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore) | |||
787 | struct inode *inode; | 787 | struct inode *inode; |
788 | char *data; | 788 | char *data; |
789 | char *bitmap; | 789 | char *bitmap; |
790 | struct ext4_group_info *grinfo; | ||
790 | 791 | ||
791 | mb_debug(1, "init page %lu\n", page->index); | 792 | mb_debug(1, "init page %lu\n", page->index); |
792 | 793 | ||
@@ -819,6 +820,18 @@ static int ext4_mb_init_cache(struct page *page, char *incore) | |||
819 | if (first_group + i >= ngroups) | 820 | if (first_group + i >= ngroups) |
820 | break; | 821 | break; |
821 | 822 | ||
823 | grinfo = ext4_get_group_info(sb, first_group + i); | ||
824 | /* | ||
825 | * If page is uptodate then we came here after online resize | ||
826 | * which added some new uninitialized group info structs, so | ||
827 | * we must skip all initialized uptodate buddies on the page, | ||
828 | * which may be currently in use by an allocating task. | ||
829 | */ | ||
830 | if (PageUptodate(page) && !EXT4_MB_GRP_NEED_INIT(grinfo)) { | ||
831 | bh[i] = NULL; | ||
832 | continue; | ||
833 | } | ||
834 | |||
822 | err = -EIO; | 835 | err = -EIO; |
823 | desc = ext4_get_group_desc(sb, first_group + i, NULL); | 836 | desc = ext4_get_group_desc(sb, first_group + i, NULL); |
824 | if (desc == NULL) | 837 | if (desc == NULL) |
@@ -871,26 +884,28 @@ static int ext4_mb_init_cache(struct page *page, char *incore) | |||
871 | } | 884 | } |
872 | 885 | ||
873 | /* wait for I/O completion */ | 886 | /* wait for I/O completion */ |
874 | for (i = 0; i < groups_per_page && bh[i]; i++) | 887 | for (i = 0; i < groups_per_page; i++) |
875 | wait_on_buffer(bh[i]); | 888 | if (bh[i]) |
889 | wait_on_buffer(bh[i]); | ||
876 | 890 | ||
877 | err = -EIO; | 891 | err = -EIO; |
878 | for (i = 0; i < groups_per_page && bh[i]; i++) | 892 | for (i = 0; i < groups_per_page; i++) |
879 | if (!buffer_uptodate(bh[i])) | 893 | if (bh[i] && !buffer_uptodate(bh[i])) |
880 | goto out; | 894 | goto out; |
881 | 895 | ||
882 | err = 0; | 896 | err = 0; |
883 | first_block = page->index * blocks_per_page; | 897 | first_block = page->index * blocks_per_page; |
884 | /* init the page */ | ||
885 | memset(page_address(page), 0xff, PAGE_CACHE_SIZE); | ||
886 | for (i = 0; i < blocks_per_page; i++) { | 898 | for (i = 0; i < blocks_per_page; i++) { |
887 | int group; | 899 | int group; |
888 | struct ext4_group_info *grinfo; | ||
889 | 900 | ||
890 | group = (first_block + i) >> 1; | 901 | group = (first_block + i) >> 1; |
891 | if (group >= ngroups) | 902 | if (group >= ngroups) |
892 | break; | 903 | break; |
893 | 904 | ||
905 | if (!bh[group - first_group]) | ||
906 | /* skip initialized uptodate buddy */ | ||
907 | continue; | ||
908 | |||
894 | /* | 909 | /* |
895 | * data carry information regarding this | 910 | * data carry information regarding this |
896 | * particular group in the format specified | 911 | * particular group in the format specified |
@@ -919,6 +934,8 @@ static int ext4_mb_init_cache(struct page *page, char *incore) | |||
919 | * incore got set to the group block bitmap below | 934 | * incore got set to the group block bitmap below |
920 | */ | 935 | */ |
921 | ext4_lock_group(sb, group); | 936 | ext4_lock_group(sb, group); |
937 | /* init the buddy */ | ||
938 | memset(data, 0xff, blocksize); | ||
922 | ext4_mb_generate_buddy(sb, data, incore, group); | 939 | ext4_mb_generate_buddy(sb, data, incore, group); |
923 | ext4_unlock_group(sb, group); | 940 | ext4_unlock_group(sb, group); |
924 | incore = NULL; | 941 | incore = NULL; |
@@ -948,7 +965,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore) | |||
948 | 965 | ||
949 | out: | 966 | out: |
950 | if (bh) { | 967 | if (bh) { |
951 | for (i = 0; i < groups_per_page && bh[i]; i++) | 968 | for (i = 0; i < groups_per_page; i++) |
952 | brelse(bh[i]); | 969 | brelse(bh[i]); |
953 | if (bh != &bhs) | 970 | if (bh != &bhs) |
954 | kfree(bh); | 971 | kfree(bh); |
@@ -957,22 +974,21 @@ out: | |||
957 | } | 974 | } |
958 | 975 | ||
959 | /* | 976 | /* |
960 | * lock the group_info alloc_sem of all the groups | 977 | * Lock the buddy and bitmap pages. This makes sure other parallel init_group |
961 | * belonging to the same buddy cache page. This | 978 | * on the same buddy page doesn't happen while holding the buddy page lock. |
962 | * makes sure other parallel operation on the buddy | 979 | * Return locked buddy and bitmap pages on e4b struct. If buddy and bitmap |
963 | * cache doesn't happen while holding the buddy cache | 980 | * are on the same page e4b->bd_buddy_page is NULL and return value is 0. |
964 | * lock | ||
965 | */ | 981 | */ |
966 | static int ext4_mb_get_buddy_cache_lock(struct super_block *sb, | 982 | static int ext4_mb_get_buddy_page_lock(struct super_block *sb, |
967 | ext4_group_t group) | 983 | ext4_group_t group, struct ext4_buddy *e4b) |
968 | { | 984 | { |
969 | int i; | 985 | struct inode *inode = EXT4_SB(sb)->s_buddy_cache; |
970 | int block, pnum; | 986 | int block, pnum, poff; |
971 | int blocks_per_page; | 987 | int blocks_per_page; |
972 | int groups_per_page; | 988 | struct page *page; |
973 | ext4_group_t ngroups = ext4_get_groups_count(sb); | 989 | |
974 | ext4_group_t first_group; | 990 | e4b->bd_buddy_page = NULL; |
975 | struct ext4_group_info *grp; | 991 | e4b->bd_bitmap_page = NULL; |
976 | 992 | ||
977 | blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; | 993 | blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; |
978 | /* | 994 | /* |
@@ -982,57 +998,40 @@ static int ext4_mb_get_buddy_cache_lock(struct super_block *sb, | |||
982 | */ | 998 | */ |
983 | block = group * 2; | 999 | block = group * 2; |
984 | pnum = block / blocks_per_page; | 1000 | pnum = block / blocks_per_page; |
985 | first_group = pnum * blocks_per_page / 2; | 1001 | poff = block % blocks_per_page; |
986 | 1002 | page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); | |
987 | groups_per_page = blocks_per_page >> 1; | 1003 | if (!page) |
988 | if (groups_per_page == 0) | 1004 | return -EIO; |
989 | groups_per_page = 1; | 1005 | BUG_ON(page->mapping != inode->i_mapping); |
990 | /* read all groups the page covers into the cache */ | 1006 | e4b->bd_bitmap_page = page; |
991 | for (i = 0; i < groups_per_page; i++) { | 1007 | e4b->bd_bitmap = page_address(page) + (poff * sb->s_blocksize); |
992 | 1008 | ||
993 | if ((first_group + i) >= ngroups) | 1009 | if (blocks_per_page >= 2) { |
994 | break; | 1010 | /* buddy and bitmap are on the same page */ |
995 | grp = ext4_get_group_info(sb, first_group + i); | 1011 | return 0; |
996 | /* take all groups write allocation | ||
997 | * semaphore. This makes sure there is | ||
998 | * no block allocation going on in any | ||
999 | * of those groups | ||
1000 | */ | ||
1001 | down_write_nested(&grp->alloc_sem, i); | ||
1002 | } | 1012 | } |
1003 | return i; | 1013 | |
1014 | block++; | ||
1015 | pnum = block / blocks_per_page; | ||
1016 | poff = block % blocks_per_page; | ||
1017 | page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); | ||
1018 | if (!page) | ||
1019 | return -EIO; | ||
1020 | BUG_ON(page->mapping != inode->i_mapping); | ||
1021 | e4b->bd_buddy_page = page; | ||
1022 | return 0; | ||
1004 | } | 1023 | } |
1005 | 1024 | ||
1006 | static void ext4_mb_put_buddy_cache_lock(struct super_block *sb, | 1025 | static void ext4_mb_put_buddy_page_lock(struct ext4_buddy *e4b) |
1007 | ext4_group_t group, int locked_group) | ||
1008 | { | 1026 | { |
1009 | int i; | 1027 | if (e4b->bd_bitmap_page) { |
1010 | int block, pnum; | 1028 | unlock_page(e4b->bd_bitmap_page); |
1011 | int blocks_per_page; | 1029 | page_cache_release(e4b->bd_bitmap_page); |
1012 | ext4_group_t first_group; | 1030 | } |
1013 | struct ext4_group_info *grp; | 1031 | if (e4b->bd_buddy_page) { |
1014 | 1032 | unlock_page(e4b->bd_buddy_page); | |
1015 | blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; | 1033 | page_cache_release(e4b->bd_buddy_page); |
1016 | /* | ||
1017 | * the buddy cache inode stores the block bitmap | ||
1018 | * and buddy information in consecutive blocks. | ||
1019 | * So for each group we need two blocks. | ||
1020 | */ | ||
1021 | block = group * 2; | ||
1022 | pnum = block / blocks_per_page; | ||
1023 | first_group = pnum * blocks_per_page / 2; | ||
1024 | /* release locks on all the groups */ | ||
1025 | for (i = 0; i < locked_group; i++) { | ||
1026 | |||
1027 | grp = ext4_get_group_info(sb, first_group + i); | ||
1028 | /* take all groups write allocation | ||
1029 | * semaphore. This makes sure there is | ||
1030 | * no block allocation going on in any | ||
1031 | * of those groups | ||
1032 | */ | ||
1033 | up_write(&grp->alloc_sem); | ||
1034 | } | 1034 | } |
1035 | |||
1036 | } | 1035 | } |
1037 | 1036 | ||
1038 | /* | 1037 | /* |
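The replacement ext4_mb_get_buddy_page_lock() above leans on the layout described in the comments it removes: group g keeps its block bitmap in logical block 2*g of the buddy cache inode and its buddy data in block 2*g + 1, so the page number and in-page offset follow from plain division. A stand-alone sketch of that mapping, assuming a 4 KiB page and a 1 KiB filesystem block (both placeholder values):

#include <stdio.h>

#define PAGE_CACHE_SIZE 4096UL
#define BLOCKSIZE       1024UL          /* assumed sb->s_blocksize */

static void locate(unsigned long group)
{
        unsigned long blocks_per_page = PAGE_CACHE_SIZE / BLOCKSIZE;
        unsigned long block = group * 2;                /* bitmap block  */
        unsigned long pnum  = block / blocks_per_page;  /* page index    */
        unsigned long poff  = block % blocks_per_page;  /* block in page */

        printf("group %lu: bitmap in page %lu, offset %lu", group, pnum, poff);
        if (blocks_per_page >= 2)       /* buddy shares the bitmap's page */
                printf(", buddy on the same page\n");
        else
                printf(", buddy in page %lu\n", (block + 1) / blocks_per_page);
}

int main(void)
{
        for (unsigned long g = 0; g < 4; g++)
                locate(g);
        return 0;
}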
@@ -1044,93 +1043,60 @@ static noinline_for_stack | |||
1044 | int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) | 1043 | int ext4_mb_init_group(struct super_block *sb, ext4_group_t group) |
1045 | { | 1044 | { |
1046 | 1045 | ||
1047 | int ret = 0; | ||
1048 | void *bitmap; | ||
1049 | int blocks_per_page; | ||
1050 | int block, pnum, poff; | ||
1051 | int num_grp_locked = 0; | ||
1052 | struct ext4_group_info *this_grp; | 1046 | struct ext4_group_info *this_grp; |
1053 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 1047 | struct ext4_buddy e4b; |
1054 | struct inode *inode = sbi->s_buddy_cache; | 1048 | struct page *page; |
1055 | struct page *page = NULL, *bitmap_page = NULL; | 1049 | int ret = 0; |
1056 | 1050 | ||
1057 | mb_debug(1, "init group %u\n", group); | 1051 | mb_debug(1, "init group %u\n", group); |
1058 | blocks_per_page = PAGE_CACHE_SIZE / sb->s_blocksize; | ||
1059 | this_grp = ext4_get_group_info(sb, group); | 1052 | this_grp = ext4_get_group_info(sb, group); |
1060 | /* | 1053 | /* |
1061 | * This ensures that we don't reinit the buddy cache | 1054 | * This ensures that we don't reinit the buddy cache |
1062 | * page which maps to the group from which we are already | 1055 | * page which maps to the group from which we are already |
1063 | * allocating. If we are looking at the buddy cache we would | 1056 | * allocating. If we are looking at the buddy cache we would |
1064 | * have taken a reference using ext4_mb_load_buddy and that | 1057 | * have taken a reference using ext4_mb_load_buddy and that |
1065 | * would have taken the alloc_sem lock. | 1058 | * would have pinned buddy page to page cache. |
1066 | */ | 1059 | */ |
1067 | num_grp_locked = ext4_mb_get_buddy_cache_lock(sb, group); | 1060 | ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b); |
1068 | if (!EXT4_MB_GRP_NEED_INIT(this_grp)) { | 1061 | if (ret || !EXT4_MB_GRP_NEED_INIT(this_grp)) { |
1069 | /* | 1062 | /* |
1070 | * somebody initialized the group | 1063 | * somebody initialized the group |
1071 | * return without doing anything | 1064 | * return without doing anything |
1072 | */ | 1065 | */ |
1073 | ret = 0; | ||
1074 | goto err; | 1066 | goto err; |
1075 | } | 1067 | } |
1076 | /* | 1068 | |
1077 | * the buddy cache inode stores the block bitmap | 1069 | page = e4b.bd_bitmap_page; |
1078 | * and buddy information in consecutive blocks. | 1070 | ret = ext4_mb_init_cache(page, NULL); |
1079 | * So for each group we need two blocks. | 1071 | if (ret) |
1080 | */ | 1072 | goto err; |
1081 | block = group * 2; | 1073 | if (!PageUptodate(page)) { |
1082 | pnum = block / blocks_per_page; | ||
1083 | poff = block % blocks_per_page; | ||
1084 | page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); | ||
1085 | if (page) { | ||
1086 | BUG_ON(page->mapping != inode->i_mapping); | ||
1087 | ret = ext4_mb_init_cache(page, NULL); | ||
1088 | if (ret) { | ||
1089 | unlock_page(page); | ||
1090 | goto err; | ||
1091 | } | ||
1092 | unlock_page(page); | ||
1093 | } | ||
1094 | if (page == NULL || !PageUptodate(page)) { | ||
1095 | ret = -EIO; | 1074 | ret = -EIO; |
1096 | goto err; | 1075 | goto err; |
1097 | } | 1076 | } |
1098 | mark_page_accessed(page); | 1077 | mark_page_accessed(page); |
1099 | bitmap_page = page; | ||
1100 | bitmap = page_address(page) + (poff * sb->s_blocksize); | ||
1101 | 1078 | ||
1102 | /* init buddy cache */ | 1079 | if (e4b.bd_buddy_page == NULL) { |
1103 | block++; | ||
1104 | pnum = block / blocks_per_page; | ||
1105 | poff = block % blocks_per_page; | ||
1106 | page = find_or_create_page(inode->i_mapping, pnum, GFP_NOFS); | ||
1107 | if (page == bitmap_page) { | ||
1108 | /* | 1080 | /* |
1109 | * If both the bitmap and buddy are in | 1081 | * If both the bitmap and buddy are in |
1110 | * the same page we don't need to force | 1082 | * the same page we don't need to force |
1111 | * init the buddy | 1083 | * init the buddy |
1112 | */ | 1084 | */ |
1113 | unlock_page(page); | 1085 | ret = 0; |
1114 | } else if (page) { | 1086 | goto err; |
1115 | BUG_ON(page->mapping != inode->i_mapping); | ||
1116 | ret = ext4_mb_init_cache(page, bitmap); | ||
1117 | if (ret) { | ||
1118 | unlock_page(page); | ||
1119 | goto err; | ||
1120 | } | ||
1121 | unlock_page(page); | ||
1122 | } | 1087 | } |
1123 | if (page == NULL || !PageUptodate(page)) { | 1088 | /* init buddy cache */ |
1089 | page = e4b.bd_buddy_page; | ||
1090 | ret = ext4_mb_init_cache(page, e4b.bd_bitmap); | ||
1091 | if (ret) | ||
1092 | goto err; | ||
1093 | if (!PageUptodate(page)) { | ||
1124 | ret = -EIO; | 1094 | ret = -EIO; |
1125 | goto err; | 1095 | goto err; |
1126 | } | 1096 | } |
1127 | mark_page_accessed(page); | 1097 | mark_page_accessed(page); |
1128 | err: | 1098 | err: |
1129 | ext4_mb_put_buddy_cache_lock(sb, group, num_grp_locked); | 1099 | ext4_mb_put_buddy_page_lock(&e4b); |
1130 | if (bitmap_page) | ||
1131 | page_cache_release(bitmap_page); | ||
1132 | if (page) | ||
1133 | page_cache_release(page); | ||
1134 | return ret; | 1100 | return ret; |
1135 | } | 1101 | } |
1136 | 1102 | ||
@@ -1164,24 +1130,8 @@ ext4_mb_load_buddy(struct super_block *sb, ext4_group_t group, | |||
1164 | e4b->bd_group = group; | 1130 | e4b->bd_group = group; |
1165 | e4b->bd_buddy_page = NULL; | 1131 | e4b->bd_buddy_page = NULL; |
1166 | e4b->bd_bitmap_page = NULL; | 1132 | e4b->bd_bitmap_page = NULL; |
1167 | e4b->alloc_semp = &grp->alloc_sem; | ||
1168 | |||
1169 | /* Take the read lock on the group alloc | ||
1170 | * sem. This would make sure a parallel | ||
1171 | * ext4_mb_init_group happening on other | ||
1172 | * groups mapped by the page is blocked | ||
1173 | * till we are done with allocation | ||
1174 | */ | ||
1175 | repeat_load_buddy: | ||
1176 | down_read(e4b->alloc_semp); | ||
1177 | 1133 | ||
1178 | if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { | 1134 | if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { |
1179 | /* we need to check for group need init flag | ||
1180 | * with alloc_semp held so that we can be sure | ||
1181 | * that new blocks didn't get added to the group | ||
1182 | * when we are loading the buddy cache | ||
1183 | */ | ||
1184 | up_read(e4b->alloc_semp); | ||
1185 | /* | 1135 | /* |
1186 | * we need full data about the group | 1136 | * we need full data about the group |
1187 | * to make a good selection | 1137 | * to make a good selection |
@@ -1189,7 +1139,6 @@ repeat_load_buddy: | |||
1189 | ret = ext4_mb_init_group(sb, group); | 1139 | ret = ext4_mb_init_group(sb, group); |
1190 | if (ret) | 1140 | if (ret) |
1191 | return ret; | 1141 | return ret; |
1192 | goto repeat_load_buddy; | ||
1193 | } | 1142 | } |
1194 | 1143 | ||
1195 | /* | 1144 | /* |
@@ -1273,15 +1222,14 @@ repeat_load_buddy: | |||
1273 | return 0; | 1222 | return 0; |
1274 | 1223 | ||
1275 | err: | 1224 | err: |
1225 | if (page) | ||
1226 | page_cache_release(page); | ||
1276 | if (e4b->bd_bitmap_page) | 1227 | if (e4b->bd_bitmap_page) |
1277 | page_cache_release(e4b->bd_bitmap_page); | 1228 | page_cache_release(e4b->bd_bitmap_page); |
1278 | if (e4b->bd_buddy_page) | 1229 | if (e4b->bd_buddy_page) |
1279 | page_cache_release(e4b->bd_buddy_page); | 1230 | page_cache_release(e4b->bd_buddy_page); |
1280 | e4b->bd_buddy = NULL; | 1231 | e4b->bd_buddy = NULL; |
1281 | e4b->bd_bitmap = NULL; | 1232 | e4b->bd_bitmap = NULL; |
1282 | |||
1283 | /* Done with the buddy cache */ | ||
1284 | up_read(e4b->alloc_semp); | ||
1285 | return ret; | 1233 | return ret; |
1286 | } | 1234 | } |
1287 | 1235 | ||
@@ -1291,9 +1239,6 @@ static void ext4_mb_unload_buddy(struct ext4_buddy *e4b) | |||
1291 | page_cache_release(e4b->bd_bitmap_page); | 1239 | page_cache_release(e4b->bd_bitmap_page); |
1292 | if (e4b->bd_buddy_page) | 1240 | if (e4b->bd_buddy_page) |
1293 | page_cache_release(e4b->bd_buddy_page); | 1241 | page_cache_release(e4b->bd_buddy_page); |
1294 | /* Done with the buddy cache */ | ||
1295 | if (e4b->alloc_semp) | ||
1296 | up_read(e4b->alloc_semp); | ||
1297 | } | 1242 | } |
1298 | 1243 | ||
1299 | 1244 | ||
@@ -1606,9 +1551,6 @@ static void ext4_mb_use_best_found(struct ext4_allocation_context *ac, | |||
1606 | get_page(ac->ac_bitmap_page); | 1551 | get_page(ac->ac_bitmap_page); |
1607 | ac->ac_buddy_page = e4b->bd_buddy_page; | 1552 | ac->ac_buddy_page = e4b->bd_buddy_page; |
1608 | get_page(ac->ac_buddy_page); | 1553 | get_page(ac->ac_buddy_page); |
1609 | /* on allocation we use ac to track the held semaphore */ | ||
1610 | ac->alloc_semp = e4b->alloc_semp; | ||
1611 | e4b->alloc_semp = NULL; | ||
1612 | /* store last allocated for subsequent stream allocation */ | 1554 | /* store last allocated for subsequent stream allocation */ |
1613 | if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { | 1555 | if (ac->ac_flags & EXT4_MB_STREAM_ALLOC) { |
1614 | spin_lock(&sbi->s_md_lock); | 1556 | spin_lock(&sbi->s_md_lock); |
@@ -2659,7 +2601,7 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) | |||
2659 | struct super_block *sb = journal->j_private; | 2601 | struct super_block *sb = journal->j_private; |
2660 | struct ext4_buddy e4b; | 2602 | struct ext4_buddy e4b; |
2661 | struct ext4_group_info *db; | 2603 | struct ext4_group_info *db; |
2662 | int err, ret, count = 0, count2 = 0; | 2604 | int err, count = 0, count2 = 0; |
2663 | struct ext4_free_data *entry; | 2605 | struct ext4_free_data *entry; |
2664 | struct list_head *l, *ltmp; | 2606 | struct list_head *l, *ltmp; |
2665 | 2607 | ||
@@ -2669,15 +2611,9 @@ static void release_blocks_on_commit(journal_t *journal, transaction_t *txn) | |||
2669 | mb_debug(1, "gonna free %u blocks in group %u (0x%p):", | 2611 | mb_debug(1, "gonna free %u blocks in group %u (0x%p):", |
2670 | entry->count, entry->group, entry); | 2612 | entry->count, entry->group, entry); |
2671 | 2613 | ||
2672 | if (test_opt(sb, DISCARD)) { | 2614 | if (test_opt(sb, DISCARD)) |
2673 | ret = ext4_issue_discard(sb, entry->group, | 2615 | ext4_issue_discard(sb, entry->group, |
2674 | entry->start_blk, entry->count); | 2616 | entry->start_blk, entry->count); |
2675 | if (unlikely(ret == -EOPNOTSUPP)) { | ||
2676 | ext4_warning(sb, "discard not supported, " | ||
2677 | "disabling"); | ||
2678 | clear_opt(sb, DISCARD); | ||
2679 | } | ||
2680 | } | ||
2681 | 2617 | ||
2682 | err = ext4_mb_load_buddy(sb, entry->group, &e4b); | 2618 | err = ext4_mb_load_buddy(sb, entry->group, &e4b); |
2683 | /* we expect to find existing buddy because it's pinned */ | 2619 | /* we expect to find existing buddy because it's pinned */ |
@@ -4226,15 +4162,12 @@ static int ext4_mb_release_context(struct ext4_allocation_context *ac) | |||
4226 | spin_unlock(&pa->pa_lock); | 4162 | spin_unlock(&pa->pa_lock); |
4227 | } | 4163 | } |
4228 | } | 4164 | } |
4229 | if (ac->alloc_semp) | ||
4230 | up_read(ac->alloc_semp); | ||
4231 | if (pa) { | 4165 | if (pa) { |
4232 | /* | 4166 | /* |
4233 | * We want to add the pa to the right bucket. | 4167 | * We want to add the pa to the right bucket. |
4234 | * Remove it from the list and while adding | 4168 | * Remove it from the list and while adding |
4235 | * make sure the list to which we are adding | 4169 | * make sure the list to which we are adding |
4236 | * doesn't grow big. We need to release | 4170 | * doesn't grow big. |
4237 | * alloc_semp before calling ext4_mb_add_n_trim() | ||
4238 | */ | 4171 | */ |
4239 | if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) { | 4172 | if ((pa->pa_type == MB_GROUP_PA) && likely(pa->pa_free)) { |
4240 | spin_lock(pa->pa_obj_lock); | 4173 | spin_lock(pa->pa_obj_lock); |
@@ -4303,7 +4236,9 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, | |||
4303 | * there are enough free blocks to do block allocation | 4236 | * there are enough free blocks to do block allocation |
4304 | * and verify allocation doesn't exceed the quota limits. | 4237 | * and verify allocation doesn't exceed the quota limits. |
4305 | */ | 4238 | */ |
4306 | while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) { | 4239 | while (ar->len && |
4240 | ext4_claim_free_blocks(sbi, ar->len, ar->flags)) { | ||
4241 | |||
4307 | /* let others to free the space */ | 4242 | /* let others to free the space */ |
4308 | yield(); | 4243 | yield(); |
4309 | ar->len = ar->len >> 1; | 4244 | ar->len = ar->len >> 1; |
@@ -4313,9 +4248,15 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle, | |||
4313 | return 0; | 4248 | return 0; |
4314 | } | 4249 | } |
4315 | reserv_blks = ar->len; | 4250 | reserv_blks = ar->len; |
4316 | while (ar->len && dquot_alloc_block(ar->inode, ar->len)) { | 4251 | if (ar->flags & EXT4_MB_USE_ROOT_BLOCKS) { |
4317 | ar->flags |= EXT4_MB_HINT_NOPREALLOC; | 4252 | dquot_alloc_block_nofail(ar->inode, ar->len); |
4318 | ar->len--; | 4253 | } else { |
4254 | while (ar->len && | ||
4255 | dquot_alloc_block(ar->inode, ar->len)) { | ||
4256 | |||
4257 | ar->flags |= EXT4_MB_HINT_NOPREALLOC; | ||
4258 | ar->len--; | ||
4259 | } | ||
4319 | } | 4260 | } |
4320 | inquota = ar->len; | 4261 | inquota = ar->len; |
4321 | if (ar->len == 0) { | 4262 | if (ar->len == 0) { |
@@ -4704,6 +4645,127 @@ error_return: | |||
4704 | } | 4645 | } |
4705 | 4646 | ||
4706 | /** | 4647 | /** |
4648 | * ext4_add_groupblocks() -- Add given blocks to an existing group | ||
4649 | * @handle: handle to this transaction | ||
4650 | * @sb: super block | ||
4651 | * @block: start physical block to add to the block group | ||
4652 | * @count: number of blocks to free | ||
4653 | * | ||
4654 | * This marks the blocks as free in the bitmap and buddy. | ||
4655 | */ | ||
4656 | void ext4_add_groupblocks(handle_t *handle, struct super_block *sb, | ||
4657 | ext4_fsblk_t block, unsigned long count) | ||
4658 | { | ||
4659 | struct buffer_head *bitmap_bh = NULL; | ||
4660 | struct buffer_head *gd_bh; | ||
4661 | ext4_group_t block_group; | ||
4662 | ext4_grpblk_t bit; | ||
4663 | unsigned int i; | ||
4664 | struct ext4_group_desc *desc; | ||
4665 | struct ext4_sb_info *sbi = EXT4_SB(sb); | ||
4666 | struct ext4_buddy e4b; | ||
4667 | int err = 0, ret, blk_free_count; | ||
4668 | ext4_grpblk_t blocks_freed; | ||
4669 | struct ext4_group_info *grp; | ||
4670 | |||
4671 | ext4_debug("Adding block(s) %llu-%llu\n", block, block + count - 1); | ||
4672 | |||
4673 | ext4_get_group_no_and_offset(sb, block, &block_group, &bit); | ||
4674 | grp = ext4_get_group_info(sb, block_group); | ||
4675 | /* | ||
4676 | * Check to see if we are freeing blocks across a group | ||
4677 | * boundary. | ||
4678 | */ | ||
4679 | if (bit + count > EXT4_BLOCKS_PER_GROUP(sb)) | ||
4680 | goto error_return; | ||
4681 | |||
4682 | bitmap_bh = ext4_read_block_bitmap(sb, block_group); | ||
4683 | if (!bitmap_bh) | ||
4684 | goto error_return; | ||
4685 | desc = ext4_get_group_desc(sb, block_group, &gd_bh); | ||
4686 | if (!desc) | ||
4687 | goto error_return; | ||
4688 | |||
4689 | if (in_range(ext4_block_bitmap(sb, desc), block, count) || | ||
4690 | in_range(ext4_inode_bitmap(sb, desc), block, count) || | ||
4691 | in_range(block, ext4_inode_table(sb, desc), sbi->s_itb_per_group) || | ||
4692 | in_range(block + count - 1, ext4_inode_table(sb, desc), | ||
4693 | sbi->s_itb_per_group)) { | ||
4694 | ext4_error(sb, "Adding blocks in system zones - " | ||
4695 | "Block = %llu, count = %lu", | ||
4696 | block, count); | ||
4697 | goto error_return; | ||
4698 | } | ||
4699 | |||
4700 | BUFFER_TRACE(bitmap_bh, "getting write access"); | ||
4701 | err = ext4_journal_get_write_access(handle, bitmap_bh); | ||
4702 | if (err) | ||
4703 | goto error_return; | ||
4704 | |||
4705 | /* | ||
4706 | * We are about to modify some metadata. Call the journal APIs | ||
4707 | * to unshare ->b_data if a currently-committing transaction is | ||
4708 | * using it | ||
4709 | */ | ||
4710 | BUFFER_TRACE(gd_bh, "get_write_access"); | ||
4711 | err = ext4_journal_get_write_access(handle, gd_bh); | ||
4712 | if (err) | ||
4713 | goto error_return; | ||
4714 | |||
4715 | for (i = 0, blocks_freed = 0; i < count; i++) { | ||
4716 | BUFFER_TRACE(bitmap_bh, "clear bit"); | ||
4717 | if (!mb_test_bit(bit + i, bitmap_bh->b_data)) { | ||
4718 | ext4_error(sb, "bit already cleared for block %llu", | ||
4719 | (ext4_fsblk_t)(block + i)); | ||
4720 | BUFFER_TRACE(bitmap_bh, "bit already cleared"); | ||
4721 | } else { | ||
4722 | blocks_freed++; | ||
4723 | } | ||
4724 | } | ||
4725 | |||
4726 | err = ext4_mb_load_buddy(sb, block_group, &e4b); | ||
4727 | if (err) | ||
4728 | goto error_return; | ||
4729 | |||
4730 | /* | ||
4731 | * need to update group_info->bb_free and bitmap | ||
4732 | * with group lock held. generate_buddy looks at | ||
4733 | * them with group lock held | ||
4734 | */ | ||
4735 | ext4_lock_group(sb, block_group); | ||
4736 | mb_clear_bits(bitmap_bh->b_data, bit, count); | ||
4737 | mb_free_blocks(NULL, &e4b, bit, count); | ||
4738 | blk_free_count = blocks_freed + ext4_free_blks_count(sb, desc); | ||
4739 | ext4_free_blks_set(sb, desc, blk_free_count); | ||
4740 | desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc); | ||
4741 | ext4_unlock_group(sb, block_group); | ||
4742 | percpu_counter_add(&sbi->s_freeblocks_counter, blocks_freed); | ||
4743 | |||
4744 | if (sbi->s_log_groups_per_flex) { | ||
4745 | ext4_group_t flex_group = ext4_flex_group(sbi, block_group); | ||
4746 | atomic_add(blocks_freed, | ||
4747 | &sbi->s_flex_groups[flex_group].free_blocks); | ||
4748 | } | ||
4749 | |||
4750 | ext4_mb_unload_buddy(&e4b); | ||
4751 | |||
4752 | /* We dirtied the bitmap block */ | ||
4753 | BUFFER_TRACE(bitmap_bh, "dirtied bitmap block"); | ||
4754 | err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh); | ||
4755 | |||
4756 | /* And the group descriptor block */ | ||
4757 | BUFFER_TRACE(gd_bh, "dirtied group descriptor block"); | ||
4758 | ret = ext4_handle_dirty_metadata(handle, NULL, gd_bh); | ||
4759 | if (!err) | ||
4760 | err = ret; | ||
4761 | |||
4762 | error_return: | ||
4763 | brelse(bitmap_bh); | ||
4764 | ext4_std_error(sb, err); | ||
4765 | return; | ||
4766 | } | ||
4767 | |||
4768 | /** | ||
4707 | * ext4_trim_extent -- function to TRIM one single free extent in the group | 4769 | * ext4_trim_extent -- function to TRIM one single free extent in the group |
4708 | * @sb: super block for the file system | 4770 | * @sb: super block for the file system |
4709 | * @start: starting block of the free extent in the alloc. group | 4771 | * @start: starting block of the free extent in the alloc. group |
@@ -4715,11 +4777,10 @@ error_return: | |||
4715 | * one will allocate those blocks, mark them as used in buddy bitmap. This must | 4777 | * one will allocate those blocks, mark them as used in buddy bitmap. This must |
4716 | * be called under the group lock. | 4778 | * be called under the group lock. |
4717 | */ | 4779 | */ |
4718 | static int ext4_trim_extent(struct super_block *sb, int start, int count, | 4780 | static void ext4_trim_extent(struct super_block *sb, int start, int count, |
4719 | ext4_group_t group, struct ext4_buddy *e4b) | 4781 | ext4_group_t group, struct ext4_buddy *e4b) |
4720 | { | 4782 | { |
4721 | struct ext4_free_extent ex; | 4783 | struct ext4_free_extent ex; |
4722 | int ret = 0; | ||
4723 | 4784 | ||
4724 | assert_spin_locked(ext4_group_lock_ptr(sb, group)); | 4785 | assert_spin_locked(ext4_group_lock_ptr(sb, group)); |
4725 | 4786 | ||
@@ -4733,12 +4794,9 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count, | |||
4733 | */ | 4794 | */ |
4734 | mb_mark_used(e4b, &ex); | 4795 | mb_mark_used(e4b, &ex); |
4735 | ext4_unlock_group(sb, group); | 4796 | ext4_unlock_group(sb, group); |
4736 | 4797 | ext4_issue_discard(sb, group, start, count); | |
4737 | ret = ext4_issue_discard(sb, group, start, count); | ||
4738 | |||
4739 | ext4_lock_group(sb, group); | 4798 | ext4_lock_group(sb, group); |
4740 | mb_free_blocks(NULL, e4b, start, ex.fe_len); | 4799 | mb_free_blocks(NULL, e4b, start, ex.fe_len); |
4741 | return ret; | ||
4742 | } | 4800 | } |
4743 | 4801 | ||
4744 | /** | 4802 | /** |
@@ -4760,21 +4818,26 @@ static int ext4_trim_extent(struct super_block *sb, int start, int count, | |||
4760 | * the group buddy bitmap. This is done until whole group is scanned. | 4818 | * the group buddy bitmap. This is done until whole group is scanned. |
4761 | */ | 4819 | */ |
4762 | static ext4_grpblk_t | 4820 | static ext4_grpblk_t |
4763 | ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b, | 4821 | ext4_trim_all_free(struct super_block *sb, ext4_group_t group, |
4764 | ext4_grpblk_t start, ext4_grpblk_t max, ext4_grpblk_t minblocks) | 4822 | ext4_grpblk_t start, ext4_grpblk_t max, |
4823 | ext4_grpblk_t minblocks) | ||
4765 | { | 4824 | { |
4766 | void *bitmap; | 4825 | void *bitmap; |
4767 | ext4_grpblk_t next, count = 0; | 4826 | ext4_grpblk_t next, count = 0; |
4768 | ext4_group_t group; | 4827 | struct ext4_buddy e4b; |
4769 | int ret = 0; | 4828 | int ret; |
4770 | 4829 | ||
4771 | BUG_ON(e4b == NULL); | 4830 | ret = ext4_mb_load_buddy(sb, group, &e4b); |
4831 | if (ret) { | ||
4832 | ext4_error(sb, "Error in loading buddy " | ||
4833 | "information for %u", group); | ||
4834 | return ret; | ||
4835 | } | ||
4836 | bitmap = e4b.bd_bitmap; | ||
4772 | 4837 | ||
4773 | bitmap = e4b->bd_bitmap; | ||
4774 | group = e4b->bd_group; | ||
4775 | start = (e4b->bd_info->bb_first_free > start) ? | ||
4776 | e4b->bd_info->bb_first_free : start; | ||
4777 | ext4_lock_group(sb, group); | 4838 | ext4_lock_group(sb, group); |
4839 | start = (e4b.bd_info->bb_first_free > start) ? | ||
4840 | e4b.bd_info->bb_first_free : start; | ||
4778 | 4841 | ||
4779 | while (start < max) { | 4842 | while (start < max) { |
4780 | start = mb_find_next_zero_bit(bitmap, max, start); | 4843 | start = mb_find_next_zero_bit(bitmap, max, start); |
@@ -4783,10 +4846,8 @@ ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b, | |||
4783 | next = mb_find_next_bit(bitmap, max, start); | 4846 | next = mb_find_next_bit(bitmap, max, start); |
4784 | 4847 | ||
4785 | if ((next - start) >= minblocks) { | 4848 | if ((next - start) >= minblocks) { |
4786 | ret = ext4_trim_extent(sb, start, | 4849 | ext4_trim_extent(sb, start, |
4787 | next - start, group, e4b); | 4850 | next - start, group, &e4b); |
4788 | if (ret < 0) | ||
4789 | break; | ||
4790 | count += next - start; | 4851 | count += next - start; |
4791 | } | 4852 | } |
4792 | start = next + 1; | 4853 | start = next + 1; |
@@ -4802,17 +4863,15 @@ ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b, | |||
4802 | ext4_lock_group(sb, group); | 4863 | ext4_lock_group(sb, group); |
4803 | } | 4864 | } |
4804 | 4865 | ||
4805 | if ((e4b->bd_info->bb_free - count) < minblocks) | 4866 | if ((e4b.bd_info->bb_free - count) < minblocks) |
4806 | break; | 4867 | break; |
4807 | } | 4868 | } |
4808 | ext4_unlock_group(sb, group); | 4869 | ext4_unlock_group(sb, group); |
4870 | ext4_mb_unload_buddy(&e4b); | ||
4809 | 4871 | ||
4810 | ext4_debug("trimmed %d blocks in the group %d\n", | 4872 | ext4_debug("trimmed %d blocks in the group %d\n", |
4811 | count, group); | 4873 | count, group); |
4812 | 4874 | ||
4813 | if (ret < 0) | ||
4814 | count = ret; | ||
4815 | |||
4816 | return count; | 4875 | return count; |
4817 | } | 4876 | } |
4818 | 4877 | ||
@@ -4830,11 +4889,11 @@ ext4_trim_all_free(struct super_block *sb, struct ext4_buddy *e4b, | |||
4830 | */ | 4889 | */ |
4831 | int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) | 4890 | int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) |
4832 | { | 4891 | { |
4833 | struct ext4_buddy e4b; | 4892 | struct ext4_group_info *grp; |
4834 | ext4_group_t first_group, last_group; | 4893 | ext4_group_t first_group, last_group; |
4835 | ext4_group_t group, ngroups = ext4_get_groups_count(sb); | 4894 | ext4_group_t group, ngroups = ext4_get_groups_count(sb); |
4836 | ext4_grpblk_t cnt = 0, first_block, last_block; | 4895 | ext4_grpblk_t cnt = 0, first_block, last_block; |
4837 | uint64_t start, len, minlen, trimmed; | 4896 | uint64_t start, len, minlen, trimmed = 0; |
4838 | ext4_fsblk_t first_data_blk = | 4897 | ext4_fsblk_t first_data_blk = |
4839 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); | 4898 | le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block); |
4840 | int ret = 0; | 4899 | int ret = 0; |
@@ -4842,7 +4901,6 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) | |||
4842 | start = range->start >> sb->s_blocksize_bits; | 4901 | start = range->start >> sb->s_blocksize_bits; |
4843 | len = range->len >> sb->s_blocksize_bits; | 4902 | len = range->len >> sb->s_blocksize_bits; |
4844 | minlen = range->minlen >> sb->s_blocksize_bits; | 4903 | minlen = range->minlen >> sb->s_blocksize_bits; |
4845 | trimmed = 0; | ||
4846 | 4904 | ||
4847 | if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb))) | 4905 | if (unlikely(minlen > EXT4_BLOCKS_PER_GROUP(sb))) |
4848 | return -EINVAL; | 4906 | return -EINVAL; |
@@ -4863,11 +4921,12 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) | |||
4863 | return -EINVAL; | 4921 | return -EINVAL; |
4864 | 4922 | ||
4865 | for (group = first_group; group <= last_group; group++) { | 4923 | for (group = first_group; group <= last_group; group++) { |
4866 | ret = ext4_mb_load_buddy(sb, group, &e4b); | 4924 | grp = ext4_get_group_info(sb, group); |
4867 | if (ret) { | 4925 | /* We only do this if the grp has never been initialized */ |
4868 | ext4_error(sb, "Error in loading buddy " | 4926 | if (unlikely(EXT4_MB_GRP_NEED_INIT(grp))) { |
4869 | "information for %u", group); | 4927 | ret = ext4_mb_init_group(sb, group); |
4870 | break; | 4928 | if (ret) |
4929 | break; | ||
4871 | } | 4930 | } |
4872 | 4931 | ||
4873 | /* | 4932 | /* |
@@ -4880,16 +4939,14 @@ int ext4_trim_fs(struct super_block *sb, struct fstrim_range *range) | |||
4880 | last_block = first_block + len; | 4939 | last_block = first_block + len; |
4881 | len -= last_block - first_block; | 4940 | len -= last_block - first_block; |
4882 | 4941 | ||
4883 | if (e4b.bd_info->bb_free >= minlen) { | 4942 | if (grp->bb_free >= minlen) { |
4884 | cnt = ext4_trim_all_free(sb, &e4b, first_block, | 4943 | cnt = ext4_trim_all_free(sb, group, first_block, |
4885 | last_block, minlen); | 4944 | last_block, minlen); |
4886 | if (cnt < 0) { | 4945 | if (cnt < 0) { |
4887 | ret = cnt; | 4946 | ret = cnt; |
4888 | ext4_mb_unload_buddy(&e4b); | ||
4889 | break; | 4947 | break; |
4890 | } | 4948 | } |
4891 | } | 4949 | } |
4892 | ext4_mb_unload_buddy(&e4b); | ||
4893 | trimmed += cnt; | 4950 | trimmed += cnt; |
4894 | first_block = 0; | 4951 | first_block = 0; |
4895 | } | 4952 | } |
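The rewritten ext4_trim_all_free() above still performs the same scan it always did, now against a buddy it loads itself: walk the group bitmap, find each run of free blocks, and discard runs of at least minlen blocks. A simplified user-space sketch of that loop, with a plain char array (1 = used, 0 = free) standing in for the kernel bitmap helpers:

#include <stdio.h>

static int next_with_value(const char *bm, int max, int start, char val)
{
        while (start < max && bm[start] != val)
                start++;
        return start;
}

/* returns the number of blocks that would be discarded */
static int trim_all_free(const char *bm, int max, int minlen)
{
        int start = 0, next, count = 0;

        while (start < max) {
                start = next_with_value(bm, max, start, 0); /* next free block */
                if (start >= max)
                        break;
                next = next_with_value(bm, max, start, 1);  /* end of free run */
                if (next - start >= minlen) {
                        printf("discard blocks %d-%d\n", start, next - 1);
                        count += next - start;
                }
                start = next + 1;
        }
        return count;
}

int main(void)
{
        char bitmap[] = { 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0 };

        printf("trimmed %d blocks\n",
               trim_all_free(bitmap, (int)sizeof(bitmap), 3));
        return 0;
}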
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h index 22bd4d7f289b..20b5e7bfebd1 100644 --- a/fs/ext4/mballoc.h +++ b/fs/ext4/mballoc.h | |||
@@ -193,11 +193,6 @@ struct ext4_allocation_context { | |||
193 | __u8 ac_op; /* operation, for history only */ | 193 | __u8 ac_op; /* operation, for history only */ |
194 | struct page *ac_bitmap_page; | 194 | struct page *ac_bitmap_page; |
195 | struct page *ac_buddy_page; | 195 | struct page *ac_buddy_page; |
196 | /* | ||
197 | * pointer to the held semaphore upon successful | ||
198 | * block allocation | ||
199 | */ | ||
200 | struct rw_semaphore *alloc_semp; | ||
201 | struct ext4_prealloc_space *ac_pa; | 196 | struct ext4_prealloc_space *ac_pa; |
202 | struct ext4_locality_group *ac_lg; | 197 | struct ext4_locality_group *ac_lg; |
203 | }; | 198 | }; |
@@ -215,7 +210,6 @@ struct ext4_buddy { | |||
215 | struct super_block *bd_sb; | 210 | struct super_block *bd_sb; |
216 | __u16 bd_blkbits; | 211 | __u16 bd_blkbits; |
217 | ext4_group_t bd_group; | 212 | ext4_group_t bd_group; |
218 | struct rw_semaphore *alloc_semp; | ||
219 | }; | 213 | }; |
220 | #define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap) | 214 | #define EXT4_MB_BITMAP(e4b) ((e4b)->bd_bitmap) |
221 | #define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy) | 215 | #define EXT4_MB_BUDDY(e4b) ((e4b)->bd_buddy) |
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c index 92816b4e0f16..b57b98fb44d1 100644 --- a/fs/ext4/migrate.c +++ b/fs/ext4/migrate.c | |||
@@ -376,7 +376,7 @@ static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode, | |||
376 | * We have the extent map build with the tmp inode. | 376 | * We have the extent map build with the tmp inode. |
377 | * Now copy the i_data across | 377 | * Now copy the i_data across |
378 | */ | 378 | */ |
379 | ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS); | 379 | ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS); |
380 | memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data)); | 380 | memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data)); |
381 | 381 | ||
382 | /* | 382 | /* |
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c new file mode 100644 index 000000000000..9bdef3f537c5 --- /dev/null +++ b/fs/ext4/mmp.c | |||
@@ -0,0 +1,351 @@ | |||
1 | #include <linux/fs.h> | ||
2 | #include <linux/random.h> | ||
3 | #include <linux/buffer_head.h> | ||
4 | #include <linux/utsname.h> | ||
5 | #include <linux/kthread.h> | ||
6 | |||
7 | #include "ext4.h" | ||
8 | |||
9 | /* | ||
10 | * Write the MMP block using WRITE_SYNC to try to get the block on-disk | ||
11 | * faster. | ||
12 | */ | ||
13 | static int write_mmp_block(struct buffer_head *bh) | ||
14 | { | ||
15 | mark_buffer_dirty(bh); | ||
16 | lock_buffer(bh); | ||
17 | bh->b_end_io = end_buffer_write_sync; | ||
18 | get_bh(bh); | ||
19 | submit_bh(WRITE_SYNC, bh); | ||
20 | wait_on_buffer(bh); | ||
21 | if (unlikely(!buffer_uptodate(bh))) | ||
22 | return 1; | ||
23 | |||
24 | return 0; | ||
25 | } | ||
26 | |||
27 | /* | ||
28 | * Read the MMP block. It _must_ be read from disk and hence we clear the | ||
29 | * uptodate flag on the buffer. | ||
30 | */ | ||
31 | static int read_mmp_block(struct super_block *sb, struct buffer_head **bh, | ||
32 | ext4_fsblk_t mmp_block) | ||
33 | { | ||
34 | struct mmp_struct *mmp; | ||
35 | |||
36 | if (*bh) | ||
37 | clear_buffer_uptodate(*bh); | ||
38 | |||
39 | /* This would be sb_bread(sb, mmp_block), except we need to be sure | ||
40 | * that the MD RAID device cache has been bypassed, and that the read | ||
41 | * is not blocked in the elevator. */ | ||
42 | if (!*bh) | ||
43 | *bh = sb_getblk(sb, mmp_block); | ||
44 | if (*bh) { | ||
45 | get_bh(*bh); | ||
46 | lock_buffer(*bh); | ||
47 | (*bh)->b_end_io = end_buffer_read_sync; | ||
48 | submit_bh(READ_SYNC, *bh); | ||
49 | wait_on_buffer(*bh); | ||
50 | if (!buffer_uptodate(*bh)) { | ||
51 | brelse(*bh); | ||
52 | *bh = NULL; | ||
53 | } | ||
54 | } | ||
55 | if (!*bh) { | ||
56 | ext4_warning(sb, "Error while reading MMP block %llu", | ||
57 | mmp_block); | ||
58 | return -EIO; | ||
59 | } | ||
60 | |||
61 | mmp = (struct mmp_struct *)((*bh)->b_data); | ||
62 | if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) | ||
63 | return -EINVAL; | ||
64 | |||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | /* | ||
69 | * Dump as much information as possible to help the admin. | ||
70 | */ | ||
71 | void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp, | ||
72 | const char *function, unsigned int line, const char *msg) | ||
73 | { | ||
74 | __ext4_warning(sb, function, line, msg); | ||
75 | __ext4_warning(sb, function, line, | ||
76 | "MMP failure info: last update time: %llu, last update " | ||
77 | "node: %s, last update device: %s\n", | ||
78 | (long long unsigned int) le64_to_cpu(mmp->mmp_time), | ||
79 | mmp->mmp_nodename, mmp->mmp_bdevname); | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * kmmpd will update the MMP sequence every s_mmp_update_interval seconds | ||
84 | */ | ||
85 | static int kmmpd(void *data) | ||
86 | { | ||
87 | struct super_block *sb = ((struct mmpd_data *) data)->sb; | ||
88 | struct buffer_head *bh = ((struct mmpd_data *) data)->bh; | ||
89 | struct ext4_super_block *es = EXT4_SB(sb)->s_es; | ||
90 | struct mmp_struct *mmp; | ||
91 | ext4_fsblk_t mmp_block; | ||
92 | u32 seq = 0; | ||
93 | unsigned long failed_writes = 0; | ||
94 | int mmp_update_interval = le16_to_cpu(es->s_mmp_update_interval); | ||
95 | unsigned mmp_check_interval; | ||
96 | unsigned long last_update_time; | ||
97 | unsigned long diff; | ||
98 | int retval; | ||
99 | |||
100 | mmp_block = le64_to_cpu(es->s_mmp_block); | ||
101 | mmp = (struct mmp_struct *)(bh->b_data); | ||
102 | mmp->mmp_time = cpu_to_le64(get_seconds()); | ||
103 | /* | ||
104 | * Start with the higher mmp_check_interval and reduce it if | ||
105 | * the MMP block is being updated on time. | ||
106 | */ | ||
107 | mmp_check_interval = max(EXT4_MMP_CHECK_MULT * mmp_update_interval, | ||
108 | EXT4_MMP_MIN_CHECK_INTERVAL); | ||
109 | mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval); | ||
110 | bdevname(bh->b_bdev, mmp->mmp_bdevname); | ||
111 | |||
112 | memcpy(mmp->mmp_nodename, init_utsname()->sysname, | ||
113 | sizeof(mmp->mmp_nodename)); | ||
114 | |||
115 | while (!kthread_should_stop()) { | ||
116 | if (++seq > EXT4_MMP_SEQ_MAX) | ||
117 | seq = 1; | ||
118 | |||
119 | mmp->mmp_seq = cpu_to_le32(seq); | ||
120 | mmp->mmp_time = cpu_to_le64(get_seconds()); | ||
121 | last_update_time = jiffies; | ||
122 | |||
123 | retval = write_mmp_block(bh); | ||
124 | /* | ||
125 | * Don't spew too many error messages. Print one every | ||
126 | * (s_mmp_update_interval * 60) seconds. | ||
127 | */ | ||
128 | if (retval && (failed_writes % 60) == 0) { | ||
129 | ext4_error(sb, "Error writing to MMP block"); | ||
130 | failed_writes++; | ||
131 | } | ||
132 | |||
133 | if (!(le32_to_cpu(es->s_feature_incompat) & | ||
134 | EXT4_FEATURE_INCOMPAT_MMP)) { | ||
135 | ext4_warning(sb, "kmmpd being stopped since MMP feature" | ||
136 | " has been disabled."); | ||
137 | EXT4_SB(sb)->s_mmp_tsk = NULL; | ||
138 | goto failed; | ||
139 | } | ||
140 | |||
141 | if (sb->s_flags & MS_RDONLY) { | ||
142 | ext4_warning(sb, "kmmpd being stopped since filesystem " | ||
143 | "has been remounted as readonly."); | ||
144 | EXT4_SB(sb)->s_mmp_tsk = NULL; | ||
145 | goto failed; | ||
146 | } | ||
147 | |||
148 | diff = jiffies - last_update_time; | ||
149 | if (diff < mmp_update_interval * HZ) | ||
150 | schedule_timeout_interruptible(mmp_update_interval * | ||
151 | HZ - diff); | ||
152 | |||
153 | /* | ||
154 | * We need to make sure that more than mmp_check_interval | ||
155 | * seconds have not passed since writing. If that has happened | ||
156 | * we need to check if the MMP block is as we left it. | ||
157 | */ | ||
158 | diff = jiffies - last_update_time; | ||
159 | if (diff > mmp_check_interval * HZ) { | ||
160 | struct buffer_head *bh_check = NULL; | ||
161 | struct mmp_struct *mmp_check; | ||
162 | |||
163 | retval = read_mmp_block(sb, &bh_check, mmp_block); | ||
164 | if (retval) { | ||
165 | ext4_error(sb, "error reading MMP data: %d", | ||
166 | retval); | ||
167 | |||
168 | EXT4_SB(sb)->s_mmp_tsk = NULL; | ||
169 | goto failed; | ||
170 | } | ||
171 | |||
172 | mmp_check = (struct mmp_struct *)(bh_check->b_data); | ||
173 | if (mmp->mmp_seq != mmp_check->mmp_seq || | ||
174 | memcmp(mmp->mmp_nodename, mmp_check->mmp_nodename, | ||
175 | sizeof(mmp->mmp_nodename))) { | ||
176 | dump_mmp_msg(sb, mmp_check, | ||
177 | "Error while updating MMP info. " | ||
178 | "The filesystem seems to have been" | ||
179 | " multiply mounted."); | ||
180 | ext4_error(sb, "abort"); | ||
181 | goto failed; | ||
182 | } | ||
183 | put_bh(bh_check); | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * Adjust the mmp_check_interval depending on how much time | ||
188 | * it took for the MMP block to be written. | ||
189 | */ | ||
190 | mmp_check_interval = max(min(EXT4_MMP_CHECK_MULT * diff / HZ, | ||
191 | EXT4_MMP_MAX_CHECK_INTERVAL), | ||
192 | EXT4_MMP_MIN_CHECK_INTERVAL); | ||
193 | mmp->mmp_check_interval = cpu_to_le16(mmp_check_interval); | ||
194 | } | ||
195 | |||
196 | /* | ||
197 | * Unmount seems to be clean. | ||
198 | */ | ||
199 | mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN); | ||
200 | mmp->mmp_time = cpu_to_le64(get_seconds()); | ||
201 | |||
202 | retval = write_mmp_block(bh); | ||
203 | |||
204 | failed: | ||
205 | kfree(data); | ||
206 | brelse(bh); | ||
207 | return retval; | ||
208 | } | ||
209 | |||
210 | /* | ||
211 | * Get a random new sequence number but make sure it is not greater than | ||
212 | * EXT4_MMP_SEQ_MAX. | ||
213 | */ | ||
214 | static unsigned int mmp_new_seq(void) | ||
215 | { | ||
216 | u32 new_seq; | ||
217 | |||
218 | do { | ||
219 | get_random_bytes(&new_seq, sizeof(u32)); | ||
220 | } while (new_seq > EXT4_MMP_SEQ_MAX); | ||
221 | |||
222 | return new_seq; | ||
223 | } | ||
224 | |||
225 | /* | ||
226 | * Protect the filesystem from being mounted more than once. | ||
227 | */ | ||
228 | int ext4_multi_mount_protect(struct super_block *sb, | ||
229 | ext4_fsblk_t mmp_block) | ||
230 | { | ||
231 | struct ext4_super_block *es = EXT4_SB(sb)->s_es; | ||
232 | struct buffer_head *bh = NULL; | ||
233 | struct mmp_struct *mmp = NULL; | ||
234 | struct mmpd_data *mmpd_data; | ||
235 | u32 seq; | ||
236 | unsigned int mmp_check_interval = le16_to_cpu(es->s_mmp_update_interval); | ||
237 | unsigned int wait_time = 0; | ||
238 | int retval; | ||
239 | |||
240 | if (mmp_block < le32_to_cpu(es->s_first_data_block) || | ||
241 | mmp_block >= ext4_blocks_count(es)) { | ||
242 | ext4_warning(sb, "Invalid MMP block in superblock"); | ||
243 | goto failed; | ||
244 | } | ||
245 | |||
246 | retval = read_mmp_block(sb, &bh, mmp_block); | ||
247 | if (retval) | ||
248 | goto failed; | ||
249 | |||
250 | mmp = (struct mmp_struct *)(bh->b_data); | ||
251 | |||
252 | if (mmp_check_interval < EXT4_MMP_MIN_CHECK_INTERVAL) | ||
253 | mmp_check_interval = EXT4_MMP_MIN_CHECK_INTERVAL; | ||
254 | |||
255 | /* | ||
256 | * If check_interval in MMP block is larger, use that instead of | ||
257 | * update_interval from the superblock. | ||
258 | */ | ||
259 | if (mmp->mmp_check_interval > mmp_check_interval) | ||
260 | mmp_check_interval = mmp->mmp_check_interval; | ||
261 | |||
262 | seq = le32_to_cpu(mmp->mmp_seq); | ||
263 | if (seq == EXT4_MMP_SEQ_CLEAN) | ||
264 | goto skip; | ||
265 | |||
266 | if (seq == EXT4_MMP_SEQ_FSCK) { | ||
267 | dump_mmp_msg(sb, mmp, "fsck is running on the filesystem"); | ||
268 | goto failed; | ||
269 | } | ||
270 | |||
271 | wait_time = min(mmp_check_interval * 2 + 1, | ||
272 | mmp_check_interval + 60); | ||
273 | |||
274 | /* Print MMP interval if more than 20 secs. */ | ||
275 | if (wait_time > EXT4_MMP_MIN_CHECK_INTERVAL * 4) | ||
276 | ext4_warning(sb, "MMP interval %u higher than expected, please" | ||
277 | " wait.\n", wait_time * 2); | ||
278 | |||
279 | if (schedule_timeout_interruptible(HZ * wait_time) != 0) { | ||
280 | ext4_warning(sb, "MMP startup interrupted, failing mount\n"); | ||
281 | goto failed; | ||
282 | } | ||
283 | |||
284 | retval = read_mmp_block(sb, &bh, mmp_block); | ||
285 | if (retval) | ||
286 | goto failed; | ||
287 | mmp = (struct mmp_struct *)(bh->b_data); | ||
288 | if (seq != le32_to_cpu(mmp->mmp_seq)) { | ||
289 | dump_mmp_msg(sb, mmp, | ||
290 | "Device is already active on another node."); | ||
291 | goto failed; | ||
292 | } | ||
293 | |||
294 | skip: | ||
295 | /* | ||
296 | * write a new random sequence number. | ||
297 | */ | ||
298 | mmp->mmp_seq = seq = cpu_to_le32(mmp_new_seq()); | ||
299 | |||
300 | retval = write_mmp_block(bh); | ||
301 | if (retval) | ||
302 | goto failed; | ||
303 | |||
304 | /* | ||
305 | * wait for MMP interval and check mmp_seq. | ||
306 | */ | ||
307 | if (schedule_timeout_interruptible(HZ * wait_time) != 0) { | ||
308 | ext4_warning(sb, "MMP startup interrupted, failing mount\n"); | ||
309 | goto failed; | ||
310 | } | ||
311 | |||
312 | retval = read_mmp_block(sb, &bh, mmp_block); | ||
313 | if (retval) | ||
314 | goto failed; | ||
315 | mmp = (struct mmp_struct *)(bh->b_data); | ||
316 | if (seq != le32_to_cpu(mmp->mmp_seq)) { | ||
317 | dump_mmp_msg(sb, mmp, | ||
318 | "Device is already active on another node."); | ||
319 | goto failed; | ||
320 | } | ||
321 | |||
322 | mmpd_data = kmalloc(sizeof(struct mmpd_data), GFP_KERNEL); | ||
323 | if (!mmpd_data) { | ||
324 | ext4_warning(sb, "not enough memory for mmpd_data"); | ||
325 | goto failed; | ||
326 | } | ||
327 | mmpd_data->sb = sb; | ||
328 | mmpd_data->bh = bh; | ||
329 | |||
330 | /* | ||
331 | * Start a kernel thread to update the MMP block periodically. | ||
332 | */ | ||
333 | EXT4_SB(sb)->s_mmp_tsk = kthread_run(kmmpd, mmpd_data, "kmmpd-%s", | ||
334 | bdevname(bh->b_bdev, | ||
335 | mmp->mmp_bdevname)); | ||
336 | if (IS_ERR(EXT4_SB(sb)->s_mmp_tsk)) { | ||
337 | EXT4_SB(sb)->s_mmp_tsk = NULL; | ||
338 | kfree(mmpd_data); | ||
339 | ext4_warning(sb, "Unable to create kmmpd thread for %s.", | ||
340 | sb->s_id); | ||
341 | goto failed; | ||
342 | } | ||
343 | |||
344 | return 0; | ||
345 | |||
346 | failed: | ||
347 | brelse(bh); | ||
348 | return 1; | ||
349 | } | ||
350 | |||
351 | |||
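The new mmp.c above revolves around two small calculations: kmmpd derives its check interval from the superblock's update interval, and ext4_multi_mount_protect() derives how long to wait before re-reading the MMP block and comparing sequence numbers. A sketch of just that arithmetic; the constant values are assumed placeholders rather than the definitions in ext4.h:

#include <stdio.h>

#define EXT4_MMP_CHECK_MULT         2U  /* assumed */
#define EXT4_MMP_MIN_CHECK_INTERVAL 5U  /* seconds, assumed */

static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
        unsigned int update_interval = 5;       /* s_mmp_update_interval */

        /* kmmpd starts from the larger, safer check interval */
        unsigned int check_interval =
                max_u(EXT4_MMP_CHECK_MULT * update_interval,
                      EXT4_MMP_MIN_CHECK_INTERVAL);

        /* ext4_multi_mount_protect() sleeps this long before re-reading
         * the MMP block to see whether another node bumped mmp_seq */
        unsigned int wait_time =
                min_u(check_interval * 2 + 1, check_interval + 60);

        printf("check every %us, wait %us at mount\n",
               check_interval, wait_time);
        return 0;
}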
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c index b9f3e7862f13..2b8304bf3c50 100644 --- a/fs/ext4/move_extent.c +++ b/fs/ext4/move_extent.c | |||
@@ -876,8 +876,7 @@ move_extent_per_page(struct file *o_filp, struct inode *donor_inode, | |||
876 | * It needs to call wait_on_page_writeback() to wait for the | 876 | * It needs to call wait_on_page_writeback() to wait for the |
877 | * writeback of the page. | 877 | * writeback of the page. |
878 | */ | 878 | */ |
879 | if (PageWriteback(page)) | 879 | wait_on_page_writeback(page); |
880 | wait_on_page_writeback(page); | ||
881 | 880 | ||
882 | /* Release old bh and drop refs */ | 881 | /* Release old bh and drop refs */ |
883 | try_to_release_page(page, 0); | 882 | try_to_release_page(page, 0); |
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 67fd0b025858..b754b7721f51 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c | |||
@@ -1413,10 +1413,22 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry, | |||
1413 | frame->at = entries; | 1413 | frame->at = entries; |
1414 | frame->bh = bh; | 1414 | frame->bh = bh; |
1415 | bh = bh2; | 1415 | bh = bh2; |
1416 | |||
1417 | ext4_handle_dirty_metadata(handle, dir, frame->bh); | ||
1418 | ext4_handle_dirty_metadata(handle, dir, bh); | ||
1419 | |||
1416 | de = do_split(handle,dir, &bh, frame, &hinfo, &retval); | 1420 | de = do_split(handle,dir, &bh, frame, &hinfo, &retval); |
1417 | dx_release (frames); | 1421 | if (!de) { |
1418 | if (!(de)) | 1422 | /* |
1423 | * Even if the block split failed, we have to properly write | ||
1424 | * out all the changes we did so far. Otherwise we can end up | ||
1425 | * with a corrupted filesystem. | ||
1426 | */ | ||
1427 | ext4_mark_inode_dirty(handle, dir); | ||
1428 | dx_release(frames); | ||
1419 | return retval; | 1429 | return retval; |
1430 | } | ||
1431 | dx_release(frames); | ||
1420 | 1432 | ||
1421 | retval = add_dirent_to_buf(handle, dentry, inode, de, bh); | 1433 | retval = add_dirent_to_buf(handle, dentry, inode, de, bh); |
1422 | brelse(bh); | 1434 | brelse(bh); |
@@ -2240,6 +2252,7 @@ static int ext4_symlink(struct inode *dir, | |||
2240 | handle_t *handle; | 2252 | handle_t *handle; |
2241 | struct inode *inode; | 2253 | struct inode *inode; |
2242 | int l, err, retries = 0; | 2254 | int l, err, retries = 0; |
2255 | int credits; | ||
2243 | 2256 | ||
2244 | l = strlen(symname)+1; | 2257 | l = strlen(symname)+1; |
2245 | if (l > dir->i_sb->s_blocksize) | 2258 | if (l > dir->i_sb->s_blocksize) |
@@ -2247,10 +2260,26 @@ static int ext4_symlink(struct inode *dir, | |||
2247 | 2260 | ||
2248 | dquot_initialize(dir); | 2261 | dquot_initialize(dir); |
2249 | 2262 | ||
2263 | if (l > EXT4_N_BLOCKS * 4) { | ||
2264 | /* | ||
2265 | * For non-fast symlinks, we just allocate inode and put it on | ||
2266 | * orphan list in the first transaction => we need bitmap, | ||
2267 | * group descriptor, sb, inode block, quota blocks. | ||
2268 | */ | ||
2269 | credits = 4 + EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb); | ||
2270 | } else { | ||
2271 | /* | ||
2272 | * Fast symlink. We have to add entry to directory | ||
2273 | * (EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS), | ||
2274 | * allocate new inode (bitmap, group descriptor, inode block, | ||
2275 | * quota blocks, sb is already counted in previous macros). | ||
2276 | */ | ||
2277 | credits = EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | ||
2278 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 + | ||
2279 | EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb); | ||
2280 | } | ||
2250 | retry: | 2281 | retry: |
2251 | handle = ext4_journal_start(dir, EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | 2282 | handle = ext4_journal_start(dir, credits); |
2252 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 + | ||
2253 | EXT4_MAXQUOTAS_INIT_BLOCKS(dir->i_sb)); | ||
2254 | if (IS_ERR(handle)) | 2283 | if (IS_ERR(handle)) |
2255 | return PTR_ERR(handle); | 2284 | return PTR_ERR(handle); |
2256 | 2285 | ||
@@ -2263,21 +2292,44 @@ retry: | |||
2263 | if (IS_ERR(inode)) | 2292 | if (IS_ERR(inode)) |
2264 | goto out_stop; | 2293 | goto out_stop; |
2265 | 2294 | ||
2266 | if (l > sizeof(EXT4_I(inode)->i_data)) { | 2295 | if (l > EXT4_N_BLOCKS * 4) { |
2267 | inode->i_op = &ext4_symlink_inode_operations; | 2296 | inode->i_op = &ext4_symlink_inode_operations; |
2268 | ext4_set_aops(inode); | 2297 | ext4_set_aops(inode); |
2269 | /* | 2298 | /* |
2270 | * page_symlink() calls into ext4_prepare/commit_write. | 2299 | * We cannot call page_symlink() with transaction started |
2271 | * We have a transaction open. All is sweetness. It also sets | 2300 | * because it calls into ext4_write_begin() which can wait |
2272 | * i_size in generic_commit_write(). | 2301 | * for transaction commit if we are running out of space |
2302 | * and thus we deadlock. So we have to stop transaction now | ||
2303 | * and restart it when symlink contents are written. | ||
2304 | * | ||
2305 | * To keep fs consistent in case of crash, we have to put inode | ||
2306 | * to orphan list in the meantime. | ||
2273 | */ | 2307 | */ |
2308 | drop_nlink(inode); | ||
2309 | err = ext4_orphan_add(handle, inode); | ||
2310 | ext4_journal_stop(handle); | ||
2311 | if (err) | ||
2312 | goto err_drop_inode; | ||
2274 | err = __page_symlink(inode, symname, l, 1); | 2313 | err = __page_symlink(inode, symname, l, 1); |
2314 | if (err) | ||
2315 | goto err_drop_inode; | ||
2316 | /* | ||
2317 | * Now inode is being linked into dir (EXT4_DATA_TRANS_BLOCKS | ||
2318 | * + EXT4_INDEX_EXTRA_TRANS_BLOCKS), inode is also modified | ||
2319 | */ | ||
2320 | handle = ext4_journal_start(dir, | ||
2321 | EXT4_DATA_TRANS_BLOCKS(dir->i_sb) + | ||
2322 | EXT4_INDEX_EXTRA_TRANS_BLOCKS + 1); | ||
2323 | if (IS_ERR(handle)) { | ||
2324 | err = PTR_ERR(handle); | ||
2325 | goto err_drop_inode; | ||
2326 | } | ||
2327 | inc_nlink(inode); | ||
2328 | err = ext4_orphan_del(handle, inode); | ||
2275 | if (err) { | 2329 | if (err) { |
2330 | ext4_journal_stop(handle); | ||
2276 | clear_nlink(inode); | 2331 | clear_nlink(inode); |
2277 | unlock_new_inode(inode); | 2332 | goto err_drop_inode; |
2278 | ext4_mark_inode_dirty(handle, inode); | ||
2279 | iput(inode); | ||
2280 | goto out_stop; | ||
2281 | } | 2333 | } |
2282 | } else { | 2334 | } else { |
2283 | /* clear the extent format for fast symlink */ | 2335 | /* clear the extent format for fast symlink */ |
@@ -2293,6 +2345,10 @@ out_stop: | |||
2293 | if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) | 2345 | if (err == -ENOSPC && ext4_should_retry_alloc(dir->i_sb, &retries)) |
2294 | goto retry; | 2346 | goto retry; |
2295 | return err; | 2347 | return err; |
2348 | err_drop_inode: | ||
2349 | unlock_new_inode(inode); | ||
2350 | iput(inode); | ||
2351 | return err; | ||
2296 | } | 2352 | } |
2297 | 2353 | ||
2298 | static int ext4_link(struct dentry *old_dentry, | 2354 | static int ext4_link(struct dentry *old_dentry, |
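The credit arithmetic in the ext4_symlink() hunks above turns on whether the target fits inside the inode: anything longer than EXT4_N_BLOCKS * 4 = 60 bytes needs a data block, so the first handle only allocates the inode and parks it on the orphan list, and the directory entry is added later under a fresh handle once the contents have been written. Below is a minimal userspace model of that decision; the *_TRANS_BLOCKS values are assumed stand-ins, since the real macros depend on the superblock and quota configuration.

#include <stdio.h>
#include <string.h>

#define EXT4_N_BLOCKS            15  /* 12 direct + 3 indirect block slots */
#define DATA_TRANS_BLOCKS        27  /* assumed stand-in for EXT4_DATA_TRANS_BLOCKS(sb) */
#define INDEX_EXTRA_TRANS_BLOCKS 12  /* assumed stand-in */
#define MAXQUOTAS_INIT_BLOCKS     2  /* assumed stand-in */

/*
 * Mirror of the credit choice above: targets longer than 60 bytes
 * (EXT4_N_BLOCKS * 4) need a data block, so the first handle only
 * allocates the inode and puts it on the orphan list; short targets
 * are stored in the inode and handled in a single transaction.
 */
static int symlink_credits(size_t target_len)
{
        if (target_len > EXT4_N_BLOCKS * 4)
                return 4 + MAXQUOTAS_INIT_BLOCKS;
        return DATA_TRANS_BLOCKS + INDEX_EXTRA_TRANS_BLOCKS + 3 +
               MAXQUOTAS_INIT_BLOCKS;
}

int main(void)
{
        printf("fast symlink credits: %d\n", symlink_credits(strlen("target")));
        printf("slow symlink credits: %d\n", symlink_credits(200));
        return 0;
}

Compared with the old single reservation of EXT4_DATA_TRANS_BLOCKS + EXT4_INDEX_EXTRA_TRANS_BLOCKS + 5 plus quota credits, slow symlinks now start with a much smaller handle, and the orphan list keeps the filesystem recoverable between the two transactions.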
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c index b6dbd056fcb1..7bb8f76d470a 100644 --- a/fs/ext4/page-io.c +++ b/fs/ext4/page-io.c | |||
@@ -203,46 +203,29 @@ static void ext4_end_bio(struct bio *bio, int error) | |||
203 | for (i = 0; i < io_end->num_io_pages; i++) { | 203 | for (i = 0; i < io_end->num_io_pages; i++) { |
204 | struct page *page = io_end->pages[i]->p_page; | 204 | struct page *page = io_end->pages[i]->p_page; |
205 | struct buffer_head *bh, *head; | 205 | struct buffer_head *bh, *head; |
206 | int partial_write = 0; | 206 | loff_t offset; |
207 | loff_t io_end_offset; | ||
207 | 208 | ||
208 | head = page_buffers(page); | 209 | if (error) { |
209 | if (error) | ||
210 | SetPageError(page); | 210 | SetPageError(page); |
211 | BUG_ON(!head); | 211 | set_bit(AS_EIO, &page->mapping->flags); |
212 | if (head->b_size != PAGE_CACHE_SIZE) { | 212 | head = page_buffers(page); |
213 | loff_t offset; | 213 | BUG_ON(!head); |
214 | loff_t io_end_offset = io_end->offset + io_end->size; | 214 | |
215 | io_end_offset = io_end->offset + io_end->size; | ||
215 | 216 | ||
216 | offset = (sector_t) page->index << PAGE_CACHE_SHIFT; | 217 | offset = (sector_t) page->index << PAGE_CACHE_SHIFT; |
217 | bh = head; | 218 | bh = head; |
218 | do { | 219 | do { |
219 | if ((offset >= io_end->offset) && | 220 | if ((offset >= io_end->offset) && |
220 | (offset+bh->b_size <= io_end_offset)) { | 221 | (offset+bh->b_size <= io_end_offset)) |
221 | if (error) | 222 | buffer_io_error(bh); |
222 | buffer_io_error(bh); | 223 | |
223 | |||
224 | } | ||
225 | if (buffer_delay(bh)) | ||
226 | partial_write = 1; | ||
227 | else if (!buffer_mapped(bh)) | ||
228 | clear_buffer_dirty(bh); | ||
229 | else if (buffer_dirty(bh)) | ||
230 | partial_write = 1; | ||
231 | offset += bh->b_size; | 224 | offset += bh->b_size; |
232 | bh = bh->b_this_page; | 225 | bh = bh->b_this_page; |
233 | } while (bh != head); | 226 | } while (bh != head); |
234 | } | 227 | } |
235 | 228 | ||
236 | /* | ||
237 | * If this is a partial write which happened to make | ||
238 | * all buffers uptodate then we can optimize away a | ||
239 | * bogus readpage() for the next read(). Here we | ||
240 | * 'discover' whether the page went uptodate as a | ||
241 | * result of this (potentially partial) write. | ||
242 | */ | ||
243 | if (!partial_write) | ||
244 | SetPageUptodate(page); | ||
245 | |||
246 | put_io_page(io_end->pages[i]); | 229 | put_io_page(io_end->pages[i]); |
247 | } | 230 | } |
248 | io_end->num_io_pages = 0; | 231 | io_end->num_io_pages = 0; |
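After this hunk ext4_end_bio() touches the page's buffers only when the bio failed: it sets AS_EIO on the mapping and calls buffer_io_error() on every buffer whose byte range falls inside the failed io_end, and the old partial_write/SetPageUptodate bookkeeping is gone. A small userspace model of that range check, assuming 4 KiB pages split into 1 KiB buffers:

#include <stdio.h>

#define PAGE_SIZE  4096
#define BLOCK_SIZE 1024  /* assumed buffer size (head->b_size) */

int main(void)
{
        unsigned long page_index = 3;             /* fourth page of the file */
        long long io_start = 13 * 1024;           /* io_end->offset */
        long long io_stop  = io_start + 2 * 1024; /* io_end->offset + io_end->size */
        long long offset   = (long long)page_index * PAGE_SIZE;
        int i;

        for (i = 0; i < PAGE_SIZE / BLOCK_SIZE; i++, offset += BLOCK_SIZE) {
                int failed = offset >= io_start && offset + BLOCK_SIZE <= io_stop;

                printf("buffer %d [%lld..%lld): %s\n", i, offset,
                       offset + BLOCK_SIZE,
                       failed ? "buffer_io_error()" : "untouched");
        }
        return 0;
}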
diff --git a/fs/ext4/super.c b/fs/ext4/super.c index 8553dfb310af..cc5c157aa11d 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <linux/ctype.h> | 38 | #include <linux/ctype.h> |
39 | #include <linux/log2.h> | 39 | #include <linux/log2.h> |
40 | #include <linux/crc16.h> | 40 | #include <linux/crc16.h> |
41 | #include <linux/cleancache.h> | ||
41 | #include <asm/uaccess.h> | 42 | #include <asm/uaccess.h> |
42 | 43 | ||
43 | #include <linux/kthread.h> | 44 | #include <linux/kthread.h> |
@@ -75,11 +76,27 @@ static void ext4_write_super(struct super_block *sb); | |||
75 | static int ext4_freeze(struct super_block *sb); | 76 | static int ext4_freeze(struct super_block *sb); |
76 | static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, | 77 | static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, |
77 | const char *dev_name, void *data); | 78 | const char *dev_name, void *data); |
79 | static inline int ext2_feature_set_ok(struct super_block *sb); | ||
80 | static inline int ext3_feature_set_ok(struct super_block *sb); | ||
78 | static int ext4_feature_set_ok(struct super_block *sb, int readonly); | 81 | static int ext4_feature_set_ok(struct super_block *sb, int readonly); |
79 | static void ext4_destroy_lazyinit_thread(void); | 82 | static void ext4_destroy_lazyinit_thread(void); |
80 | static void ext4_unregister_li_request(struct super_block *sb); | 83 | static void ext4_unregister_li_request(struct super_block *sb); |
81 | static void ext4_clear_request_list(void); | 84 | static void ext4_clear_request_list(void); |
82 | 85 | ||
86 | #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) | ||
87 | static struct file_system_type ext2_fs_type = { | ||
88 | .owner = THIS_MODULE, | ||
89 | .name = "ext2", | ||
90 | .mount = ext4_mount, | ||
91 | .kill_sb = kill_block_super, | ||
92 | .fs_flags = FS_REQUIRES_DEV, | ||
93 | }; | ||
94 | #define IS_EXT2_SB(sb) ((sb)->s_bdev->bd_holder == &ext2_fs_type) | ||
95 | #else | ||
96 | #define IS_EXT2_SB(sb) (0) | ||
97 | #endif | ||
98 | |||
99 | |||
83 | #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) | 100 | #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) |
84 | static struct file_system_type ext3_fs_type = { | 101 | static struct file_system_type ext3_fs_type = { |
85 | .owner = THIS_MODULE, | 102 | .owner = THIS_MODULE, |
@@ -806,6 +823,8 @@ static void ext4_put_super(struct super_block *sb) | |||
806 | invalidate_bdev(sbi->journal_bdev); | 823 | invalidate_bdev(sbi->journal_bdev); |
807 | ext4_blkdev_remove(sbi); | 824 | ext4_blkdev_remove(sbi); |
808 | } | 825 | } |
826 | if (sbi->s_mmp_tsk) | ||
827 | kthread_stop(sbi->s_mmp_tsk); | ||
809 | sb->s_fs_info = NULL; | 828 | sb->s_fs_info = NULL; |
810 | /* | 829 | /* |
811 | * Now that we are completely done shutting down the | 830 | * Now that we are completely done shutting down the |
@@ -1096,7 +1115,7 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs) | |||
1096 | 1115 | ||
1097 | if (!test_opt(sb, INIT_INODE_TABLE)) | 1116 | if (!test_opt(sb, INIT_INODE_TABLE)) |
1098 | seq_puts(seq, ",noinit_inode_table"); | 1117 | seq_puts(seq, ",noinit_inode_table"); |
1099 | else if (sbi->s_li_wait_mult) | 1118 | else if (sbi->s_li_wait_mult != EXT4_DEF_LI_WAIT_MULT) |
1100 | seq_printf(seq, ",init_inode_table=%u", | 1119 | seq_printf(seq, ",init_inode_table=%u", |
1101 | (unsigned) sbi->s_li_wait_mult); | 1120 | (unsigned) sbi->s_li_wait_mult); |
1102 | 1121 | ||
@@ -1187,9 +1206,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type, | |||
1187 | const char *data, size_t len, loff_t off); | 1206 | const char *data, size_t len, loff_t off); |
1188 | 1207 | ||
1189 | static const struct dquot_operations ext4_quota_operations = { | 1208 | static const struct dquot_operations ext4_quota_operations = { |
1190 | #ifdef CONFIG_QUOTA | ||
1191 | .get_reserved_space = ext4_get_reserved_space, | 1209 | .get_reserved_space = ext4_get_reserved_space, |
1192 | #endif | ||
1193 | .write_dquot = ext4_write_dquot, | 1210 | .write_dquot = ext4_write_dquot, |
1194 | .acquire_dquot = ext4_acquire_dquot, | 1211 | .acquire_dquot = ext4_acquire_dquot, |
1195 | .release_dquot = ext4_release_dquot, | 1212 | .release_dquot = ext4_release_dquot, |
@@ -1900,7 +1917,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, | |||
1900 | ext4_msg(sb, KERN_WARNING, | 1917 | ext4_msg(sb, KERN_WARNING, |
1901 | "warning: mounting fs with errors, " | 1918 | "warning: mounting fs with errors, " |
1902 | "running e2fsck is recommended"); | 1919 | "running e2fsck is recommended"); |
1903 | else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 && | 1920 | else if ((__s16) le16_to_cpu(es->s_max_mnt_count) > 0 && |
1904 | le16_to_cpu(es->s_mnt_count) >= | 1921 | le16_to_cpu(es->s_mnt_count) >= |
1905 | (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count)) | 1922 | (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count)) |
1906 | ext4_msg(sb, KERN_WARNING, | 1923 | ext4_msg(sb, KERN_WARNING, |
@@ -1932,6 +1949,7 @@ static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es, | |||
1932 | EXT4_INODES_PER_GROUP(sb), | 1949 | EXT4_INODES_PER_GROUP(sb), |
1933 | sbi->s_mount_opt, sbi->s_mount_opt2); | 1950 | sbi->s_mount_opt, sbi->s_mount_opt2); |
1934 | 1951 | ||
1952 | cleancache_init_fs(sb); | ||
1935 | return res; | 1953 | return res; |
1936 | } | 1954 | } |
1937 | 1955 | ||
@@ -2425,6 +2443,18 @@ static ssize_t lifetime_write_kbytes_show(struct ext4_attr *a, | |||
2425 | EXT4_SB(sb)->s_sectors_written_start) >> 1))); | 2443 | EXT4_SB(sb)->s_sectors_written_start) >> 1))); |
2426 | } | 2444 | } |
2427 | 2445 | ||
2446 | static ssize_t extent_cache_hits_show(struct ext4_attr *a, | ||
2447 | struct ext4_sb_info *sbi, char *buf) | ||
2448 | { | ||
2449 | return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_hits); | ||
2450 | } | ||
2451 | |||
2452 | static ssize_t extent_cache_misses_show(struct ext4_attr *a, | ||
2453 | struct ext4_sb_info *sbi, char *buf) | ||
2454 | { | ||
2455 | return snprintf(buf, PAGE_SIZE, "%lu\n", sbi->extent_cache_misses); | ||
2456 | } | ||
2457 | |||
2428 | static ssize_t inode_readahead_blks_store(struct ext4_attr *a, | 2458 | static ssize_t inode_readahead_blks_store(struct ext4_attr *a, |
2429 | struct ext4_sb_info *sbi, | 2459 | struct ext4_sb_info *sbi, |
2430 | const char *buf, size_t count) | 2460 | const char *buf, size_t count) |
@@ -2482,6 +2512,8 @@ static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store) | |||
2482 | EXT4_RO_ATTR(delayed_allocation_blocks); | 2512 | EXT4_RO_ATTR(delayed_allocation_blocks); |
2483 | EXT4_RO_ATTR(session_write_kbytes); | 2513 | EXT4_RO_ATTR(session_write_kbytes); |
2484 | EXT4_RO_ATTR(lifetime_write_kbytes); | 2514 | EXT4_RO_ATTR(lifetime_write_kbytes); |
2515 | EXT4_RO_ATTR(extent_cache_hits); | ||
2516 | EXT4_RO_ATTR(extent_cache_misses); | ||
2485 | EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show, | 2517 | EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show, |
2486 | inode_readahead_blks_store, s_inode_readahead_blks); | 2518 | inode_readahead_blks_store, s_inode_readahead_blks); |
2487 | EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal); | 2519 | EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal); |
@@ -2497,6 +2529,8 @@ static struct attribute *ext4_attrs[] = { | |||
2497 | ATTR_LIST(delayed_allocation_blocks), | 2529 | ATTR_LIST(delayed_allocation_blocks), |
2498 | ATTR_LIST(session_write_kbytes), | 2530 | ATTR_LIST(session_write_kbytes), |
2499 | ATTR_LIST(lifetime_write_kbytes), | 2531 | ATTR_LIST(lifetime_write_kbytes), |
2532 | ATTR_LIST(extent_cache_hits), | ||
2533 | ATTR_LIST(extent_cache_misses), | ||
2500 | ATTR_LIST(inode_readahead_blks), | 2534 | ATTR_LIST(inode_readahead_blks), |
2501 | ATTR_LIST(inode_goal), | 2535 | ATTR_LIST(inode_goal), |
2502 | ATTR_LIST(mb_stats), | 2536 | ATTR_LIST(mb_stats), |
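The two new read-only counters are published next to the existing per-filesystem attributes, so they can be read like any other ext4 sysfs file. A tiny reader as a sketch; the /sys/fs/ext4/<device>/ path layout is an assumption based on where the existing attributes live, not something introduced by this patch.

#include <stdio.h>

static long read_counter(const char *dev, const char *name)
{
        char path[256];
        long val = -1;
        FILE *f;

        snprintf(path, sizeof(path), "/sys/fs/ext4/%s/%s", dev, name);
        f = fopen(path, "r");
        if (!f)
                return -1;
        if (fscanf(f, "%ld", &val) != 1)
                val = -1;
        fclose(f);
        return val;
}

int main(void)
{
        printf("hits:   %ld\n", read_counter("sda1", "extent_cache_hits"));
        printf("misses: %ld\n", read_counter("sda1", "extent_cache_misses"));
        return 0;
}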
@@ -2659,12 +2693,6 @@ static void print_daily_error_info(unsigned long arg) | |||
2659 | mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ | 2693 | mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ); /* Once a day */ |
2660 | } | 2694 | } |
2661 | 2695 | ||
2662 | static void ext4_lazyinode_timeout(unsigned long data) | ||
2663 | { | ||
2664 | struct task_struct *p = (struct task_struct *)data; | ||
2665 | wake_up_process(p); | ||
2666 | } | ||
2667 | |||
2668 | /* Find next suitable group and run ext4_init_inode_table */ | 2696 | /* Find next suitable group and run ext4_init_inode_table */ |
2669 | static int ext4_run_li_request(struct ext4_li_request *elr) | 2697 | static int ext4_run_li_request(struct ext4_li_request *elr) |
2670 | { | 2698 | { |
@@ -2696,11 +2724,8 @@ static int ext4_run_li_request(struct ext4_li_request *elr) | |||
2696 | ret = ext4_init_inode_table(sb, group, | 2724 | ret = ext4_init_inode_table(sb, group, |
2697 | elr->lr_timeout ? 0 : 1); | 2725 | elr->lr_timeout ? 0 : 1); |
2698 | if (elr->lr_timeout == 0) { | 2726 | if (elr->lr_timeout == 0) { |
2699 | timeout = jiffies - timeout; | 2727 | timeout = (jiffies - timeout) * |
2700 | if (elr->lr_sbi->s_li_wait_mult) | 2728 | elr->lr_sbi->s_li_wait_mult; |
2701 | timeout *= elr->lr_sbi->s_li_wait_mult; | ||
2702 | else | ||
2703 | timeout *= 20; | ||
2704 | elr->lr_timeout = timeout; | 2729 | elr->lr_timeout = timeout; |
2705 | } | 2730 | } |
2706 | elr->lr_next_sched = jiffies + elr->lr_timeout; | 2731 | elr->lr_next_sched = jiffies + elr->lr_timeout; |
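With s_li_wait_mult now guaranteed to be non-zero (a later hunk in this patch initializes it to EXT4_DEF_LI_WAIT_MULT at mount time), the fallback multiplier of 20 disappears and the next lazy-init pass is simply scheduled wait-mult times as far in the future as the previous pass took. A userspace model of that scheduling; the default of 10 is an assumption for illustration.

#include <stdio.h>

#define EXT4_DEF_LI_WAIT_MULT 10  /* assumed default value */

/* next_sched = now + (time the last inode-table init pass took) * mult */
static unsigned long next_sched(unsigned long now, unsigned long pass_start,
                                unsigned long wait_mult)
{
        return now + (now - pass_start) * wait_mult;
}

int main(void)
{
        unsigned long start = 1000, end = 1250;  /* jiffies, for illustration */

        printf("pass took %lu jiffies, next run at %lu\n",
               end - start, next_sched(end, start, EXT4_DEF_LI_WAIT_MULT));
        return 0;
}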
@@ -2712,7 +2737,7 @@ static int ext4_run_li_request(struct ext4_li_request *elr) | |||
2712 | 2737 | ||
2713 | /* | 2738 | /* |
2714 | * Remove lr_request from the list_request and free the | 2739 | * Remove lr_request from the list_request and free the |
2715 | * request tructure. Should be called with li_list_mtx held | 2740 | * request structure. Should be called with li_list_mtx held |
2716 | */ | 2741 | */ |
2717 | static void ext4_remove_li_request(struct ext4_li_request *elr) | 2742 | static void ext4_remove_li_request(struct ext4_li_request *elr) |
2718 | { | 2743 | { |
@@ -2730,14 +2755,16 @@ static void ext4_remove_li_request(struct ext4_li_request *elr) | |||
2730 | 2755 | ||
2731 | static void ext4_unregister_li_request(struct super_block *sb) | 2756 | static void ext4_unregister_li_request(struct super_block *sb) |
2732 | { | 2757 | { |
2733 | struct ext4_li_request *elr = EXT4_SB(sb)->s_li_request; | 2758 | mutex_lock(&ext4_li_mtx); |
2734 | 2759 | if (!ext4_li_info) { | |
2735 | if (!ext4_li_info) | 2760 | mutex_unlock(&ext4_li_mtx); |
2736 | return; | 2761 | return; |
2762 | } | ||
2737 | 2763 | ||
2738 | mutex_lock(&ext4_li_info->li_list_mtx); | 2764 | mutex_lock(&ext4_li_info->li_list_mtx); |
2739 | ext4_remove_li_request(elr); | 2765 | ext4_remove_li_request(EXT4_SB(sb)->s_li_request); |
2740 | mutex_unlock(&ext4_li_info->li_list_mtx); | 2766 | mutex_unlock(&ext4_li_info->li_list_mtx); |
2767 | mutex_unlock(&ext4_li_mtx); | ||
2741 | } | 2768 | } |
2742 | 2769 | ||
2743 | static struct task_struct *ext4_lazyinit_task; | 2770 | static struct task_struct *ext4_lazyinit_task; |
@@ -2756,17 +2783,10 @@ static int ext4_lazyinit_thread(void *arg) | |||
2756 | struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg; | 2783 | struct ext4_lazy_init *eli = (struct ext4_lazy_init *)arg; |
2757 | struct list_head *pos, *n; | 2784 | struct list_head *pos, *n; |
2758 | struct ext4_li_request *elr; | 2785 | struct ext4_li_request *elr; |
2759 | unsigned long next_wakeup; | 2786 | unsigned long next_wakeup, cur; |
2760 | DEFINE_WAIT(wait); | ||
2761 | 2787 | ||
2762 | BUG_ON(NULL == eli); | 2788 | BUG_ON(NULL == eli); |
2763 | 2789 | ||
2764 | eli->li_timer.data = (unsigned long)current; | ||
2765 | eli->li_timer.function = ext4_lazyinode_timeout; | ||
2766 | |||
2767 | eli->li_task = current; | ||
2768 | wake_up(&eli->li_wait_task); | ||
2769 | |||
2770 | cont_thread: | 2790 | cont_thread: |
2771 | while (true) { | 2791 | while (true) { |
2772 | next_wakeup = MAX_JIFFY_OFFSET; | 2792 | next_wakeup = MAX_JIFFY_OFFSET; |
@@ -2797,19 +2817,15 @@ cont_thread: | |||
2797 | if (freezing(current)) | 2817 | if (freezing(current)) |
2798 | refrigerator(); | 2818 | refrigerator(); |
2799 | 2819 | ||
2800 | if ((time_after_eq(jiffies, next_wakeup)) || | 2820 | cur = jiffies; |
2821 | if ((time_after_eq(cur, next_wakeup)) || | ||
2801 | (MAX_JIFFY_OFFSET == next_wakeup)) { | 2822 | (MAX_JIFFY_OFFSET == next_wakeup)) { |
2802 | cond_resched(); | 2823 | cond_resched(); |
2803 | continue; | 2824 | continue; |
2804 | } | 2825 | } |
2805 | 2826 | ||
2806 | eli->li_timer.expires = next_wakeup; | 2827 | schedule_timeout_interruptible(next_wakeup - cur); |
2807 | add_timer(&eli->li_timer); | 2828 | |
2808 | prepare_to_wait(&eli->li_wait_daemon, &wait, | ||
2809 | TASK_INTERRUPTIBLE); | ||
2810 | if (time_before(jiffies, next_wakeup)) | ||
2811 | schedule(); | ||
2812 | finish_wait(&eli->li_wait_daemon, &wait); | ||
2813 | if (kthread_should_stop()) { | 2829 | if (kthread_should_stop()) { |
2814 | ext4_clear_request_list(); | 2830 | ext4_clear_request_list(); |
2815 | goto exit_thread; | 2831 | goto exit_thread; |
@@ -2833,12 +2849,7 @@ exit_thread: | |||
2833 | goto cont_thread; | 2849 | goto cont_thread; |
2834 | } | 2850 | } |
2835 | mutex_unlock(&eli->li_list_mtx); | 2851 | mutex_unlock(&eli->li_list_mtx); |
2836 | del_timer_sync(&ext4_li_info->li_timer); | ||
2837 | eli->li_task = NULL; | ||
2838 | wake_up(&eli->li_wait_task); | ||
2839 | |||
2840 | kfree(ext4_li_info); | 2852 | kfree(ext4_li_info); |
2841 | ext4_lazyinit_task = NULL; | ||
2842 | ext4_li_info = NULL; | 2853 | ext4_li_info = NULL; |
2843 | mutex_unlock(&ext4_li_mtx); | 2854 | mutex_unlock(&ext4_li_mtx); |
2844 | 2855 | ||
@@ -2866,7 +2877,6 @@ static int ext4_run_lazyinit_thread(void) | |||
2866 | if (IS_ERR(ext4_lazyinit_task)) { | 2877 | if (IS_ERR(ext4_lazyinit_task)) { |
2867 | int err = PTR_ERR(ext4_lazyinit_task); | 2878 | int err = PTR_ERR(ext4_lazyinit_task); |
2868 | ext4_clear_request_list(); | 2879 | ext4_clear_request_list(); |
2869 | del_timer_sync(&ext4_li_info->li_timer); | ||
2870 | kfree(ext4_li_info); | 2880 | kfree(ext4_li_info); |
2871 | ext4_li_info = NULL; | 2881 | ext4_li_info = NULL; |
2872 | printk(KERN_CRIT "EXT4: error %d creating inode table " | 2882 | printk(KERN_CRIT "EXT4: error %d creating inode table " |
@@ -2875,8 +2885,6 @@ static int ext4_run_lazyinit_thread(void) | |||
2875 | return err; | 2885 | return err; |
2876 | } | 2886 | } |
2877 | ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING; | 2887 | ext4_li_info->li_state |= EXT4_LAZYINIT_RUNNING; |
2878 | |||
2879 | wait_event(ext4_li_info->li_wait_task, ext4_li_info->li_task != NULL); | ||
2880 | return 0; | 2888 | return 0; |
2881 | } | 2889 | } |
2882 | 2890 | ||
@@ -2911,13 +2919,9 @@ static int ext4_li_info_new(void) | |||
2911 | if (!eli) | 2919 | if (!eli) |
2912 | return -ENOMEM; | 2920 | return -ENOMEM; |
2913 | 2921 | ||
2914 | eli->li_task = NULL; | ||
2915 | INIT_LIST_HEAD(&eli->li_request_list); | 2922 | INIT_LIST_HEAD(&eli->li_request_list); |
2916 | mutex_init(&eli->li_list_mtx); | 2923 | mutex_init(&eli->li_list_mtx); |
2917 | 2924 | ||
2918 | init_waitqueue_head(&eli->li_wait_daemon); | ||
2919 | init_waitqueue_head(&eli->li_wait_task); | ||
2920 | init_timer(&eli->li_timer); | ||
2921 | eli->li_state |= EXT4_LAZYINIT_QUIT; | 2925 | eli->li_state |= EXT4_LAZYINIT_QUIT; |
2922 | 2926 | ||
2923 | ext4_li_info = eli; | 2927 | ext4_li_info = eli; |
@@ -2960,20 +2964,19 @@ static int ext4_register_li_request(struct super_block *sb, | |||
2960 | ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; | 2964 | ext4_group_t ngroups = EXT4_SB(sb)->s_groups_count; |
2961 | int ret = 0; | 2965 | int ret = 0; |
2962 | 2966 | ||
2963 | if (sbi->s_li_request != NULL) | 2967 | if (sbi->s_li_request != NULL) { |
2968 | /* | ||
2969 | * Reset timeout so it can be computed again, because | ||
2970 | * s_li_wait_mult might have changed. | ||
2971 | */ | ||
2972 | sbi->s_li_request->lr_timeout = 0; | ||
2964 | return 0; | 2973 | return 0; |
2974 | } | ||
2965 | 2975 | ||
2966 | if (first_not_zeroed == ngroups || | 2976 | if (first_not_zeroed == ngroups || |
2967 | (sb->s_flags & MS_RDONLY) || | 2977 | (sb->s_flags & MS_RDONLY) || |
2968 | !test_opt(sb, INIT_INODE_TABLE)) { | 2978 | !test_opt(sb, INIT_INODE_TABLE)) |
2969 | sbi->s_li_request = NULL; | ||
2970 | return 0; | 2979 | return 0; |
2971 | } | ||
2972 | |||
2973 | if (first_not_zeroed == ngroups) { | ||
2974 | sbi->s_li_request = NULL; | ||
2975 | return 0; | ||
2976 | } | ||
2977 | 2980 | ||
2978 | elr = ext4_li_request_new(sb, first_not_zeroed); | 2981 | elr = ext4_li_request_new(sb, first_not_zeroed); |
2979 | if (!elr) | 2982 | if (!elr) |
@@ -3166,6 +3169,12 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3166 | ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) | 3169 | ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0)) |
3167 | set_opt(sb, DELALLOC); | 3170 | set_opt(sb, DELALLOC); |
3168 | 3171 | ||
3172 | /* | ||
3173 | * set default s_li_wait_mult for lazyinit, for the case there is | ||
3174 | * no mount option specified. | ||
3175 | */ | ||
3176 | sbi->s_li_wait_mult = EXT4_DEF_LI_WAIT_MULT; | ||
3177 | |||
3169 | if (!parse_options((char *) sbi->s_es->s_mount_opts, sb, | 3178 | if (!parse_options((char *) sbi->s_es->s_mount_opts, sb, |
3170 | &journal_devnum, &journal_ioprio, NULL, 0)) { | 3179 | &journal_devnum, &journal_ioprio, NULL, 0)) { |
3171 | ext4_msg(sb, KERN_WARNING, | 3180 | ext4_msg(sb, KERN_WARNING, |
@@ -3187,6 +3196,28 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3187 | "feature flags set on rev 0 fs, " | 3196 | "feature flags set on rev 0 fs, " |
3188 | "running e2fsck is recommended"); | 3197 | "running e2fsck is recommended"); |
3189 | 3198 | ||
3199 | if (IS_EXT2_SB(sb)) { | ||
3200 | if (ext2_feature_set_ok(sb)) | ||
3201 | ext4_msg(sb, KERN_INFO, "mounting ext2 file system " | ||
3202 | "using the ext4 subsystem"); | ||
3203 | else { | ||
3204 | ext4_msg(sb, KERN_ERR, "couldn't mount as ext2 due " | ||
3205 | "to feature incompatibilities"); | ||
3206 | goto failed_mount; | ||
3207 | } | ||
3208 | } | ||
3209 | |||
3210 | if (IS_EXT3_SB(sb)) { | ||
3211 | if (ext3_feature_set_ok(sb)) | ||
3212 | ext4_msg(sb, KERN_INFO, "mounting ext3 file system " | ||
3213 | "using the ext4 subsystem"); | ||
3214 | else { | ||
3215 | ext4_msg(sb, KERN_ERR, "couldn't mount as ext3 due " | ||
3216 | "to feature incompatibilities"); | ||
3217 | goto failed_mount; | ||
3218 | } | ||
3219 | } | ||
3220 | |||
3190 | /* | 3221 | /* |
3191 | * Check feature flags regardless of the revision level, since we | 3222 | * Check feature flags regardless of the revision level, since we |
3192 | * previously didn't change the revision level when setting the flags, | 3223 | * previously didn't change the revision level when setting the flags, |
@@ -3459,6 +3490,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3459 | EXT4_HAS_INCOMPAT_FEATURE(sb, | 3490 | EXT4_HAS_INCOMPAT_FEATURE(sb, |
3460 | EXT4_FEATURE_INCOMPAT_RECOVER)); | 3491 | EXT4_FEATURE_INCOMPAT_RECOVER)); |
3461 | 3492 | ||
3493 | if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) && | ||
3494 | !(sb->s_flags & MS_RDONLY)) | ||
3495 | if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) | ||
3496 | goto failed_mount3; | ||
3497 | |||
3462 | /* | 3498 | /* |
3463 | * The first inode we look at is the journal inode. Don't try | 3499 | * The first inode we look at is the journal inode. Don't try |
3464 | * root first: it may be modified in the journal! | 3500 | * root first: it may be modified in the journal! |
@@ -3474,7 +3510,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent) | |||
3474 | goto failed_mount_wq; | 3510 | goto failed_mount_wq; |
3475 | } else { | 3511 | } else { |
3476 | clear_opt(sb, DATA_FLAGS); | 3512 | clear_opt(sb, DATA_FLAGS); |
3477 | set_opt(sb, WRITEBACK_DATA); | ||
3478 | sbi->s_journal = NULL; | 3513 | sbi->s_journal = NULL; |
3479 | needs_recovery = 0; | 3514 | needs_recovery = 0; |
3480 | goto no_journal; | 3515 | goto no_journal; |
@@ -3707,6 +3742,8 @@ failed_mount3: | |||
3707 | percpu_counter_destroy(&sbi->s_freeinodes_counter); | 3742 | percpu_counter_destroy(&sbi->s_freeinodes_counter); |
3708 | percpu_counter_destroy(&sbi->s_dirs_counter); | 3743 | percpu_counter_destroy(&sbi->s_dirs_counter); |
3709 | percpu_counter_destroy(&sbi->s_dirtyblocks_counter); | 3744 | percpu_counter_destroy(&sbi->s_dirtyblocks_counter); |
3745 | if (sbi->s_mmp_tsk) | ||
3746 | kthread_stop(sbi->s_mmp_tsk); | ||
3710 | failed_mount2: | 3747 | failed_mount2: |
3711 | for (i = 0; i < db_count; i++) | 3748 | for (i = 0; i < db_count; i++) |
3712 | brelse(sbi->s_group_desc[i]); | 3749 | brelse(sbi->s_group_desc[i]); |
@@ -4242,7 +4279,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) | |||
4242 | int enable_quota = 0; | 4279 | int enable_quota = 0; |
4243 | ext4_group_t g; | 4280 | ext4_group_t g; |
4244 | unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; | 4281 | unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO; |
4245 | int err; | 4282 | int err = 0; |
4246 | #ifdef CONFIG_QUOTA | 4283 | #ifdef CONFIG_QUOTA |
4247 | int i; | 4284 | int i; |
4248 | #endif | 4285 | #endif |
@@ -4368,6 +4405,13 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data) | |||
4368 | goto restore_opts; | 4405 | goto restore_opts; |
4369 | if (!ext4_setup_super(sb, es, 0)) | 4406 | if (!ext4_setup_super(sb, es, 0)) |
4370 | sb->s_flags &= ~MS_RDONLY; | 4407 | sb->s_flags &= ~MS_RDONLY; |
4408 | if (EXT4_HAS_INCOMPAT_FEATURE(sb, | ||
4409 | EXT4_FEATURE_INCOMPAT_MMP)) | ||
4410 | if (ext4_multi_mount_protect(sb, | ||
4411 | le64_to_cpu(es->s_mmp_block))) { | ||
4412 | err = -EROFS; | ||
4413 | goto restore_opts; | ||
4414 | } | ||
4371 | enable_quota = 1; | 4415 | enable_quota = 1; |
4372 | } | 4416 | } |
4373 | } | 4417 | } |
@@ -4432,6 +4476,7 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
4432 | struct ext4_sb_info *sbi = EXT4_SB(sb); | 4476 | struct ext4_sb_info *sbi = EXT4_SB(sb); |
4433 | struct ext4_super_block *es = sbi->s_es; | 4477 | struct ext4_super_block *es = sbi->s_es; |
4434 | u64 fsid; | 4478 | u64 fsid; |
4479 | s64 bfree; | ||
4435 | 4480 | ||
4436 | if (test_opt(sb, MINIX_DF)) { | 4481 | if (test_opt(sb, MINIX_DF)) { |
4437 | sbi->s_overhead_last = 0; | 4482 | sbi->s_overhead_last = 0; |
@@ -4475,8 +4520,10 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf) | |||
4475 | buf->f_type = EXT4_SUPER_MAGIC; | 4520 | buf->f_type = EXT4_SUPER_MAGIC; |
4476 | buf->f_bsize = sb->s_blocksize; | 4521 | buf->f_bsize = sb->s_blocksize; |
4477 | buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last; | 4522 | buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last; |
4478 | buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) - | 4523 | bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) - |
4479 | percpu_counter_sum_positive(&sbi->s_dirtyblocks_counter); | 4524 | percpu_counter_sum_positive(&sbi->s_dirtyblocks_counter); |
4525 | /* prevent underflow when very little free space is available */ | ||
4526 | buf->f_bfree = max_t(s64, bfree, 0); | ||
4480 | buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es); | 4527 | buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es); |
4481 | if (buf->f_bfree < ext4_r_blocks_count(es)) | 4528 | if (buf->f_bfree < ext4_r_blocks_count(es)) |
4482 | buf->f_bavail = 0; | 4529 | buf->f_bavail = 0; |
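The statfs fix above exists because the two percpu counter sums are only approximate: on a nearly full filesystem the dirty-block sum can momentarily exceed the free-block sum, and assigning the negative difference straight into the unsigned f_bfree field would wrap it into an enormous value. Accumulating into a signed 64-bit temporary and clamping at zero, as the hunk does with max_t(s64, bfree, 0), avoids that. A userspace model:

#include <stdint.h>
#include <stdio.h>

/* clamp like max_t(s64, bfree, 0) so the unsigned field cannot wrap */
static uint64_t safe_bfree(int64_t free_blocks, int64_t dirty_blocks)
{
        int64_t bfree = free_blocks - dirty_blocks;

        return bfree > 0 ? (uint64_t)bfree : 0;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)safe_bfree(100, 250)); /* 0, not ~2^64 */
        printf("%llu\n", (unsigned long long)safe_bfree(500, 250)); /* 250 */
        return 0;
}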
@@ -4652,6 +4699,9 @@ static int ext4_quota_off(struct super_block *sb, int type) | |||
4652 | if (test_opt(sb, DELALLOC)) | 4699 | if (test_opt(sb, DELALLOC)) |
4653 | sync_filesystem(sb); | 4700 | sync_filesystem(sb); |
4654 | 4701 | ||
4702 | if (!inode) | ||
4703 | goto out; | ||
4704 | |||
4655 | /* Update modification times of quota files when userspace can | 4705 | /* Update modification times of quota files when userspace can |
4656 | * start looking at them */ | 4706 | * start looking at them */ |
4657 | handle = ext4_journal_start(inode, 1); | 4707 | handle = ext4_journal_start(inode, 1); |
@@ -4772,14 +4822,6 @@ static struct dentry *ext4_mount(struct file_system_type *fs_type, int flags, | |||
4772 | } | 4822 | } |
4773 | 4823 | ||
4774 | #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) | 4824 | #if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) |
4775 | static struct file_system_type ext2_fs_type = { | ||
4776 | .owner = THIS_MODULE, | ||
4777 | .name = "ext2", | ||
4778 | .mount = ext4_mount, | ||
4779 | .kill_sb = kill_block_super, | ||
4780 | .fs_flags = FS_REQUIRES_DEV, | ||
4781 | }; | ||
4782 | |||
4783 | static inline void register_as_ext2(void) | 4825 | static inline void register_as_ext2(void) |
4784 | { | 4826 | { |
4785 | int err = register_filesystem(&ext2_fs_type); | 4827 | int err = register_filesystem(&ext2_fs_type); |
@@ -4792,10 +4834,22 @@ static inline void unregister_as_ext2(void) | |||
4792 | { | 4834 | { |
4793 | unregister_filesystem(&ext2_fs_type); | 4835 | unregister_filesystem(&ext2_fs_type); |
4794 | } | 4836 | } |
4837 | |||
4838 | static inline int ext2_feature_set_ok(struct super_block *sb) | ||
4839 | { | ||
4840 | if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT2_FEATURE_INCOMPAT_SUPP)) | ||
4841 | return 0; | ||
4842 | if (sb->s_flags & MS_RDONLY) | ||
4843 | return 1; | ||
4844 | if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT2_FEATURE_RO_COMPAT_SUPP)) | ||
4845 | return 0; | ||
4846 | return 1; | ||
4847 | } | ||
4795 | MODULE_ALIAS("ext2"); | 4848 | MODULE_ALIAS("ext2"); |
4796 | #else | 4849 | #else |
4797 | static inline void register_as_ext2(void) { } | 4850 | static inline void register_as_ext2(void) { } |
4798 | static inline void unregister_as_ext2(void) { } | 4851 | static inline void unregister_as_ext2(void) { } |
4852 | static inline int ext2_feature_set_ok(struct super_block *sb) { return 0; } | ||
4799 | #endif | 4853 | #endif |
4800 | 4854 | ||
4801 | #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) | 4855 | #if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23) |
@@ -4811,10 +4865,24 @@ static inline void unregister_as_ext3(void) | |||
4811 | { | 4865 | { |
4812 | unregister_filesystem(&ext3_fs_type); | 4866 | unregister_filesystem(&ext3_fs_type); |
4813 | } | 4867 | } |
4868 | |||
4869 | static inline int ext3_feature_set_ok(struct super_block *sb) | ||
4870 | { | ||
4871 | if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT3_FEATURE_INCOMPAT_SUPP)) | ||
4872 | return 0; | ||
4873 | if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) | ||
4874 | return 0; | ||
4875 | if (sb->s_flags & MS_RDONLY) | ||
4876 | return 1; | ||
4877 | if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT3_FEATURE_RO_COMPAT_SUPP)) | ||
4878 | return 0; | ||
4879 | return 1; | ||
4880 | } | ||
4814 | MODULE_ALIAS("ext3"); | 4881 | MODULE_ALIAS("ext3"); |
4815 | #else | 4882 | #else |
4816 | static inline void register_as_ext3(void) { } | 4883 | static inline void register_as_ext3(void) { } |
4817 | static inline void unregister_as_ext3(void) { } | 4884 | static inline void unregister_as_ext3(void) { } |
4885 | static inline int ext3_feature_set_ok(struct super_block *sb) { return 0; } | ||
4818 | #endif | 4886 | #endif |
4819 | 4887 | ||
4820 | static struct file_system_type ext4_fs_type = { | 4888 | static struct file_system_type ext4_fs_type = { |
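ext2_feature_set_ok() and ext3_feature_set_ok() share one shape: any incompat feature outside the supported mask blocks the mount outright, while ro_compat features outside the mask only matter for read-write mounts (ext3 additionally insists on the has_journal compat flag, which the model below omits). A compact userspace model of that check; the feature bits are made-up placeholders, not the real ext2/ext3 masks.

#include <stdbool.h>
#include <stdio.h>

/* placeholder feature masks, not the real ext2/ext3 values */
#define FEAT_INCOMPAT_SUPP  0x0007u
#define FEAT_RO_COMPAT_SUPP 0x0003u

static bool feature_set_ok(unsigned incompat, unsigned ro_compat, bool rdonly)
{
        if (incompat & ~FEAT_INCOMPAT_SUPP)
                return false;  /* unknown incompat feature: cannot mount at all */
        if (rdonly)
                return true;   /* read-only mount ignores ro_compat features */
        return !(ro_compat & ~FEAT_RO_COMPAT_SUPP);
}

int main(void)
{
        printf("%d\n", feature_set_ok(0x1, 0x1, false)); /* 1: fully supported */
        printf("%d\n", feature_set_ok(0x8, 0x0, false)); /* 0: unknown incompat */
        printf("%d\n", feature_set_ok(0x1, 0x8, true));  /* 1: tolerated read-only */
        printf("%d\n", feature_set_ok(0x1, 0x8, false)); /* 0: needs rw support */
        return 0;
}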
@@ -4898,8 +4966,8 @@ static int __init ext4_init_fs(void) | |||
4898 | err = init_inodecache(); | 4966 | err = init_inodecache(); |
4899 | if (err) | 4967 | if (err) |
4900 | goto out1; | 4968 | goto out1; |
4901 | register_as_ext2(); | ||
4902 | register_as_ext3(); | 4969 | register_as_ext3(); |
4970 | register_as_ext2(); | ||
4903 | err = register_filesystem(&ext4_fs_type); | 4971 | err = register_filesystem(&ext4_fs_type); |
4904 | if (err) | 4972 | if (err) |
4905 | goto out; | 4973 | goto out; |
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index b545ca1c459c..c757adc97250 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c | |||
@@ -820,8 +820,8 @@ inserted: | |||
820 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) | 820 | if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) |
821 | goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; | 821 | goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; |
822 | 822 | ||
823 | block = ext4_new_meta_blocks(handle, inode, | 823 | block = ext4_new_meta_blocks(handle, inode, goal, 0, |
824 | goal, NULL, &error); | 824 | NULL, &error); |
825 | if (error) | 825 | if (error) |
826 | goto cleanup; | 826 | goto cleanup; |
827 | 827 | ||
diff --git a/fs/fat/namei_msdos.c b/fs/fat/namei_msdos.c index 3b222dafd15b..be15437c272e 100644 --- a/fs/fat/namei_msdos.c +++ b/fs/fat/namei_msdos.c | |||
@@ -326,6 +326,8 @@ static int msdos_rmdir(struct inode *dir, struct dentry *dentry) | |||
326 | struct fat_slot_info sinfo; | 326 | struct fat_slot_info sinfo; |
327 | int err; | 327 | int err; |
328 | 328 | ||
329 | dentry_unhash(dentry); | ||
330 | |||
329 | lock_super(sb); | 331 | lock_super(sb); |
330 | /* | 332 | /* |
331 | * Check whether the directory is not in use, then check | 333 | * Check whether the directory is not in use, then check |
@@ -457,6 +459,9 @@ static int do_msdos_rename(struct inode *old_dir, unsigned char *old_name, | |||
457 | old_inode = old_dentry->d_inode; | 459 | old_inode = old_dentry->d_inode; |
458 | new_inode = new_dentry->d_inode; | 460 | new_inode = new_dentry->d_inode; |
459 | 461 | ||
462 | if (new_inode && S_ISDIR(new_inode->i_mode)) | ||
463 | dentry_unhash(new_dentry); | ||
464 | |||
460 | err = fat_scan(old_dir, old_name, &old_sinfo); | 465 | err = fat_scan(old_dir, old_name, &old_sinfo); |
461 | if (err) { | 466 | if (err) { |
462 | err = -EIO; | 467 | err = -EIO; |
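This msdos hunk is the first of a series below (vfat, fuse, hfs, hfsplus, hostfs, hpfs, jffs2, jfs, logfs, minix) that all apply the same convention: the filesystem itself unhashes the victim dentry, unconditionally in rmdir and only for an existing directory target in rename. A hedged sketch of the pattern for a hypothetical filesystem; examplefs and its helper functions are invented, only dentry_unhash() and S_ISDIR() come from the hunks themselves.

static int examplefs_rmdir(struct inode *dir, struct dentry *dentry)
{
        dentry_unhash(dentry);  /* drop the victim from the dcache hash first */

        if (!examplefs_dir_empty(dentry->d_inode))
                return -ENOTEMPTY;
        return examplefs_remove_entry(dir, dentry);
}

static int examplefs_rename(struct inode *old_dir, struct dentry *old_dentry,
                            struct inode *new_dir, struct dentry *new_dentry)
{
        /* only an existing target that is a directory gets unhashed */
        if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
                dentry_unhash(new_dentry);

        return examplefs_do_rename(old_dir, old_dentry, new_dir, new_dentry);
}

Unhashing before the operation keeps concurrent lookups from finding a directory dentry while it is being removed or overwritten, which the VFS previously did on the filesystems' behalf.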
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c index 20b4ea53fdc4..c61a6789f36c 100644 --- a/fs/fat/namei_vfat.c +++ b/fs/fat/namei_vfat.c | |||
@@ -824,6 +824,8 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry) | |||
824 | struct fat_slot_info sinfo; | 824 | struct fat_slot_info sinfo; |
825 | int err; | 825 | int err; |
826 | 826 | ||
827 | dentry_unhash(dentry); | ||
828 | |||
827 | lock_super(sb); | 829 | lock_super(sb); |
828 | 830 | ||
829 | err = fat_dir_empty(inode); | 831 | err = fat_dir_empty(inode); |
@@ -931,6 +933,9 @@ static int vfat_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
931 | int err, is_dir, update_dotdot, corrupt = 0; | 933 | int err, is_dir, update_dotdot, corrupt = 0; |
932 | struct super_block *sb = old_dir->i_sb; | 934 | struct super_block *sb = old_dir->i_sb; |
933 | 935 | ||
936 | if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) | ||
937 | dentry_unhash(new_dentry); | ||
938 | |||
934 | old_sinfo.bh = sinfo.bh = dotdot_bh = NULL; | 939 | old_sinfo.bh = sinfo.bh = dotdot_bh = NULL; |
935 | old_inode = old_dentry->d_inode; | 940 | old_inode = old_dentry->d_inode; |
936 | new_inode = new_dentry->d_inode; | 941 | new_inode = new_dentry->d_inode; |
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index b32eb29a4e6f..0d0e3faddcfa 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c | |||
@@ -667,6 +667,8 @@ static int fuse_rmdir(struct inode *dir, struct dentry *entry) | |||
667 | if (IS_ERR(req)) | 667 | if (IS_ERR(req)) |
668 | return PTR_ERR(req); | 668 | return PTR_ERR(req); |
669 | 669 | ||
670 | dentry_unhash(entry); | ||
671 | |||
670 | req->in.h.opcode = FUSE_RMDIR; | 672 | req->in.h.opcode = FUSE_RMDIR; |
671 | req->in.h.nodeid = get_node_id(dir); | 673 | req->in.h.nodeid = get_node_id(dir); |
672 | req->in.numargs = 1; | 674 | req->in.numargs = 1; |
@@ -691,6 +693,10 @@ static int fuse_rename(struct inode *olddir, struct dentry *oldent, | |||
691 | struct fuse_rename_in inarg; | 693 | struct fuse_rename_in inarg; |
692 | struct fuse_conn *fc = get_fuse_conn(olddir); | 694 | struct fuse_conn *fc = get_fuse_conn(olddir); |
693 | struct fuse_req *req = fuse_get_req(fc); | 695 | struct fuse_req *req = fuse_get_req(fc); |
696 | |||
697 | if (newent->d_inode && S_ISDIR(newent->d_inode->i_mode)) | ||
698 | dentry_unhash(newent); | ||
699 | |||
694 | if (IS_ERR(req)) | 700 | if (IS_ERR(req)) |
695 | return PTR_ERR(req); | 701 | return PTR_ERR(req); |
696 | 702 | ||
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c index cfa327d33194..c2b34cd2abe0 100644 --- a/fs/gfs2/main.c +++ b/fs/gfs2/main.c | |||
@@ -146,7 +146,7 @@ static int __init init_gfs2_fs(void) | |||
146 | 146 | ||
147 | gfs2_register_debugfs(); | 147 | gfs2_register_debugfs(); |
148 | 148 | ||
149 | printk("GFS2 (built %s %s) installed\n", __DATE__, __TIME__); | 149 | printk("GFS2 installed\n"); |
150 | 150 | ||
151 | return 0; | 151 | return 0; |
152 | 152 | ||
diff --git a/fs/hfs/dir.c b/fs/hfs/dir.c index b4d70b13be92..1cb70cdba2c1 100644 --- a/fs/hfs/dir.c +++ b/fs/hfs/dir.c | |||
@@ -253,6 +253,9 @@ static int hfs_remove(struct inode *dir, struct dentry *dentry) | |||
253 | struct inode *inode = dentry->d_inode; | 253 | struct inode *inode = dentry->d_inode; |
254 | int res; | 254 | int res; |
255 | 255 | ||
256 | if (S_ISDIR(inode->i_mode)) | ||
257 | dentry_unhash(dentry); | ||
258 | |||
256 | if (S_ISDIR(inode->i_mode) && inode->i_size != 2) | 259 | if (S_ISDIR(inode->i_mode) && inode->i_size != 2) |
257 | return -ENOTEMPTY; | 260 | return -ENOTEMPTY; |
258 | res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); | 261 | res = hfs_cat_delete(inode->i_ino, dir, &dentry->d_name); |
@@ -283,6 +286,9 @@ static int hfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
283 | 286 | ||
284 | /* Unlink destination if it already exists */ | 287 | /* Unlink destination if it already exists */ |
285 | if (new_dentry->d_inode) { | 288 | if (new_dentry->d_inode) { |
289 | if (S_ISDIR(new_dentry->d_inode->i_mode)) | ||
290 | dentry_unhash(new_dentry); | ||
291 | |||
286 | res = hfs_remove(new_dir, new_dentry); | 292 | res = hfs_remove(new_dir, new_dentry); |
287 | if (res) | 293 | if (res) |
288 | return res; | 294 | return res; |
diff --git a/fs/hfsplus/dir.c b/fs/hfsplus/dir.c index 4df5059c25da..b28835091dd0 100644 --- a/fs/hfsplus/dir.c +++ b/fs/hfsplus/dir.c | |||
@@ -370,6 +370,8 @@ static int hfsplus_rmdir(struct inode *dir, struct dentry *dentry) | |||
370 | struct inode *inode = dentry->d_inode; | 370 | struct inode *inode = dentry->d_inode; |
371 | int res; | 371 | int res; |
372 | 372 | ||
373 | dentry_unhash(dentry); | ||
374 | |||
373 | if (inode->i_size != 2) | 375 | if (inode->i_size != 2) |
374 | return -ENOTEMPTY; | 376 | return -ENOTEMPTY; |
375 | 377 | ||
@@ -467,10 +469,12 @@ static int hfsplus_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
467 | 469 | ||
468 | /* Unlink destination if it already exists */ | 470 | /* Unlink destination if it already exists */ |
469 | if (new_dentry->d_inode) { | 471 | if (new_dentry->d_inode) { |
470 | if (S_ISDIR(new_dentry->d_inode->i_mode)) | 472 | if (S_ISDIR(new_dentry->d_inode->i_mode)) { |
473 | dentry_unhash(new_dentry); | ||
471 | res = hfsplus_rmdir(new_dir, new_dentry); | 474 | res = hfsplus_rmdir(new_dir, new_dentry); |
472 | else | 475 | } else { |
473 | res = hfsplus_unlink(new_dir, new_dentry); | 476 | res = hfsplus_unlink(new_dir, new_dentry); |
477 | } | ||
474 | if (res) | 478 | if (res) |
475 | return res; | 479 | return res; |
476 | } | 480 | } |
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c index 2638c834ed28..e6816b9e6903 100644 --- a/fs/hostfs/hostfs_kern.c +++ b/fs/hostfs/hostfs_kern.c | |||
@@ -683,6 +683,8 @@ int hostfs_rmdir(struct inode *ino, struct dentry *dentry) | |||
683 | char *file; | 683 | char *file; |
684 | int err; | 684 | int err; |
685 | 685 | ||
686 | dentry_unhash(dentry); | ||
687 | |||
686 | if ((file = dentry_name(dentry)) == NULL) | 688 | if ((file = dentry_name(dentry)) == NULL) |
687 | return -ENOMEM; | 689 | return -ENOMEM; |
688 | err = do_rmdir(file); | 690 | err = do_rmdir(file); |
@@ -736,6 +738,9 @@ int hostfs_rename(struct inode *from_ino, struct dentry *from, | |||
736 | char *from_name, *to_name; | 738 | char *from_name, *to_name; |
737 | int err; | 739 | int err; |
738 | 740 | ||
741 | if (to->d_inode && S_ISDIR(to->d_inode->i_mode)) | ||
742 | dentry_unhash(to); | ||
743 | |||
739 | if ((from_name = dentry_name(from)) == NULL) | 744 | if ((from_name = dentry_name(from)) == NULL) |
740 | return -ENOMEM; | 745 | return -ENOMEM; |
741 | if ((to_name = dentry_name(to)) == NULL) { | 746 | if ((to_name = dentry_name(to)) == NULL) { |
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c index 1f05839c27a7..ff0ce21c0867 100644 --- a/fs/hpfs/namei.c +++ b/fs/hpfs/namei.c | |||
@@ -395,7 +395,6 @@ again: | |||
395 | 395 | ||
396 | dentry_unhash(dentry); | 396 | dentry_unhash(dentry); |
397 | if (!d_unhashed(dentry)) { | 397 | if (!d_unhashed(dentry)) { |
398 | dput(dentry); | ||
399 | hpfs_unlock(dir->i_sb); | 398 | hpfs_unlock(dir->i_sb); |
400 | return -ENOSPC; | 399 | return -ENOSPC; |
401 | } | 400 | } |
@@ -403,7 +402,6 @@ again: | |||
403 | !S_ISREG(inode->i_mode) || | 402 | !S_ISREG(inode->i_mode) || |
404 | get_write_access(inode)) { | 403 | get_write_access(inode)) { |
405 | d_rehash(dentry); | 404 | d_rehash(dentry); |
406 | dput(dentry); | ||
407 | } else { | 405 | } else { |
408 | struct iattr newattrs; | 406 | struct iattr newattrs; |
409 | /*printk("HPFS: truncating file before delete.\n");*/ | 407 | /*printk("HPFS: truncating file before delete.\n");*/ |
@@ -411,7 +409,6 @@ again: | |||
411 | newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME; | 409 | newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME; |
412 | err = notify_change(dentry, &newattrs); | 410 | err = notify_change(dentry, &newattrs); |
413 | put_write_access(inode); | 411 | put_write_access(inode); |
414 | dput(dentry); | ||
415 | if (!err) | 412 | if (!err) |
416 | goto again; | 413 | goto again; |
417 | } | 414 | } |
@@ -442,6 +439,8 @@ static int hpfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
442 | int err; | 439 | int err; |
443 | int r; | 440 | int r; |
444 | 441 | ||
442 | dentry_unhash(dentry); | ||
443 | |||
445 | hpfs_adjust_length(name, &len); | 444 | hpfs_adjust_length(name, &len); |
446 | hpfs_lock(dir->i_sb); | 445 | hpfs_lock(dir->i_sb); |
447 | err = -ENOENT; | 446 | err = -ENOENT; |
@@ -535,6 +534,10 @@ static int hpfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
535 | struct buffer_head *bh; | 534 | struct buffer_head *bh; |
536 | struct fnode *fnode; | 535 | struct fnode *fnode; |
537 | int err; | 536 | int err; |
537 | |||
538 | if (new_inode && S_ISDIR(new_inode->i_mode)) | ||
539 | dentry_unhash(new_dentry); | ||
540 | |||
538 | if ((err = hpfs_chk_name(new_name, &new_len))) return err; | 541 | if ((err = hpfs_chk_name(new_name, &new_len))) return err; |
539 | err = 0; | 542 | err = 0; |
540 | hpfs_adjust_length(old_name, &old_len); | 543 | hpfs_adjust_length(old_name, &old_len); |
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c index e7a035781b7d..7aafeb8fa300 100644 --- a/fs/hugetlbfs/inode.c +++ b/fs/hugetlbfs/inode.c | |||
@@ -921,7 +921,8 @@ static int can_do_hugetlb_shm(void) | |||
921 | return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group); | 921 | return capable(CAP_IPC_LOCK) || in_group_p(sysctl_hugetlb_shm_group); |
922 | } | 922 | } |
923 | 923 | ||
924 | struct file *hugetlb_file_setup(const char *name, size_t size, int acctflag, | 924 | struct file *hugetlb_file_setup(const char *name, size_t size, |
925 | vm_flags_t acctflag, | ||
925 | struct user_struct **user, int creat_flags) | 926 | struct user_struct **user, int creat_flags) |
926 | { | 927 | { |
927 | int error = -ENOMEM; | 928 | int error = -ENOMEM; |
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c index 29148a81c783..7f21cf3aaf92 100644 --- a/fs/jbd2/commit.c +++ b/fs/jbd2/commit.c | |||
@@ -219,7 +219,6 @@ static int journal_submit_data_buffers(journal_t *journal, | |||
219 | ret = err; | 219 | ret = err; |
220 | spin_lock(&journal->j_list_lock); | 220 | spin_lock(&journal->j_list_lock); |
221 | J_ASSERT(jinode->i_transaction == commit_transaction); | 221 | J_ASSERT(jinode->i_transaction == commit_transaction); |
222 | commit_transaction->t_flushed_data_blocks = 1; | ||
223 | clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags); | 222 | clear_bit(__JI_COMMIT_RUNNING, &jinode->i_flags); |
224 | smp_mb__after_clear_bit(); | 223 | smp_mb__after_clear_bit(); |
225 | wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING); | 224 | wake_up_bit(&jinode->i_flags, __JI_COMMIT_RUNNING); |
@@ -672,12 +671,16 @@ start_journal_io: | |||
672 | err = 0; | 671 | err = 0; |
673 | } | 672 | } |
674 | 673 | ||
674 | write_lock(&journal->j_state_lock); | ||
675 | J_ASSERT(commit_transaction->t_state == T_COMMIT); | ||
676 | commit_transaction->t_state = T_COMMIT_DFLUSH; | ||
677 | write_unlock(&journal->j_state_lock); | ||
675 | /* | 678 | /* |
676 | * If the journal is not located on the file system device, | 679 | * If the journal is not located on the file system device, |
677 | * then we must flush the file system device before we issue | 680 | * then we must flush the file system device before we issue |
678 | * the commit record | 681 | * the commit record |
679 | */ | 682 | */ |
680 | if (commit_transaction->t_flushed_data_blocks && | 683 | if (commit_transaction->t_need_data_flush && |
681 | (journal->j_fs_dev != journal->j_dev) && | 684 | (journal->j_fs_dev != journal->j_dev) && |
682 | (journal->j_flags & JBD2_BARRIER)) | 685 | (journal->j_flags & JBD2_BARRIER)) |
683 | blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL); | 686 | blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL); |
@@ -754,8 +757,13 @@ wait_for_iobuf: | |||
754 | required. */ | 757 | required. */ |
755 | JBUFFER_TRACE(jh, "file as BJ_Forget"); | 758 | JBUFFER_TRACE(jh, "file as BJ_Forget"); |
756 | jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget); | 759 | jbd2_journal_file_buffer(jh, commit_transaction, BJ_Forget); |
757 | /* Wake up any transactions which were waiting for this | 760 | /* |
758 | IO to complete */ | 761 | * Wake up any transactions which were waiting for this IO to |
762 | * complete. The barrier must be here so that changes by | ||
763 | * jbd2_journal_file_buffer() take effect before wake_up_bit() | ||
764 | * does the waitqueue check. | ||
765 | */ | ||
766 | smp_mb(); | ||
759 | wake_up_bit(&bh->b_state, BH_Unshadow); | 767 | wake_up_bit(&bh->b_state, BH_Unshadow); |
760 | JBUFFER_TRACE(jh, "brelse shadowed buffer"); | 768 | JBUFFER_TRACE(jh, "brelse shadowed buffer"); |
761 | __brelse(bh); | 769 | __brelse(bh); |
@@ -794,6 +802,10 @@ wait_for_iobuf: | |||
794 | jbd2_journal_abort(journal, err); | 802 | jbd2_journal_abort(journal, err); |
795 | 803 | ||
796 | jbd_debug(3, "JBD: commit phase 5\n"); | 804 | jbd_debug(3, "JBD: commit phase 5\n"); |
805 | write_lock(&journal->j_state_lock); | ||
806 | J_ASSERT(commit_transaction->t_state == T_COMMIT_DFLUSH); | ||
807 | commit_transaction->t_state = T_COMMIT_JFLUSH; | ||
808 | write_unlock(&journal->j_state_lock); | ||
797 | 809 | ||
798 | if (!JBD2_HAS_INCOMPAT_FEATURE(journal, | 810 | if (!JBD2_HAS_INCOMPAT_FEATURE(journal, |
799 | JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { | 811 | JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) { |
@@ -949,7 +961,7 @@ restart_loop: | |||
949 | 961 | ||
950 | jbd_debug(3, "JBD: commit phase 7\n"); | 962 | jbd_debug(3, "JBD: commit phase 7\n"); |
951 | 963 | ||
952 | J_ASSERT(commit_transaction->t_state == T_COMMIT); | 964 | J_ASSERT(commit_transaction->t_state == T_COMMIT_JFLUSH); |
953 | 965 | ||
954 | commit_transaction->t_start = jiffies; | 966 | commit_transaction->t_start = jiffies; |
955 | stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging, | 967 | stats.run.rs_logging = jbd2_time_diff(stats.run.rs_logging, |
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c index e0ec3db1c395..9a7826990304 100644 --- a/fs/jbd2/journal.c +++ b/fs/jbd2/journal.c | |||
@@ -479,9 +479,12 @@ int __jbd2_log_space_left(journal_t *journal) | |||
479 | int __jbd2_log_start_commit(journal_t *journal, tid_t target) | 479 | int __jbd2_log_start_commit(journal_t *journal, tid_t target) |
480 | { | 480 | { |
481 | /* | 481 | /* |
482 | * Are we already doing a recent enough commit? | 482 | * The only transaction we can possibly wait upon is the |
483 | * currently running transaction (if it exists). Otherwise, | ||
484 | * the target tid must be an old one. | ||
483 | */ | 485 | */ |
484 | if (!tid_geq(journal->j_commit_request, target)) { | 486 | if (journal->j_running_transaction && |
487 | journal->j_running_transaction->t_tid == target) { | ||
485 | /* | 488 | /* |
486 | * We want a new commit: OK, mark the request and wakeup the | 489 | * We want a new commit: OK, mark the request and wakeup the |
487 | * commit thread. We do _not_ do the commit ourselves. | 490 | * commit thread. We do _not_ do the commit ourselves. |
@@ -493,7 +496,15 @@ int __jbd2_log_start_commit(journal_t *journal, tid_t target) | |||
493 | journal->j_commit_sequence); | 496 | journal->j_commit_sequence); |
494 | wake_up(&journal->j_wait_commit); | 497 | wake_up(&journal->j_wait_commit); |
495 | return 1; | 498 | return 1; |
496 | } | 499 | } else if (!tid_geq(journal->j_commit_request, target)) |
500 | /* This should never happen, but if it does, preserve | ||
501 | the evidence before kjournald goes into a loop and | ||
502 | increments j_commit_sequence beyond all recognition. */ | ||
503 | WARN_ONCE(1, "jbd: bad log_start_commit: %u %u %u %u\n", | ||
504 | journal->j_commit_request, | ||
505 | journal->j_commit_sequence, | ||
506 | target, journal->j_running_transaction ? | ||
507 | journal->j_running_transaction->t_tid : 0); | ||
497 | return 0; | 508 | return 0; |
498 | } | 509 | } |
499 | 510 | ||
@@ -577,6 +588,47 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *ptid) | |||
577 | } | 588 | } |
578 | 589 | ||
579 | /* | 590 | /* |
591 | * Return 1 if a given transaction has not yet sent a barrier request | ||
592 | * connected with its commit. If 0 is returned, the transaction may or | ||
593 | * may not have sent the barrier. Used to avoid sending the barrier | ||
594 | * twice in common cases. | ||
595 | */ | ||
596 | int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid) | ||
597 | { | ||
598 | int ret = 0; | ||
599 | transaction_t *commit_trans; | ||
600 | |||
601 | if (!(journal->j_flags & JBD2_BARRIER)) | ||
602 | return 0; | ||
603 | read_lock(&journal->j_state_lock); | ||
604 | /* Transaction already committed? */ | ||
605 | if (tid_geq(journal->j_commit_sequence, tid)) | ||
606 | goto out; | ||
607 | commit_trans = journal->j_committing_transaction; | ||
608 | if (!commit_trans || commit_trans->t_tid != tid) { | ||
609 | ret = 1; | ||
610 | goto out; | ||
611 | } | ||
612 | /* | ||
613 | * Transaction is being committed and we already proceeded to | ||
614 | * submitting a flush to fs partition? | ||
615 | */ | ||
616 | if (journal->j_fs_dev != journal->j_dev) { | ||
617 | if (!commit_trans->t_need_data_flush || | ||
618 | commit_trans->t_state >= T_COMMIT_DFLUSH) | ||
619 | goto out; | ||
620 | } else { | ||
621 | if (commit_trans->t_state >= T_COMMIT_JFLUSH) | ||
622 | goto out; | ||
623 | } | ||
624 | ret = 1; | ||
625 | out: | ||
626 | read_unlock(&journal->j_state_lock); | ||
627 | return ret; | ||
628 | } | ||
629 | EXPORT_SYMBOL(jbd2_trans_will_send_data_barrier); | ||
630 | |||
631 | /* | ||
580 | * Wait for a specified commit to complete. | 632 | * Wait for a specified commit to complete. |
581 | * The caller may not hold the journal lock. | 633 | * The caller may not hold the journal lock. |
582 | */ | 634 | */ |
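jbd2_trans_will_send_data_barrier() lets a caller skip issuing its own cache flush when the commit of a given transaction will still send one. The decision depends on whether the tid has already committed, whether it is the one currently committing, how far the commit has progressed (the new T_COMMIT_DFLUSH/T_COMMIT_JFLUSH states), and whether the journal lives on a separate device. A userspace model of that decision tree; it ignores tid wraparound, which the kernel handles with tid_geq().

#include <stdbool.h>
#include <stdio.h>

enum t_state { T_RUNNING, T_COMMIT, T_COMMIT_DFLUSH, T_COMMIT_JFLUSH, T_FINISHED };

struct journal_model {
        bool barrier_enabled;      /* JBD2_BARRIER set */
        bool separate_journal_dev; /* j_fs_dev != j_dev */
        unsigned commit_sequence;  /* last fully committed tid */
        long committing_tid;       /* -1 if nothing is committing */
        enum t_state state;        /* state of the committing transaction */
        bool need_data_flush;      /* t_need_data_flush of that transaction */
};

/* true if the commit of @tid will still send the data flush itself */
static bool will_send_data_barrier(const struct journal_model *j, unsigned tid)
{
        if (!j->barrier_enabled)
                return false;
        if (j->commit_sequence >= tid)      /* already committed */
                return false;
        if (j->committing_tid != (long)tid) /* not committing yet */
                return true;
        if (j->separate_journal_dev)
                return j->need_data_flush && j->state < T_COMMIT_DFLUSH;
        return j->state < T_COMMIT_JFLUSH;
}

int main(void)
{
        struct journal_model j = {
                .barrier_enabled = true, .separate_journal_dev = false,
                .commit_sequence = 41, .committing_tid = 42,
                .state = T_COMMIT_DFLUSH, .need_data_flush = true,
        };

        printf("%d\n", will_send_data_barrier(&j, 42)); /* 1: JFLUSH not reached */
        j.state = T_COMMIT_JFLUSH;
        printf("%d\n", will_send_data_barrier(&j, 42)); /* 0: flush already issued */
        return 0;
}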
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 05fa77a23711..3eec82d32fd4 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c | |||
@@ -82,7 +82,7 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction) | |||
82 | */ | 82 | */ |
83 | 83 | ||
84 | /* | 84 | /* |
85 | * Update transiaction's maximum wait time, if debugging is enabled. | 85 | * Update transaction's maximum wait time, if debugging is enabled. |
86 | * | 86 | * |
87 | * In order for t_max_wait to be reliable, it must be protected by a | 87 | * In order for t_max_wait to be reliable, it must be protected by a |
88 | * lock. But doing so will mean that start_this_handle() can not be | 88 | * lock. But doing so will mean that start_this_handle() can not be |
@@ -91,11 +91,10 @@ jbd2_get_transaction(journal_t *journal, transaction_t *transaction) | |||
91 | * means that maximum wait time reported by the jbd2_run_stats | 91 | * means that maximum wait time reported by the jbd2_run_stats |
92 | * tracepoint will always be zero. | 92 | * tracepoint will always be zero. |
93 | */ | 93 | */ |
94 | static inline void update_t_max_wait(transaction_t *transaction) | 94 | static inline void update_t_max_wait(transaction_t *transaction, |
95 | unsigned long ts) | ||
95 | { | 96 | { |
96 | #ifdef CONFIG_JBD2_DEBUG | 97 | #ifdef CONFIG_JBD2_DEBUG |
97 | unsigned long ts = jiffies; | ||
98 | |||
99 | if (jbd2_journal_enable_debug && | 98 | if (jbd2_journal_enable_debug && |
100 | time_after(transaction->t_start, ts)) { | 99 | time_after(transaction->t_start, ts)) { |
101 | ts = jbd2_time_diff(ts, transaction->t_start); | 100 | ts = jbd2_time_diff(ts, transaction->t_start); |
@@ -121,6 +120,7 @@ static int start_this_handle(journal_t *journal, handle_t *handle, | |||
121 | tid_t tid; | 120 | tid_t tid; |
122 | int needed, need_to_start; | 121 | int needed, need_to_start; |
123 | int nblocks = handle->h_buffer_credits; | 122 | int nblocks = handle->h_buffer_credits; |
123 | unsigned long ts = jiffies; | ||
124 | 124 | ||
125 | if (nblocks > journal->j_max_transaction_buffers) { | 125 | if (nblocks > journal->j_max_transaction_buffers) { |
126 | printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n", | 126 | printk(KERN_ERR "JBD: %s wants too many credits (%d > %d)\n", |
@@ -271,7 +271,7 @@ repeat: | |||
271 | /* OK, account for the buffers that this operation expects to | 271 | /* OK, account for the buffers that this operation expects to |
272 | * use and add the handle to the running transaction. | 272 | * use and add the handle to the running transaction. |
273 | */ | 273 | */ |
274 | update_t_max_wait(transaction); | 274 | update_t_max_wait(transaction, ts); |
275 | handle->h_transaction = transaction; | 275 | handle->h_transaction = transaction; |
276 | atomic_inc(&transaction->t_updates); | 276 | atomic_inc(&transaction->t_updates); |
277 | atomic_inc(&transaction->t_handle_count); | 277 | atomic_inc(&transaction->t_handle_count); |
@@ -316,7 +316,8 @@ static handle_t *new_handle(int nblocks) | |||
316 | * This function is visible to journal users (like ext3fs), so is not | 316 | * This function is visible to journal users (like ext3fs), so is not |
317 | * called with the journal already locked. | 317 | * called with the journal already locked. |
318 | * | 318 | * |
319 | * Return a pointer to a newly allocated handle, or NULL on failure | 319 | * Return a pointer to a newly allocated handle, or an ERR_PTR() value |
320 | * on failure. | ||
320 | */ | 321 | */ |
321 | handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask) | 322 | handle_t *jbd2__journal_start(journal_t *journal, int nblocks, int gfp_mask) |
322 | { | 323 | { |
@@ -921,8 +922,8 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) | |||
921 | */ | 922 | */ |
922 | JBUFFER_TRACE(jh, "cancelling revoke"); | 923 | JBUFFER_TRACE(jh, "cancelling revoke"); |
923 | jbd2_journal_cancel_revoke(handle, jh); | 924 | jbd2_journal_cancel_revoke(handle, jh); |
924 | jbd2_journal_put_journal_head(jh); | ||
925 | out: | 925 | out: |
926 | jbd2_journal_put_journal_head(jh); | ||
926 | return err; | 927 | return err; |
927 | } | 928 | } |
928 | 929 | ||
@@ -2147,6 +2148,13 @@ int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode) | |||
2147 | jinode->i_next_transaction == transaction) | 2148 | jinode->i_next_transaction == transaction) |
2148 | goto done; | 2149 | goto done; |
2149 | 2150 | ||
2151 | /* | ||
2152 | * We only ever set this variable to 1 so the test is safe. Since | ||
2153 | * t_need_data_flush is likely to be set, we do the test to save some | ||
2154 | * cacheline bouncing | ||
2155 | */ | ||
2156 | if (!transaction->t_need_data_flush) | ||
2157 | transaction->t_need_data_flush = 1; | ||
2150 | /* On some different transaction's list - should be | 2158 | /* On some different transaction's list - should be |
2151 | * the committing one */ | 2159 | * the committing one */ |
2152 | if (jinode->i_transaction) { | 2160 | if (jinode->i_transaction) { |
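The t_need_data_flush hunk above also shows a small but deliberate pattern: for a flag that only ever goes from 0 to 1, testing before writing means the common already-set case is a shared read and never dirties the cacheline. A trivial illustration of the intent, not a memory-model-exact reproduction of the kernel code:

#include <stdio.h>

static int need_data_flush;  /* shared flag that only ever goes 0 -> 1 */

static void mark_needs_flush(void)
{
        if (!need_data_flush)        /* cheap shared read in the common case */
                need_data_flush = 1; /* the rare write is what dirties the line */
}

int main(void)
{
        mark_needs_flush();
        mark_needs_flush();          /* second call never writes */
        printf("%d\n", need_data_flush);
        return 0;
}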
diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c index 82faddd1f321..05f73328b28b 100644 --- a/fs/jffs2/dir.c +++ b/fs/jffs2/dir.c | |||
@@ -609,6 +609,8 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry) | |||
609 | int ret; | 609 | int ret; |
610 | uint32_t now = get_seconds(); | 610 | uint32_t now = get_seconds(); |
611 | 611 | ||
612 | dentry_unhash(dentry); | ||
613 | |||
612 | for (fd = f->dents ; fd; fd = fd->next) { | 614 | for (fd = f->dents ; fd; fd = fd->next) { |
613 | if (fd->ino) | 615 | if (fd->ino) |
614 | return -ENOTEMPTY; | 616 | return -ENOTEMPTY; |
@@ -784,6 +786,9 @@ static int jffs2_rename (struct inode *old_dir_i, struct dentry *old_dentry, | |||
784 | uint8_t type; | 786 | uint8_t type; |
785 | uint32_t now; | 787 | uint32_t now; |
786 | 788 | ||
789 | if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) | ||
790 | dentry_unhash(new_dentry); | ||
791 | |||
787 | /* The VFS will check for us and prevent trying to rename a | 792 | /* The VFS will check for us and prevent trying to rename a |
788 | * file over a directory and vice versa, but if it's a directory, | 793 | * file over a directory and vice versa, but if it's a directory, |
789 | * the VFS can't check whether the victim is empty. The filesystem | 794 | * the VFS can't check whether the victim is empty. The filesystem |
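The jffs2 hunks above, like the jfs, logfs, minix, ncpfs and nilfs2 hunks that follow, add the same boilerplate: now that the VFS no longer unhashes the victim in vfs_rmdir()/vfs_rename_dir(), a filesystem that depends on that behaviour calls dentry_unhash() itself at the top of ->rmdir, and in ->rename only when an existing directory is being overwritten. A pattern sketch is below; "examplefs" and examplefs_dir_is_empty() are hypothetical, and the snippet builds only inside a kernel tree, not standalone.

    /* Pattern sketch only; examplefs and examplefs_dir_is_empty() are made up. */
    #include <linux/fs.h>
    #include <linux/dcache.h>

    static bool examplefs_dir_is_empty(struct inode *inode);    /* hypothetical */

    static int examplefs_rmdir(struct inode *dir, struct dentry *dentry)
    {
        dentry_unhash(dentry);    /* formerly done for us by vfs_rmdir() */

        if (!examplefs_dir_is_empty(dentry->d_inode))
            return -ENOTEMPTY;
        /* ... drop the on-disk entry and the inode link counts ... */
        return 0;
    }

    static int examplefs_rename(struct inode *old_dir, struct dentry *old_dentry,
                                struct inode *new_dir, struct dentry *new_dentry)
    {
        /* Only an existing directory being overwritten needs the early unhash. */
        if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
            dentry_unhash(new_dentry);
        /* ... perform the rename itself ... */
        return 0;
    }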
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c index eaaf2b511e89..865df16a6cf3 100644 --- a/fs/jfs/namei.c +++ b/fs/jfs/namei.c | |||
@@ -360,6 +360,8 @@ static int jfs_rmdir(struct inode *dip, struct dentry *dentry) | |||
360 | 360 | ||
361 | jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name); | 361 | jfs_info("jfs_rmdir: dip:0x%p name:%s", dip, dentry->d_name.name); |
362 | 362 | ||
363 | dentry_unhash(dentry); | ||
364 | |||
363 | /* Init inode for quota operations. */ | 365 | /* Init inode for quota operations. */ |
364 | dquot_initialize(dip); | 366 | dquot_initialize(dip); |
365 | dquot_initialize(ip); | 367 | dquot_initialize(ip); |
@@ -1095,6 +1097,9 @@ static int jfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1095 | jfs_info("jfs_rename: %s %s", old_dentry->d_name.name, | 1097 | jfs_info("jfs_rename: %s %s", old_dentry->d_name.name, |
1096 | new_dentry->d_name.name); | 1098 | new_dentry->d_name.name); |
1097 | 1099 | ||
1100 | if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) | ||
1101 | dentry_unhash(new_dentry); | ||
1102 | |||
1098 | dquot_initialize(old_dir); | 1103 | dquot_initialize(old_dir); |
1099 | dquot_initialize(new_dir); | 1104 | dquot_initialize(new_dir); |
1100 | 1105 | ||
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c index 9ed89d1663f8..f34c9cde9e94 100644 --- a/fs/logfs/dir.c +++ b/fs/logfs/dir.c | |||
@@ -273,6 +273,8 @@ static int logfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
273 | { | 273 | { |
274 | struct inode *inode = dentry->d_inode; | 274 | struct inode *inode = dentry->d_inode; |
275 | 275 | ||
276 | dentry_unhash(dentry); | ||
277 | |||
276 | if (!logfs_empty_dir(inode)) | 278 | if (!logfs_empty_dir(inode)) |
277 | return -ENOTEMPTY; | 279 | return -ENOTEMPTY; |
278 | 280 | ||
@@ -622,6 +624,9 @@ static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry, | |||
622 | loff_t pos; | 624 | loff_t pos; |
623 | int err; | 625 | int err; |
624 | 626 | ||
627 | if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) | ||
628 | dentry_unhash(new_dentry); | ||
629 | |||
625 | /* 1. locate source dd */ | 630 | /* 1. locate source dd */ |
626 | err = logfs_get_dd(old_dir, old_dentry, &dd, &pos); | 631 | err = logfs_get_dd(old_dir, old_dentry, &dd, &pos); |
627 | if (err) | 632 | if (err) |
diff --git a/fs/minix/namei.c b/fs/minix/namei.c index 6e6777f1b4b2..f60aed8db9c4 100644 --- a/fs/minix/namei.c +++ b/fs/minix/namei.c | |||
@@ -168,6 +168,8 @@ static int minix_rmdir(struct inode * dir, struct dentry *dentry) | |||
168 | struct inode * inode = dentry->d_inode; | 168 | struct inode * inode = dentry->d_inode; |
169 | int err = -ENOTEMPTY; | 169 | int err = -ENOTEMPTY; |
170 | 170 | ||
171 | dentry_unhash(dentry); | ||
172 | |||
171 | if (minix_empty_dir(inode)) { | 173 | if (minix_empty_dir(inode)) { |
172 | err = minix_unlink(dir, dentry); | 174 | err = minix_unlink(dir, dentry); |
173 | if (!err) { | 175 | if (!err) { |
@@ -190,6 +192,9 @@ static int minix_rename(struct inode * old_dir, struct dentry *old_dentry, | |||
190 | struct minix_dir_entry * old_de; | 192 | struct minix_dir_entry * old_de; |
191 | int err = -ENOENT; | 193 | int err = -ENOENT; |
192 | 194 | ||
195 | if (new_inode && S_ISDIR(new_inode->i_mode)) | ||
196 | dentry_unhash(new_dentry); | ||
197 | |||
193 | old_de = minix_find_entry(old_dentry, &old_page); | 198 | old_de = minix_find_entry(old_dentry, &old_page); |
194 | if (!old_de) | 199 | if (!old_de) |
195 | goto out; | 200 | goto out; |
diff --git a/fs/mpage.c b/fs/mpage.c index 0afc809e46e0..fdfae9fa98cd 100644 --- a/fs/mpage.c +++ b/fs/mpage.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/writeback.h> | 27 | #include <linux/writeback.h> |
28 | #include <linux/backing-dev.h> | 28 | #include <linux/backing-dev.h> |
29 | #include <linux/pagevec.h> | 29 | #include <linux/pagevec.h> |
30 | #include <linux/cleancache.h> | ||
30 | 31 | ||
31 | /* | 32 | /* |
32 | * I/O completion handler for multipage BIOs. | 33 | * I/O completion handler for multipage BIOs. |
@@ -271,6 +272,12 @@ do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages, | |||
271 | SetPageMappedToDisk(page); | 272 | SetPageMappedToDisk(page); |
272 | } | 273 | } |
273 | 274 | ||
275 | if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) && | ||
276 | cleancache_get_page(page) == 0) { | ||
277 | SetPageUptodate(page); | ||
278 | goto confused; | ||
279 | } | ||
280 | |||
274 | /* | 281 | /* |
275 | * This page will go to BIO. Do we need to send this BIO off first? | 282 | * This page will go to BIO. Do we need to send this BIO off first? |
276 | */ | 283 | */ |
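The do_mpage_readpage() hunk above consults cleancache before building a BIO: when cleancache_get_page() returns 0 the page contents came from the secondary cache and the disk read is skipped. A small userspace sketch of that "try the cache, fall back to real I/O" shape follows; cache_get() and read_from_disk() are stand-ins, not kernel interfaces.

    /* Sketch of the lookup-before-I/O shape used by the cleancache hook. */
    #include <stdio.h>
    #include <string.h>

    #define PAGE_SIZE 4096

    /* Pretend secondary cache: returns 0 on a hit and fills buf, -1 on a miss. */
    static int cache_get(unsigned long index, char *buf)
    {
        if (index == 0) {                  /* only page 0 is cached here */
            memset(buf, 'A', PAGE_SIZE);
            return 0;
        }
        return -1;
    }

    static void read_from_disk(unsigned long index, char *buf)
    {
        memset(buf, 'B', PAGE_SIZE);       /* stand-in for submitting a BIO */
    }

    static void read_page(unsigned long index, char *buf)
    {
        if (cache_get(index, buf) == 0)    /* hit: no disk I/O needed */
            return;
        read_from_disk(index, buf);        /* miss: fall back to the device */
    }

    int main(void)
    {
        char buf[PAGE_SIZE];

        read_page(0, buf);
        printf("page 0 starts with '%c'\n", buf[0]);
        read_page(1, buf);
        printf("page 1 starts with '%c'\n", buf[0]);
        return 0;
    }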
diff --git a/fs/namei.c b/fs/namei.c index 6ff858c049c0..2358b326b221 100644 --- a/fs/namei.c +++ b/fs/namei.c | |||
@@ -391,79 +391,28 @@ void path_put(struct path *path) | |||
391 | } | 391 | } |
392 | EXPORT_SYMBOL(path_put); | 392 | EXPORT_SYMBOL(path_put); |
393 | 393 | ||
394 | /** | 394 | /* |
395 | * nameidata_drop_rcu - drop this nameidata out of rcu-walk | ||
396 | * @nd: nameidata pathwalk data to drop | ||
397 | * Returns: 0 on success, -ECHILD on failure | ||
398 | * | ||
399 | * Path walking has 2 modes, rcu-walk and ref-walk (see | 395 | * Path walking has 2 modes, rcu-walk and ref-walk (see |
400 | * Documentation/filesystems/path-lookup.txt). __drop_rcu* functions attempt | 396 | * Documentation/filesystems/path-lookup.txt). In situations when we can't |
401 | * to drop out of rcu-walk mode and take normal reference counts on dentries | 397 | * continue in RCU mode, we attempt to drop out of rcu-walk mode and grab |
402 | * and vfsmounts to transition to rcu-walk mode. __drop_rcu* functions take | 398 | * normal reference counts on dentries and vfsmounts to transition to rcu-walk |
403 | * refcounts at the last known good point before rcu-walk got stuck, so | 399 | * mode. Refcounts are grabbed at the last known good point before rcu-walk |
404 | * ref-walk may continue from there. If this is not successful (eg. a seqcount | 400 | * got stuck, so ref-walk may continue from there. If this is not successful |
405 | * has changed), then failure is returned and path walk restarts from the | 401 | * (eg. a seqcount has changed), then failure is returned and it's up to caller |
406 | * beginning in ref-walk mode. | 402 | * to restart the path walk from the beginning in ref-walk mode. |
407 | * | ||
408 | * nameidata_drop_rcu attempts to drop the current nd->path and nd->root into | ||
409 | * ref-walk. Must be called from rcu-walk context. | ||
410 | */ | 403 | */ |
411 | static int nameidata_drop_rcu(struct nameidata *nd) | ||
412 | { | ||
413 | struct fs_struct *fs = current->fs; | ||
414 | struct dentry *dentry = nd->path.dentry; | ||
415 | int want_root = 0; | ||
416 | |||
417 | BUG_ON(!(nd->flags & LOOKUP_RCU)); | ||
418 | if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) { | ||
419 | want_root = 1; | ||
420 | spin_lock(&fs->lock); | ||
421 | if (nd->root.mnt != fs->root.mnt || | ||
422 | nd->root.dentry != fs->root.dentry) | ||
423 | goto err_root; | ||
424 | } | ||
425 | spin_lock(&dentry->d_lock); | ||
426 | if (!__d_rcu_to_refcount(dentry, nd->seq)) | ||
427 | goto err; | ||
428 | BUG_ON(nd->inode != dentry->d_inode); | ||
429 | spin_unlock(&dentry->d_lock); | ||
430 | if (want_root) { | ||
431 | path_get(&nd->root); | ||
432 | spin_unlock(&fs->lock); | ||
433 | } | ||
434 | mntget(nd->path.mnt); | ||
435 | |||
436 | rcu_read_unlock(); | ||
437 | br_read_unlock(vfsmount_lock); | ||
438 | nd->flags &= ~LOOKUP_RCU; | ||
439 | return 0; | ||
440 | err: | ||
441 | spin_unlock(&dentry->d_lock); | ||
442 | err_root: | ||
443 | if (want_root) | ||
444 | spin_unlock(&fs->lock); | ||
445 | return -ECHILD; | ||
446 | } | ||
447 | |||
448 | /* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing. */ | ||
449 | static inline int nameidata_drop_rcu_maybe(struct nameidata *nd) | ||
450 | { | ||
451 | if (nd->flags & LOOKUP_RCU) | ||
452 | return nameidata_drop_rcu(nd); | ||
453 | return 0; | ||
454 | } | ||
455 | 404 | ||
456 | /** | 405 | /** |
457 | * nameidata_dentry_drop_rcu - drop nameidata and dentry out of rcu-walk | 406 | * unlazy_walk - try to switch to ref-walk mode. |
458 | * @nd: nameidata pathwalk data to drop | 407 | * @nd: nameidata pathwalk data |
459 | * @dentry: dentry to drop | 408 | * @dentry: child of nd->path.dentry or NULL |
460 | * Returns: 0 on success, -ECHILD on failure | 409 | * Returns: 0 on success, -ECHILD on failure |
461 | * | 410 | * |
462 | * nameidata_dentry_drop_rcu attempts to drop the current nd->path and nd->root, | 411 | * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry |
463 | * and dentry into ref-walk. @dentry must be a path found by a do_lookup call on | 412 | * for ref-walk mode. @dentry must be a path found by a do_lookup call on |
464 | * @nd. Must be called from rcu-walk context. | 413 | * @nd or NULL. Must be called from rcu-walk context. |
465 | */ | 414 | */ |
466 | static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry) | 415 | static int unlazy_walk(struct nameidata *nd, struct dentry *dentry) |
467 | { | 416 | { |
468 | struct fs_struct *fs = current->fs; | 417 | struct fs_struct *fs = current->fs; |
469 | struct dentry *parent = nd->path.dentry; | 418 | struct dentry *parent = nd->path.dentry; |
@@ -478,18 +427,25 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry | |||
478 | goto err_root; | 427 | goto err_root; |
479 | } | 428 | } |
480 | spin_lock(&parent->d_lock); | 429 | spin_lock(&parent->d_lock); |
481 | spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); | 430 | if (!dentry) { |
482 | if (!__d_rcu_to_refcount(dentry, nd->seq)) | 431 | if (!__d_rcu_to_refcount(parent, nd->seq)) |
483 | goto err; | 432 | goto err_parent; |
484 | /* | 433 | BUG_ON(nd->inode != parent->d_inode); |
485 | * If the sequence check on the child dentry passed, then the child has | 434 | } else { |
486 | * not been removed from its parent. This means the parent dentry must | 435 | spin_lock_nested(&dentry->d_lock, DENTRY_D_LOCK_NESTED); |
487 | * be valid and able to take a reference at this point. | 436 | if (!__d_rcu_to_refcount(dentry, nd->seq)) |
488 | */ | 437 | goto err_child; |
489 | BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); | 438 | /* |
490 | BUG_ON(!parent->d_count); | 439 | * If the sequence check on the child dentry passed, then |
491 | parent->d_count++; | 440 | * the child has not been removed from its parent. This |
492 | spin_unlock(&dentry->d_lock); | 441 | * means the parent dentry must be valid and able to take |
442 | * a reference at this point. | ||
443 | */ | ||
444 | BUG_ON(!IS_ROOT(dentry) && dentry->d_parent != parent); | ||
445 | BUG_ON(!parent->d_count); | ||
446 | parent->d_count++; | ||
447 | spin_unlock(&dentry->d_lock); | ||
448 | } | ||
493 | spin_unlock(&parent->d_lock); | 449 | spin_unlock(&parent->d_lock); |
494 | if (want_root) { | 450 | if (want_root) { |
495 | path_get(&nd->root); | 451 | path_get(&nd->root); |
@@ -501,8 +457,10 @@ static int nameidata_dentry_drop_rcu(struct nameidata *nd, struct dentry *dentry | |||
501 | br_read_unlock(vfsmount_lock); | 457 | br_read_unlock(vfsmount_lock); |
502 | nd->flags &= ~LOOKUP_RCU; | 458 | nd->flags &= ~LOOKUP_RCU; |
503 | return 0; | 459 | return 0; |
504 | err: | 460 | |
461 | err_child: | ||
505 | spin_unlock(&dentry->d_lock); | 462 | spin_unlock(&dentry->d_lock); |
463 | err_parent: | ||
506 | spin_unlock(&parent->d_lock); | 464 | spin_unlock(&parent->d_lock); |
507 | err_root: | 465 | err_root: |
508 | if (want_root) | 466 | if (want_root) |
@@ -510,59 +468,6 @@ err_root: | |||
510 | return -ECHILD; | 468 | return -ECHILD; |
511 | } | 469 | } |
512 | 470 | ||
513 | /* Try to drop out of rcu-walk mode if we were in it, otherwise do nothing. */ | ||
514 | static inline int nameidata_dentry_drop_rcu_maybe(struct nameidata *nd, struct dentry *dentry) | ||
515 | { | ||
516 | if (nd->flags & LOOKUP_RCU) { | ||
517 | if (unlikely(nameidata_dentry_drop_rcu(nd, dentry))) { | ||
518 | nd->flags &= ~LOOKUP_RCU; | ||
519 | if (!(nd->flags & LOOKUP_ROOT)) | ||
520 | nd->root.mnt = NULL; | ||
521 | rcu_read_unlock(); | ||
522 | br_read_unlock(vfsmount_lock); | ||
523 | return -ECHILD; | ||
524 | } | ||
525 | } | ||
526 | return 0; | ||
527 | } | ||
528 | |||
529 | /** | ||
530 | * nameidata_drop_rcu_last - drop nameidata ending path walk out of rcu-walk | ||
531 | * @nd: nameidata pathwalk data to drop | ||
532 | * Returns: 0 on success, -ECHILD on failure | ||
533 | * | ||
534 | * nameidata_drop_rcu_last attempts to drop the current nd->path into ref-walk. | ||
535 | * nd->path should be the final element of the lookup, so nd->root is discarded. | ||
536 | * Must be called from rcu-walk context. | ||
537 | */ | ||
538 | static int nameidata_drop_rcu_last(struct nameidata *nd) | ||
539 | { | ||
540 | struct dentry *dentry = nd->path.dentry; | ||
541 | |||
542 | BUG_ON(!(nd->flags & LOOKUP_RCU)); | ||
543 | nd->flags &= ~LOOKUP_RCU; | ||
544 | if (!(nd->flags & LOOKUP_ROOT)) | ||
545 | nd->root.mnt = NULL; | ||
546 | spin_lock(&dentry->d_lock); | ||
547 | if (!__d_rcu_to_refcount(dentry, nd->seq)) | ||
548 | goto err_unlock; | ||
549 | BUG_ON(nd->inode != dentry->d_inode); | ||
550 | spin_unlock(&dentry->d_lock); | ||
551 | |||
552 | mntget(nd->path.mnt); | ||
553 | |||
554 | rcu_read_unlock(); | ||
555 | br_read_unlock(vfsmount_lock); | ||
556 | |||
557 | return 0; | ||
558 | |||
559 | err_unlock: | ||
560 | spin_unlock(&dentry->d_lock); | ||
561 | rcu_read_unlock(); | ||
562 | br_read_unlock(vfsmount_lock); | ||
563 | return -ECHILD; | ||
564 | } | ||
565 | |||
566 | /** | 471 | /** |
567 | * release_open_intent - free up open intent resources | 472 | * release_open_intent - free up open intent resources |
568 | * @nd: pointer to nameidata | 473 | * @nd: pointer to nameidata |
@@ -606,26 +511,39 @@ do_revalidate(struct dentry *dentry, struct nameidata *nd) | |||
606 | return dentry; | 511 | return dentry; |
607 | } | 512 | } |
608 | 513 | ||
609 | /* | 514 | /** |
610 | * handle_reval_path - force revalidation of a dentry | 515 | * complete_walk - successful completion of path walk |
611 | * | 516 | * @nd: pointer nameidata |
612 | * In some situations the path walking code will trust dentries without | ||
613 | * revalidating them. This causes problems for filesystems that depend on | ||
614 | * d_revalidate to handle file opens (e.g. NFSv4). When FS_REVAL_DOT is set | ||
615 | * (which indicates that it's possible for the dentry to go stale), force | ||
616 | * a d_revalidate call before proceeding. | ||
617 | * | 517 | * |
618 | * Returns 0 if the revalidation was successful. If the revalidation fails, | 518 | * If we had been in RCU mode, drop out of it and legitimize nd->path. |
619 | * either return the error returned by d_revalidate or -ESTALE if the | 519 | * Revalidate the final result, unless we'd already done that during |
620 | * revalidation it just returned 0. If d_revalidate returns 0, we attempt to | 520 | * the path walk or the filesystem doesn't ask for it. Return 0 on |
621 | * invalidate the dentry. It's up to the caller to handle putting references | 521 | * success, -error on failure. In case of failure caller does not |
622 | * to the path if necessary. | 522 | * need to drop nd->path. |
623 | */ | 523 | */ |
624 | static inline int handle_reval_path(struct nameidata *nd) | 524 | static int complete_walk(struct nameidata *nd) |
625 | { | 525 | { |
626 | struct dentry *dentry = nd->path.dentry; | 526 | struct dentry *dentry = nd->path.dentry; |
627 | int status; | 527 | int status; |
628 | 528 | ||
529 | if (nd->flags & LOOKUP_RCU) { | ||
530 | nd->flags &= ~LOOKUP_RCU; | ||
531 | if (!(nd->flags & LOOKUP_ROOT)) | ||
532 | nd->root.mnt = NULL; | ||
533 | spin_lock(&dentry->d_lock); | ||
534 | if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) { | ||
535 | spin_unlock(&dentry->d_lock); | ||
536 | rcu_read_unlock(); | ||
537 | br_read_unlock(vfsmount_lock); | ||
538 | return -ECHILD; | ||
539 | } | ||
540 | BUG_ON(nd->inode != dentry->d_inode); | ||
541 | spin_unlock(&dentry->d_lock); | ||
542 | mntget(nd->path.mnt); | ||
543 | rcu_read_unlock(); | ||
544 | br_read_unlock(vfsmount_lock); | ||
545 | } | ||
546 | |||
629 | if (likely(!(nd->flags & LOOKUP_JUMPED))) | 547 | if (likely(!(nd->flags & LOOKUP_JUMPED))) |
630 | return 0; | 548 | return 0; |
631 | 549 | ||
@@ -643,6 +561,7 @@ static inline int handle_reval_path(struct nameidata *nd) | |||
643 | if (!status) | 561 | if (!status) |
644 | status = -ESTALE; | 562 | status = -ESTALE; |
645 | 563 | ||
564 | path_put(&nd->path); | ||
646 | return status; | 565 | return status; |
647 | } | 566 | } |
648 | 567 | ||
@@ -1241,13 +1160,8 @@ static int do_lookup(struct nameidata *nd, struct qstr *name, | |||
1241 | if (likely(__follow_mount_rcu(nd, path, inode, false))) | 1160 | if (likely(__follow_mount_rcu(nd, path, inode, false))) |
1242 | return 0; | 1161 | return 0; |
1243 | unlazy: | 1162 | unlazy: |
1244 | if (dentry) { | 1163 | if (unlazy_walk(nd, dentry)) |
1245 | if (nameidata_dentry_drop_rcu(nd, dentry)) | 1164 | return -ECHILD; |
1246 | return -ECHILD; | ||
1247 | } else { | ||
1248 | if (nameidata_drop_rcu(nd)) | ||
1249 | return -ECHILD; | ||
1250 | } | ||
1251 | } else { | 1165 | } else { |
1252 | dentry = __d_lookup(parent, name); | 1166 | dentry = __d_lookup(parent, name); |
1253 | } | 1167 | } |
@@ -1303,7 +1217,7 @@ static inline int may_lookup(struct nameidata *nd) | |||
1303 | int err = exec_permission(nd->inode, IPERM_FLAG_RCU); | 1217 | int err = exec_permission(nd->inode, IPERM_FLAG_RCU); |
1304 | if (err != -ECHILD) | 1218 | if (err != -ECHILD) |
1305 | return err; | 1219 | return err; |
1306 | if (nameidata_drop_rcu(nd)) | 1220 | if (unlazy_walk(nd, NULL)) |
1307 | return -ECHILD; | 1221 | return -ECHILD; |
1308 | } | 1222 | } |
1309 | return exec_permission(nd->inode, 0); | 1223 | return exec_permission(nd->inode, 0); |
@@ -1357,8 +1271,12 @@ static inline int walk_component(struct nameidata *nd, struct path *path, | |||
1357 | return -ENOENT; | 1271 | return -ENOENT; |
1358 | } | 1272 | } |
1359 | if (unlikely(inode->i_op->follow_link) && follow) { | 1273 | if (unlikely(inode->i_op->follow_link) && follow) { |
1360 | if (nameidata_dentry_drop_rcu_maybe(nd, path->dentry)) | 1274 | if (nd->flags & LOOKUP_RCU) { |
1361 | return -ECHILD; | 1275 | if (unlikely(unlazy_walk(nd, path->dentry))) { |
1276 | terminate_walk(nd); | ||
1277 | return -ECHILD; | ||
1278 | } | ||
1279 | } | ||
1362 | BUG_ON(inode != path->dentry->d_inode); | 1280 | BUG_ON(inode != path->dentry->d_inode); |
1363 | return 1; | 1281 | return 1; |
1364 | } | 1282 | } |
@@ -1657,18 +1575,8 @@ static int path_lookupat(int dfd, const char *name, | |||
1657 | } | 1575 | } |
1658 | } | 1576 | } |
1659 | 1577 | ||
1660 | if (nd->flags & LOOKUP_RCU) { | 1578 | if (!err) |
1661 | /* went all way through without dropping RCU */ | 1579 | err = complete_walk(nd); |
1662 | BUG_ON(err); | ||
1663 | if (nameidata_drop_rcu_last(nd)) | ||
1664 | err = -ECHILD; | ||
1665 | } | ||
1666 | |||
1667 | if (!err) { | ||
1668 | err = handle_reval_path(nd); | ||
1669 | if (err) | ||
1670 | path_put(&nd->path); | ||
1671 | } | ||
1672 | 1580 | ||
1673 | if (!err && nd->flags & LOOKUP_DIRECTORY) { | 1581 | if (!err && nd->flags & LOOKUP_DIRECTORY) { |
1674 | if (!nd->inode->i_op->lookup) { | 1582 | if (!nd->inode->i_op->lookup) { |
@@ -2134,13 +2042,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path, | |||
2134 | return ERR_PTR(error); | 2042 | return ERR_PTR(error); |
2135 | /* fallthrough */ | 2043 | /* fallthrough */ |
2136 | case LAST_ROOT: | 2044 | case LAST_ROOT: |
2137 | if (nd->flags & LOOKUP_RCU) { | 2045 | error = complete_walk(nd); |
2138 | if (nameidata_drop_rcu_last(nd)) | ||
2139 | return ERR_PTR(-ECHILD); | ||
2140 | } | ||
2141 | error = handle_reval_path(nd); | ||
2142 | if (error) | 2046 | if (error) |
2143 | goto exit; | 2047 | return ERR_PTR(error); |
2144 | audit_inode(pathname, nd->path.dentry); | 2048 | audit_inode(pathname, nd->path.dentry); |
2145 | if (open_flag & O_CREAT) { | 2049 | if (open_flag & O_CREAT) { |
2146 | error = -EISDIR; | 2050 | error = -EISDIR; |
@@ -2148,10 +2052,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path, | |||
2148 | } | 2052 | } |
2149 | goto ok; | 2053 | goto ok; |
2150 | case LAST_BIND: | 2054 | case LAST_BIND: |
2151 | /* can't be RCU mode here */ | 2055 | error = complete_walk(nd); |
2152 | error = handle_reval_path(nd); | ||
2153 | if (error) | 2056 | if (error) |
2154 | goto exit; | 2057 | return ERR_PTR(error); |
2155 | audit_inode(pathname, dir); | 2058 | audit_inode(pathname, dir); |
2156 | goto ok; | 2059 | goto ok; |
2157 | } | 2060 | } |
@@ -2170,10 +2073,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path, | |||
2170 | if (error) /* symlink */ | 2073 | if (error) /* symlink */ |
2171 | return NULL; | 2074 | return NULL; |
2172 | /* sayonara */ | 2075 | /* sayonara */ |
2173 | if (nd->flags & LOOKUP_RCU) { | 2076 | error = complete_walk(nd); |
2174 | if (nameidata_drop_rcu_last(nd)) | 2077 | if (error) |
2175 | return ERR_PTR(-ECHILD); | 2078 | return ERR_PTR(-ECHILD); |
2176 | } | ||
2177 | 2079 | ||
2178 | error = -ENOTDIR; | 2080 | error = -ENOTDIR; |
2179 | if (nd->flags & LOOKUP_DIRECTORY) { | 2081 | if (nd->flags & LOOKUP_DIRECTORY) { |
@@ -2185,11 +2087,9 @@ static struct file *do_last(struct nameidata *nd, struct path *path, | |||
2185 | } | 2087 | } |
2186 | 2088 | ||
2187 | /* create side of things */ | 2089 | /* create side of things */ |
2188 | 2090 | error = complete_walk(nd); | |
2189 | if (nd->flags & LOOKUP_RCU) { | 2091 | if (error) |
2190 | if (nameidata_drop_rcu_last(nd)) | 2092 | return ERR_PTR(error); |
2191 | return ERR_PTR(-ECHILD); | ||
2192 | } | ||
2193 | 2093 | ||
2194 | audit_inode(pathname, dir); | 2094 | audit_inode(pathname, dir); |
2195 | error = -EISDIR; | 2095 | error = -EISDIR; |
@@ -2629,10 +2529,10 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, int, mode) | |||
2629 | } | 2529 | } |
2630 | 2530 | ||
2631 | /* | 2531 | /* |
2632 | * We try to drop the dentry early: we should have | 2532 | * The dentry_unhash() helper will try to drop the dentry early: we |
2633 | * a usage count of 2 if we're the only user of this | 2533 | * should have a usage count of 2 if we're the only user of this |
2634 | * dentry, and if that is true (possibly after pruning | 2534 | * dentry, and if that is true (possibly after pruning the dcache), |
2635 | * the dcache), then we drop the dentry now. | 2535 | * then we drop the dentry now. |
2636 | * | 2536 | * |
2637 | * A low-level filesystem can, if it choses, legally | 2537 | * A low-level filesystem can, if it choses, legally |
2638 | * do a | 2538 | * do a |
@@ -2645,10 +2545,9 @@ SYSCALL_DEFINE2(mkdir, const char __user *, pathname, int, mode) | |||
2645 | */ | 2545 | */ |
2646 | void dentry_unhash(struct dentry *dentry) | 2546 | void dentry_unhash(struct dentry *dentry) |
2647 | { | 2547 | { |
2648 | dget(dentry); | ||
2649 | shrink_dcache_parent(dentry); | 2548 | shrink_dcache_parent(dentry); |
2650 | spin_lock(&dentry->d_lock); | 2549 | spin_lock(&dentry->d_lock); |
2651 | if (dentry->d_count == 2) | 2550 | if (dentry->d_count == 1) |
2652 | __d_drop(dentry); | 2551 | __d_drop(dentry); |
2653 | spin_unlock(&dentry->d_lock); | 2552 | spin_unlock(&dentry->d_lock); |
2654 | } | 2553 | } |
@@ -2664,25 +2563,26 @@ int vfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
2664 | return -EPERM; | 2563 | return -EPERM; |
2665 | 2564 | ||
2666 | mutex_lock(&dentry->d_inode->i_mutex); | 2565 | mutex_lock(&dentry->d_inode->i_mutex); |
2667 | dentry_unhash(dentry); | 2566 | |
2567 | error = -EBUSY; | ||
2668 | if (d_mountpoint(dentry)) | 2568 | if (d_mountpoint(dentry)) |
2669 | error = -EBUSY; | 2569 | goto out; |
2670 | else { | 2570 | |
2671 | error = security_inode_rmdir(dir, dentry); | 2571 | error = security_inode_rmdir(dir, dentry); |
2672 | if (!error) { | 2572 | if (error) |
2673 | error = dir->i_op->rmdir(dir, dentry); | 2573 | goto out; |
2674 | if (!error) { | 2574 | |
2675 | dentry->d_inode->i_flags |= S_DEAD; | 2575 | error = dir->i_op->rmdir(dir, dentry); |
2676 | dont_mount(dentry); | 2576 | if (error) |
2677 | } | 2577 | goto out; |
2678 | } | 2578 | |
2679 | } | 2579 | dentry->d_inode->i_flags |= S_DEAD; |
2580 | dont_mount(dentry); | ||
2581 | |||
2582 | out: | ||
2680 | mutex_unlock(&dentry->d_inode->i_mutex); | 2583 | mutex_unlock(&dentry->d_inode->i_mutex); |
2681 | if (!error) { | 2584 | if (!error) |
2682 | d_delete(dentry); | 2585 | d_delete(dentry); |
2683 | } | ||
2684 | dput(dentry); | ||
2685 | |||
2686 | return error; | 2586 | return error; |
2687 | } | 2587 | } |
2688 | 2588 | ||
@@ -3053,12 +2953,7 @@ SYSCALL_DEFINE2(link, const char __user *, oldname, const char __user *, newname | |||
3053 | * HOWEVER, it relies on the assumption that any object with ->lookup() | 2953 | * HOWEVER, it relies on the assumption that any object with ->lookup() |
3054 | * has no more than 1 dentry. If "hybrid" objects will ever appear, | 2954 | * has no more than 1 dentry. If "hybrid" objects will ever appear, |
3055 | * we'd better make sure that there's no link(2) for them. | 2955 | * we'd better make sure that there's no link(2) for them. |
3056 | * d) some filesystems don't support opened-but-unlinked directories, | 2956 | * d) conversion from fhandle to dentry may come in the wrong moment - when |
3057 | * either because of layout or because they are not ready to deal with | ||
3058 | * all cases correctly. The latter will be fixed (taking this sort of | ||
3059 | * stuff into VFS), but the former is not going away. Solution: the same | ||
3060 | * trick as in rmdir(). | ||
3061 | * e) conversion from fhandle to dentry may come in the wrong moment - when | ||
3062 | * we are removing the target. Solution: we will have to grab ->i_mutex | 2957 | * we are removing the target. Solution: we will have to grab ->i_mutex |
3063 | * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on | 2958 | * in the fhandle_to_dentry code. [FIXME - current nfsfh.c relies on |
3064 | * ->i_mutex on parents, which works but leads to some truly excessive | 2959 | * ->i_mutex on parents, which works but leads to some truly excessive |
@@ -3068,7 +2963,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, | |||
3068 | struct inode *new_dir, struct dentry *new_dentry) | 2963 | struct inode *new_dir, struct dentry *new_dentry) |
3069 | { | 2964 | { |
3070 | int error = 0; | 2965 | int error = 0; |
3071 | struct inode *target; | 2966 | struct inode *target = new_dentry->d_inode; |
3072 | 2967 | ||
3073 | /* | 2968 | /* |
3074 | * If we are going to change the parent - check write permissions, | 2969 | * If we are going to change the parent - check write permissions, |
@@ -3084,26 +2979,24 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, | |||
3084 | if (error) | 2979 | if (error) |
3085 | return error; | 2980 | return error; |
3086 | 2981 | ||
3087 | target = new_dentry->d_inode; | ||
3088 | if (target) | 2982 | if (target) |
3089 | mutex_lock(&target->i_mutex); | 2983 | mutex_lock(&target->i_mutex); |
3090 | if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry)) | 2984 | |
3091 | error = -EBUSY; | 2985 | error = -EBUSY; |
3092 | else { | 2986 | if (d_mountpoint(old_dentry) || d_mountpoint(new_dentry)) |
3093 | if (target) | 2987 | goto out; |
3094 | dentry_unhash(new_dentry); | 2988 | |
3095 | error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); | 2989 | error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); |
3096 | } | 2990 | if (error) |
2991 | goto out; | ||
2992 | |||
3097 | if (target) { | 2993 | if (target) { |
3098 | if (!error) { | 2994 | target->i_flags |= S_DEAD; |
3099 | target->i_flags |= S_DEAD; | 2995 | dont_mount(new_dentry); |
3100 | dont_mount(new_dentry); | ||
3101 | } | ||
3102 | mutex_unlock(&target->i_mutex); | ||
3103 | if (d_unhashed(new_dentry)) | ||
3104 | d_rehash(new_dentry); | ||
3105 | dput(new_dentry); | ||
3106 | } | 2996 | } |
2997 | out: | ||
2998 | if (target) | ||
2999 | mutex_unlock(&target->i_mutex); | ||
3107 | if (!error) | 3000 | if (!error) |
3108 | if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) | 3001 | if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) |
3109 | d_move(old_dentry,new_dentry); | 3002 | d_move(old_dentry,new_dentry); |
@@ -3113,7 +3006,7 @@ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry, | |||
3113 | static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry, | 3006 | static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry, |
3114 | struct inode *new_dir, struct dentry *new_dentry) | 3007 | struct inode *new_dir, struct dentry *new_dentry) |
3115 | { | 3008 | { |
3116 | struct inode *target; | 3009 | struct inode *target = new_dentry->d_inode; |
3117 | int error; | 3010 | int error; |
3118 | 3011 | ||
3119 | error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry); | 3012 | error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry); |
@@ -3121,19 +3014,22 @@ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry, | |||
3121 | return error; | 3014 | return error; |
3122 | 3015 | ||
3123 | dget(new_dentry); | 3016 | dget(new_dentry); |
3124 | target = new_dentry->d_inode; | ||
3125 | if (target) | 3017 | if (target) |
3126 | mutex_lock(&target->i_mutex); | 3018 | mutex_lock(&target->i_mutex); |
3019 | |||
3020 | error = -EBUSY; | ||
3127 | if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry)) | 3021 | if (d_mountpoint(old_dentry)||d_mountpoint(new_dentry)) |
3128 | error = -EBUSY; | 3022 | goto out; |
3129 | else | 3023 | |
3130 | error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); | 3024 | error = old_dir->i_op->rename(old_dir, old_dentry, new_dir, new_dentry); |
3131 | if (!error) { | 3025 | if (error) |
3132 | if (target) | 3026 | goto out; |
3133 | dont_mount(new_dentry); | 3027 | |
3134 | if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) | 3028 | if (target) |
3135 | d_move(old_dentry, new_dentry); | 3029 | dont_mount(new_dentry); |
3136 | } | 3030 | if (!(old_dir->i_sb->s_type->fs_flags & FS_RENAME_DOES_D_MOVE)) |
3031 | d_move(old_dentry, new_dentry); | ||
3032 | out: | ||
3137 | if (target) | 3033 | if (target) |
3138 | mutex_unlock(&target->i_mutex); | 3034 | mutex_unlock(&target->i_mutex); |
3139 | dput(new_dentry); | 3035 | dput(new_dentry); |
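The namei.c changes above collapse the three nameidata_drop_rcu* helpers into unlazy_walk()/complete_walk(). The underlying idea, per the comment at the top of this file's diff, is to walk optimistically under RCU with sequence counts and, if legitimizing the result fails, fall back to the reference-counted walk. The userspace sketch below shows only that optimistic-read-then-validate control flow; the names are illustrative and a production seqlock also needs acquire/release memory ordering.

    /* Sketch of the rcu-walk/ref-walk split: lockless read, validate, fall
     * back to a locked path on failure.  Demonstration only. */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_uint seq;                 /* even = stable, odd = write in progress */
    static atomic_int shared_value = 42;
    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    static void writer_update(int v)
    {
        pthread_mutex_lock(&lock);
        atomic_fetch_add(&seq, 1);          /* seq goes odd */
        atomic_store(&shared_value, v);
        atomic_fetch_add(&seq, 1);          /* seq goes even again */
        pthread_mutex_unlock(&lock);
    }

    static int read_value(void)
    {
        unsigned int s = atomic_load(&seq);
        int v;

        /* "rcu-walk": optimistic lockless read, then revalidate the count. */
        if (!(s & 1)) {
            v = atomic_load(&shared_value);
            if (atomic_load(&seq) == s)
                return v;
        }

        /* "ref-walk" fallback: take the lock and read authoritatively. */
        pthread_mutex_lock(&lock);
        v = atomic_load(&shared_value);
        pthread_mutex_unlock(&lock);
        return v;
    }

    int main(void)
    {
        writer_update(7);
        printf("value = %d\n", read_value());
        return 0;
    }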
diff --git a/fs/namespace.c b/fs/namespace.c index d99bcf59e4c2..fe59bd145d21 100644 --- a/fs/namespace.c +++ b/fs/namespace.c | |||
@@ -1695,7 +1695,7 @@ static int graft_tree(struct vfsmount *mnt, struct path *path) | |||
1695 | 1695 | ||
1696 | static int flags_to_propagation_type(int flags) | 1696 | static int flags_to_propagation_type(int flags) |
1697 | { | 1697 | { |
1698 | int type = flags & ~MS_REC; | 1698 | int type = flags & ~(MS_REC | MS_SILENT); |
1699 | 1699 | ||
1700 | /* Fail if any non-propagation flags are set */ | 1700 | /* Fail if any non-propagation flags are set */ |
1701 | if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) | 1701 | if (type & ~(MS_SHARED | MS_PRIVATE | MS_SLAVE | MS_UNBINDABLE)) |
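The one-line namespace.c fix masks MS_SILENT as well as MS_REC before checking that only propagation-type bits remain, so a mount(2) caller that combines MS_SILENT with MS_SHARED and friends is no longer rejected. A tiny userspace sketch of the mask-then-validate check, using made-up flag values rather than the kernel's MS_* constants:

    /* Sketch of mask-then-validate flag handling; flag values are illustrative. */
    #include <stdio.h>

    #define FLAG_REC      0x01    /* modifier: recursive */
    #define FLAG_SILENT   0x02    /* modifier: quiet */
    #define FLAG_SHARED   0x10    /* propagation types */
    #define FLAG_PRIVATE  0x20

    static int flags_to_type(int flags)
    {
        /* Strip modifiers that may legitimately accompany the request ... */
        int type = flags & ~(FLAG_REC | FLAG_SILENT);

        /* ... then fail if anything other than a propagation type remains. */
        if (type & ~(FLAG_SHARED | FLAG_PRIVATE))
            return 0;
        return type;
    }

    int main(void)
    {
        printf("%#x\n", flags_to_type(FLAG_SHARED | FLAG_SILENT)); /* accepted */
        printf("%#x\n", flags_to_type(FLAG_SHARED | 0x80));        /* rejected */
        return 0;
    }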
diff --git a/fs/ncpfs/dir.c b/fs/ncpfs/dir.c index f6946bb5cb55..e3e646b06404 100644 --- a/fs/ncpfs/dir.c +++ b/fs/ncpfs/dir.c | |||
@@ -1033,6 +1033,8 @@ static int ncp_rmdir(struct inode *dir, struct dentry *dentry) | |||
1033 | DPRINTK("ncp_rmdir: removing %s/%s\n", | 1033 | DPRINTK("ncp_rmdir: removing %s/%s\n", |
1034 | dentry->d_parent->d_name.name, dentry->d_name.name); | 1034 | dentry->d_parent->d_name.name, dentry->d_name.name); |
1035 | 1035 | ||
1036 | dentry_unhash(dentry); | ||
1037 | |||
1036 | error = -EBUSY; | 1038 | error = -EBUSY; |
1037 | if (!d_unhashed(dentry)) | 1039 | if (!d_unhashed(dentry)) |
1038 | goto out; | 1040 | goto out; |
@@ -1139,6 +1141,9 @@ static int ncp_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1139 | old_dentry->d_parent->d_name.name, old_dentry->d_name.name, | 1141 | old_dentry->d_parent->d_name.name, old_dentry->d_name.name, |
1140 | new_dentry->d_parent->d_name.name, new_dentry->d_name.name); | 1142 | new_dentry->d_parent->d_name.name, new_dentry->d_name.name); |
1141 | 1143 | ||
1144 | if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) | ||
1145 | dentry_unhash(new_dentry); | ||
1146 | |||
1142 | ncp_age_dentry(server, old_dentry); | 1147 | ncp_age_dentry(server, old_dentry); |
1143 | ncp_age_dentry(server, new_dentry); | 1148 | ncp_age_dentry(server, new_dentry); |
1144 | 1149 | ||
diff --git a/fs/ncpfs/mmap.c b/fs/ncpfs/mmap.c index a7c07b44b100..e5d71b27a5b0 100644 --- a/fs/ncpfs/mmap.c +++ b/fs/ncpfs/mmap.c | |||
@@ -16,6 +16,7 @@ | |||
16 | #include <linux/mman.h> | 16 | #include <linux/mman.h> |
17 | #include <linux/string.h> | 17 | #include <linux/string.h> |
18 | #include <linux/fcntl.h> | 18 | #include <linux/fcntl.h> |
19 | #include <linux/memcontrol.h> | ||
19 | 20 | ||
20 | #include <asm/uaccess.h> | 21 | #include <asm/uaccess.h> |
21 | #include <asm/system.h> | 22 | #include <asm/system.h> |
@@ -92,6 +93,7 @@ static int ncp_file_mmap_fault(struct vm_area_struct *area, | |||
92 | * -- wli | 93 | * -- wli |
93 | */ | 94 | */ |
94 | count_vm_event(PGMAJFAULT); | 95 | count_vm_event(PGMAJFAULT); |
96 | mem_cgroup_count_vm_event(area->vm_mm, PGMAJFAULT); | ||
95 | return VM_FAULT_MAJOR; | 97 | return VM_FAULT_MAJOR; |
96 | } | 98 | } |
97 | 99 | ||
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c index 546849b3e88f..1102a5fbb744 100644 --- a/fs/nilfs2/namei.c +++ b/fs/nilfs2/namei.c | |||
@@ -334,6 +334,8 @@ static int nilfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
334 | struct nilfs_transaction_info ti; | 334 | struct nilfs_transaction_info ti; |
335 | int err; | 335 | int err; |
336 | 336 | ||
337 | dentry_unhash(dentry); | ||
338 | |||
337 | err = nilfs_transaction_begin(dir->i_sb, &ti, 0); | 339 | err = nilfs_transaction_begin(dir->i_sb, &ti, 0); |
338 | if (err) | 340 | if (err) |
339 | return err; | 341 | return err; |
@@ -369,6 +371,9 @@ static int nilfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
369 | struct nilfs_transaction_info ti; | 371 | struct nilfs_transaction_info ti; |
370 | int err; | 372 | int err; |
371 | 373 | ||
374 | if (new_inode && S_ISDIR(new_inode->i_mode)) | ||
375 | dentry_unhash(new_dentry); | ||
376 | |||
372 | err = nilfs_transaction_begin(old_dir->i_sb, &ti, 1); | 377 | err = nilfs_transaction_begin(old_dir->i_sb, &ti, 1); |
373 | if (unlikely(err)) | 378 | if (unlikely(err)) |
374 | return err; | 379 | return err; |
diff --git a/fs/ocfs2/Makefile b/fs/ocfs2/Makefile index d8a0313e99e6..f17e58b32989 100644 --- a/fs/ocfs2/Makefile +++ b/fs/ocfs2/Makefile | |||
@@ -30,6 +30,7 @@ ocfs2-objs := \ | |||
30 | namei.o \ | 30 | namei.o \ |
31 | refcounttree.o \ | 31 | refcounttree.o \ |
32 | reservations.o \ | 32 | reservations.o \ |
33 | move_extents.o \ | ||
33 | resize.o \ | 34 | resize.o \ |
34 | slot_map.o \ | 35 | slot_map.o \ |
35 | suballoc.o \ | 36 | suballoc.o \ |
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c index 48aa9c7401c7..ed553c60de82 100644 --- a/fs/ocfs2/alloc.c +++ b/fs/ocfs2/alloc.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include <linux/highmem.h> | 29 | #include <linux/highmem.h> |
30 | #include <linux/swap.h> | 30 | #include <linux/swap.h> |
31 | #include <linux/quotaops.h> | 31 | #include <linux/quotaops.h> |
32 | #include <linux/blkdev.h> | ||
32 | 33 | ||
33 | #include <cluster/masklog.h> | 34 | #include <cluster/masklog.h> |
34 | 35 | ||
@@ -7184,3 +7185,168 @@ out_commit: | |||
7184 | out: | 7185 | out: |
7185 | return ret; | 7186 | return ret; |
7186 | } | 7187 | } |
7188 | |||
7189 | static int ocfs2_trim_extent(struct super_block *sb, | ||
7190 | struct ocfs2_group_desc *gd, | ||
7191 | u32 start, u32 count) | ||
7192 | { | ||
7193 | u64 discard, bcount; | ||
7194 | |||
7195 | bcount = ocfs2_clusters_to_blocks(sb, count); | ||
7196 | discard = le64_to_cpu(gd->bg_blkno) + | ||
7197 | ocfs2_clusters_to_blocks(sb, start); | ||
7198 | |||
7199 | trace_ocfs2_trim_extent(sb, (unsigned long long)discard, bcount); | ||
7200 | |||
7201 | return sb_issue_discard(sb, discard, bcount, GFP_NOFS, 0); | ||
7202 | } | ||
7203 | |||
7204 | static int ocfs2_trim_group(struct super_block *sb, | ||
7205 | struct ocfs2_group_desc *gd, | ||
7206 | u32 start, u32 max, u32 minbits) | ||
7207 | { | ||
7208 | int ret = 0, count = 0, next; | ||
7209 | void *bitmap = gd->bg_bitmap; | ||
7210 | |||
7211 | if (le16_to_cpu(gd->bg_free_bits_count) < minbits) | ||
7212 | return 0; | ||
7213 | |||
7214 | trace_ocfs2_trim_group((unsigned long long)le64_to_cpu(gd->bg_blkno), | ||
7215 | start, max, minbits); | ||
7216 | |||
7217 | while (start < max) { | ||
7218 | start = ocfs2_find_next_zero_bit(bitmap, max, start); | ||
7219 | if (start >= max) | ||
7220 | break; | ||
7221 | next = ocfs2_find_next_bit(bitmap, max, start); | ||
7222 | |||
7223 | if ((next - start) >= minbits) { | ||
7224 | ret = ocfs2_trim_extent(sb, gd, | ||
7225 | start, next - start); | ||
7226 | if (ret < 0) { | ||
7227 | mlog_errno(ret); | ||
7228 | break; | ||
7229 | } | ||
7230 | count += next - start; | ||
7231 | } | ||
7232 | start = next + 1; | ||
7233 | |||
7234 | if (fatal_signal_pending(current)) { | ||
7235 | count = -ERESTARTSYS; | ||
7236 | break; | ||
7237 | } | ||
7238 | |||
7239 | if ((le16_to_cpu(gd->bg_free_bits_count) - count) < minbits) | ||
7240 | break; | ||
7241 | } | ||
7242 | |||
7243 | if (ret < 0) | ||
7244 | count = ret; | ||
7245 | |||
7246 | return count; | ||
7247 | } | ||
7248 | |||
7249 | int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range) | ||
7250 | { | ||
7251 | struct ocfs2_super *osb = OCFS2_SB(sb); | ||
7252 | u64 start, len, trimmed, first_group, last_group, group; | ||
7253 | int ret, cnt; | ||
7254 | u32 first_bit, last_bit, minlen; | ||
7255 | struct buffer_head *main_bm_bh = NULL; | ||
7256 | struct inode *main_bm_inode = NULL; | ||
7257 | struct buffer_head *gd_bh = NULL; | ||
7258 | struct ocfs2_dinode *main_bm; | ||
7259 | struct ocfs2_group_desc *gd = NULL; | ||
7260 | |||
7261 | start = range->start >> osb->s_clustersize_bits; | ||
7262 | len = range->len >> osb->s_clustersize_bits; | ||
7263 | minlen = range->minlen >> osb->s_clustersize_bits; | ||
7264 | trimmed = 0; | ||
7265 | |||
7266 | if (!len) { | ||
7267 | range->len = 0; | ||
7268 | return 0; | ||
7269 | } | ||
7270 | |||
7271 | if (minlen >= osb->bitmap_cpg) | ||
7272 | return -EINVAL; | ||
7273 | |||
7274 | main_bm_inode = ocfs2_get_system_file_inode(osb, | ||
7275 | GLOBAL_BITMAP_SYSTEM_INODE, | ||
7276 | OCFS2_INVALID_SLOT); | ||
7277 | if (!main_bm_inode) { | ||
7278 | ret = -EIO; | ||
7279 | mlog_errno(ret); | ||
7280 | goto out; | ||
7281 | } | ||
7282 | |||
7283 | mutex_lock(&main_bm_inode->i_mutex); | ||
7284 | |||
7285 | ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 0); | ||
7286 | if (ret < 0) { | ||
7287 | mlog_errno(ret); | ||
7288 | goto out_mutex; | ||
7289 | } | ||
7290 | main_bm = (struct ocfs2_dinode *)main_bm_bh->b_data; | ||
7291 | |||
7292 | if (start >= le32_to_cpu(main_bm->i_clusters)) { | ||
7293 | ret = -EINVAL; | ||
7294 | goto out_unlock; | ||
7295 | } | ||
7296 | |||
7297 | if (start + len > le32_to_cpu(main_bm->i_clusters)) | ||
7298 | len = le32_to_cpu(main_bm->i_clusters) - start; | ||
7299 | |||
7300 | trace_ocfs2_trim_fs(start, len, minlen); | ||
7301 | |||
7302 | /* Determine first and last group to examine based on start and len */ | ||
7303 | first_group = ocfs2_which_cluster_group(main_bm_inode, start); | ||
7304 | if (first_group == osb->first_cluster_group_blkno) | ||
7305 | first_bit = start; | ||
7306 | else | ||
7307 | first_bit = start - ocfs2_blocks_to_clusters(sb, first_group); | ||
7308 | last_group = ocfs2_which_cluster_group(main_bm_inode, start + len - 1); | ||
7309 | last_bit = osb->bitmap_cpg; | ||
7310 | |||
7311 | for (group = first_group; group <= last_group;) { | ||
7312 | if (first_bit + len >= osb->bitmap_cpg) | ||
7313 | last_bit = osb->bitmap_cpg; | ||
7314 | else | ||
7315 | last_bit = first_bit + len; | ||
7316 | |||
7317 | ret = ocfs2_read_group_descriptor(main_bm_inode, | ||
7318 | main_bm, group, | ||
7319 | &gd_bh); | ||
7320 | if (ret < 0) { | ||
7321 | mlog_errno(ret); | ||
7322 | break; | ||
7323 | } | ||
7324 | |||
7325 | gd = (struct ocfs2_group_desc *)gd_bh->b_data; | ||
7326 | cnt = ocfs2_trim_group(sb, gd, first_bit, last_bit, minlen); | ||
7327 | brelse(gd_bh); | ||
7328 | gd_bh = NULL; | ||
7329 | if (cnt < 0) { | ||
7330 | ret = cnt; | ||
7331 | mlog_errno(ret); | ||
7332 | break; | ||
7333 | } | ||
7334 | |||
7335 | trimmed += cnt; | ||
7336 | len -= osb->bitmap_cpg - first_bit; | ||
7337 | first_bit = 0; | ||
7338 | if (group == osb->first_cluster_group_blkno) | ||
7339 | group = ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg); | ||
7340 | else | ||
7341 | group += ocfs2_clusters_to_blocks(sb, osb->bitmap_cpg); | ||
7342 | } | ||
7343 | range->len = trimmed * sb->s_blocksize; | ||
7344 | out_unlock: | ||
7345 | ocfs2_inode_unlock(main_bm_inode, 0); | ||
7346 | brelse(main_bm_bh); | ||
7347 | out_mutex: | ||
7348 | mutex_unlock(&main_bm_inode->i_mutex); | ||
7349 | iput(main_bm_inode); | ||
7350 | out: | ||
7351 | return ret; | ||
7352 | } | ||
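In the alloc.c additions above, ocfs2_trim_group() scans a group's allocation bitmap for runs of clear (free) bits of at least minlen and discards each run, while ocfs2_trim_fs() walks the cluster groups covered by the requested range and totals what was trimmed. A userspace sketch of the inner scan follows; a char-per-bit array stands in for the group bitmap and printf() for sb_issue_discard(), and the names are made up.

    /* Sketch of the free-run scan in ocfs2_trim_group(). */
    #include <stdio.h>

    /* Return the first index >= start with bitmap[i] == want, or max if none. */
    static unsigned find_next(const char *bitmap, unsigned max, unsigned start,
                              char want)
    {
        while (start < max && bitmap[start] != want)
            start++;
        return start;
    }

    /* Discard every free run of at least minlen bits; return bits trimmed. */
    static unsigned trim_group(const char *bitmap, unsigned max, unsigned minlen)
    {
        unsigned start = 0, next, trimmed = 0;

        while (start < max) {
            start = find_next(bitmap, max, start, 0);    /* next free bit */
            if (start >= max)
                break;
            next = find_next(bitmap, max, start, 1);     /* end of the run */

            if (next - start >= minlen) {
                printf("discard bits [%u, %u)\n", start, next);
                trimmed += next - start;
            }
            start = next + 1;
        }
        return trimmed;
    }

    int main(void)
    {
        /* 1 = allocated, 0 = free */
        char bitmap[] = { 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0 };

        printf("trimmed %u bits\n", trim_group(bitmap, sizeof(bitmap), 3));
        return 0;
    }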
diff --git a/fs/ocfs2/alloc.h b/fs/ocfs2/alloc.h index 3bd08a03251c..ca381c584127 100644 --- a/fs/ocfs2/alloc.h +++ b/fs/ocfs2/alloc.h | |||
@@ -239,6 +239,7 @@ int ocfs2_find_leaf(struct ocfs2_caching_info *ci, | |||
239 | struct buffer_head **leaf_bh); | 239 | struct buffer_head **leaf_bh); |
240 | int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster); | 240 | int ocfs2_search_extent_list(struct ocfs2_extent_list *el, u32 v_cluster); |
241 | 241 | ||
242 | int ocfs2_trim_fs(struct super_block *sb, struct fstrim_range *range); | ||
242 | /* | 243 | /* |
243 | * Helper function to look at the # of clusters in an extent record. | 244 | * Helper function to look at the # of clusters in an extent record. |
244 | */ | 245 | */ |
diff --git a/fs/ocfs2/cluster/sys.c b/fs/ocfs2/cluster/sys.c index bc702dab5d1f..a4b07730b2e1 100644 --- a/fs/ocfs2/cluster/sys.c +++ b/fs/ocfs2/cluster/sys.c | |||
@@ -57,7 +57,6 @@ static struct kset *o2cb_kset; | |||
57 | void o2cb_sys_shutdown(void) | 57 | void o2cb_sys_shutdown(void) |
58 | { | 58 | { |
59 | mlog_sys_shutdown(); | 59 | mlog_sys_shutdown(); |
60 | sysfs_remove_link(NULL, "o2cb"); | ||
61 | kset_unregister(o2cb_kset); | 60 | kset_unregister(o2cb_kset); |
62 | } | 61 | } |
63 | 62 | ||
@@ -69,14 +68,6 @@ int o2cb_sys_init(void) | |||
69 | if (!o2cb_kset) | 68 | if (!o2cb_kset) |
70 | return -ENOMEM; | 69 | return -ENOMEM; |
71 | 70 | ||
72 | /* | ||
73 | * Create this symlink for backwards compatibility with old | ||
74 | * versions of ocfs2-tools which look for things in /sys/o2cb. | ||
75 | */ | ||
76 | ret = sysfs_create_link(NULL, &o2cb_kset->kobj, "o2cb"); | ||
77 | if (ret) | ||
78 | goto error; | ||
79 | |||
80 | ret = sysfs_create_group(&o2cb_kset->kobj, &o2cb_attr_group); | 71 | ret = sysfs_create_group(&o2cb_kset->kobj, &o2cb_attr_group); |
81 | if (ret) | 72 | if (ret) |
82 | goto error; | 73 | goto error; |
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h index 4bdf7baee344..d602abb51b61 100644 --- a/fs/ocfs2/dlm/dlmcommon.h +++ b/fs/ocfs2/dlm/dlmcommon.h | |||
@@ -144,6 +144,7 @@ struct dlm_ctxt | |||
144 | wait_queue_head_t dlm_join_events; | 144 | wait_queue_head_t dlm_join_events; |
145 | unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | 145 | unsigned long live_nodes_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
146 | unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | 146 | unsigned long domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
147 | unsigned long exit_domain_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | ||
147 | unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; | 148 | unsigned long recovery_map[BITS_TO_LONGS(O2NM_MAX_NODES)]; |
148 | struct dlm_recovery_ctxt reco; | 149 | struct dlm_recovery_ctxt reco; |
149 | spinlock_t master_lock; | 150 | spinlock_t master_lock; |
@@ -401,6 +402,18 @@ static inline int dlm_lvb_is_empty(char *lvb) | |||
401 | return 1; | 402 | return 1; |
402 | } | 403 | } |
403 | 404 | ||
405 | static inline char *dlm_list_in_text(enum dlm_lockres_list idx) | ||
406 | { | ||
407 | if (idx == DLM_GRANTED_LIST) | ||
408 | return "granted"; | ||
409 | else if (idx == DLM_CONVERTING_LIST) | ||
410 | return "converting"; | ||
411 | else if (idx == DLM_BLOCKED_LIST) | ||
412 | return "blocked"; | ||
413 | else | ||
414 | return "unknown"; | ||
415 | } | ||
416 | |||
404 | static inline struct list_head * | 417 | static inline struct list_head * |
405 | dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx) | 418 | dlm_list_idx_to_ptr(struct dlm_lock_resource *res, enum dlm_lockres_list idx) |
406 | { | 419 | { |
@@ -448,6 +461,7 @@ enum { | |||
448 | DLM_FINALIZE_RECO_MSG = 518, | 461 | DLM_FINALIZE_RECO_MSG = 518, |
449 | DLM_QUERY_REGION = 519, | 462 | DLM_QUERY_REGION = 519, |
450 | DLM_QUERY_NODEINFO = 520, | 463 | DLM_QUERY_NODEINFO = 520, |
464 | DLM_BEGIN_EXIT_DOMAIN_MSG = 521, | ||
451 | }; | 465 | }; |
452 | 466 | ||
453 | struct dlm_reco_node_data | 467 | struct dlm_reco_node_data |
diff --git a/fs/ocfs2/dlm/dlmdebug.c b/fs/ocfs2/dlm/dlmdebug.c index 04a32be0aeb9..56f82cb912e3 100644 --- a/fs/ocfs2/dlm/dlmdebug.c +++ b/fs/ocfs2/dlm/dlmdebug.c | |||
@@ -756,6 +756,12 @@ static int debug_state_print(struct dlm_ctxt *dlm, char *buf, int len) | |||
756 | buf + out, len - out); | 756 | buf + out, len - out); |
757 | out += snprintf(buf + out, len - out, "\n"); | 757 | out += snprintf(buf + out, len - out, "\n"); |
758 | 758 | ||
759 | /* Exit Domain Map: xx xx xx */ | ||
760 | out += snprintf(buf + out, len - out, "Exit Domain Map: "); | ||
761 | out += stringify_nodemap(dlm->exit_domain_map, O2NM_MAX_NODES, | ||
762 | buf + out, len - out); | ||
763 | out += snprintf(buf + out, len - out, "\n"); | ||
764 | |||
759 | /* Live Map: xx xx xx */ | 765 | /* Live Map: xx xx xx */ |
760 | out += snprintf(buf + out, len - out, "Live Map: "); | 766 | out += snprintf(buf + out, len - out, "Live Map: "); |
761 | out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES, | 767 | out += stringify_nodemap(dlm->live_nodes_map, O2NM_MAX_NODES, |
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c index 3b179d6cbde0..6ed6b95dcf93 100644 --- a/fs/ocfs2/dlm/dlmdomain.c +++ b/fs/ocfs2/dlm/dlmdomain.c | |||
@@ -132,10 +132,12 @@ static DECLARE_WAIT_QUEUE_HEAD(dlm_domain_events); | |||
132 | * New in version 1.1: | 132 | * New in version 1.1: |
133 | * - Message DLM_QUERY_REGION added to support global heartbeat | 133 | * - Message DLM_QUERY_REGION added to support global heartbeat |
134 | * - Message DLM_QUERY_NODEINFO added to allow online node removes | 134 | * - Message DLM_QUERY_NODEINFO added to allow online node removes |
135 | * New in version 1.2: | ||
136 | * - Message DLM_BEGIN_EXIT_DOMAIN_MSG added to mark start of exit domain | ||
135 | */ | 137 | */ |
136 | static const struct dlm_protocol_version dlm_protocol = { | 138 | static const struct dlm_protocol_version dlm_protocol = { |
137 | .pv_major = 1, | 139 | .pv_major = 1, |
138 | .pv_minor = 1, | 140 | .pv_minor = 2, |
139 | }; | 141 | }; |
140 | 142 | ||
141 | #define DLM_DOMAIN_BACKOFF_MS 200 | 143 | #define DLM_DOMAIN_BACKOFF_MS 200 |
@@ -449,14 +451,18 @@ redo_bucket: | |||
449 | dropped = dlm_empty_lockres(dlm, res); | 451 | dropped = dlm_empty_lockres(dlm, res); |
450 | 452 | ||
451 | spin_lock(&res->spinlock); | 453 | spin_lock(&res->spinlock); |
452 | __dlm_lockres_calc_usage(dlm, res); | 454 | if (dropped) |
453 | iter = res->hash_node.next; | 455 | __dlm_lockres_calc_usage(dlm, res); |
456 | else | ||
457 | iter = res->hash_node.next; | ||
454 | spin_unlock(&res->spinlock); | 458 | spin_unlock(&res->spinlock); |
455 | 459 | ||
456 | dlm_lockres_put(res); | 460 | dlm_lockres_put(res); |
457 | 461 | ||
458 | if (dropped) | 462 | if (dropped) { |
463 | cond_resched_lock(&dlm->spinlock); | ||
459 | goto redo_bucket; | 464 | goto redo_bucket; |
465 | } | ||
460 | } | 466 | } |
461 | cond_resched_lock(&dlm->spinlock); | 467 | cond_resched_lock(&dlm->spinlock); |
462 | num += n; | 468 | num += n; |
@@ -486,6 +492,28 @@ static int dlm_no_joining_node(struct dlm_ctxt *dlm) | |||
486 | return ret; | 492 | return ret; |
487 | } | 493 | } |
488 | 494 | ||
495 | static int dlm_begin_exit_domain_handler(struct o2net_msg *msg, u32 len, | ||
496 | void *data, void **ret_data) | ||
497 | { | ||
498 | struct dlm_ctxt *dlm = data; | ||
499 | unsigned int node; | ||
500 | struct dlm_exit_domain *exit_msg = (struct dlm_exit_domain *) msg->buf; | ||
501 | |||
502 | if (!dlm_grab(dlm)) | ||
503 | return 0; | ||
504 | |||
505 | node = exit_msg->node_idx; | ||
506 | mlog(0, "%s: Node %u sent a begin exit domain message\n", dlm->name, node); | ||
507 | |||
508 | spin_lock(&dlm->spinlock); | ||
509 | set_bit(node, dlm->exit_domain_map); | ||
510 | spin_unlock(&dlm->spinlock); | ||
511 | |||
512 | dlm_put(dlm); | ||
513 | |||
514 | return 0; | ||
515 | } | ||
516 | |||
489 | static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm) | 517 | static void dlm_mark_domain_leaving(struct dlm_ctxt *dlm) |
490 | { | 518 | { |
491 | /* Yikes, a double spinlock! I need domain_lock for the dlm | 519 | /* Yikes, a double spinlock! I need domain_lock for the dlm |
@@ -542,6 +570,7 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, | |||
542 | 570 | ||
543 | spin_lock(&dlm->spinlock); | 571 | spin_lock(&dlm->spinlock); |
544 | clear_bit(node, dlm->domain_map); | 572 | clear_bit(node, dlm->domain_map); |
573 | clear_bit(node, dlm->exit_domain_map); | ||
545 | __dlm_print_nodes(dlm); | 574 | __dlm_print_nodes(dlm); |
546 | 575 | ||
547 | /* notify anything attached to the heartbeat events */ | 576 | /* notify anything attached to the heartbeat events */ |
@@ -554,29 +583,56 @@ static int dlm_exit_domain_handler(struct o2net_msg *msg, u32 len, void *data, | |||
554 | return 0; | 583 | return 0; |
555 | } | 584 | } |
556 | 585 | ||
557 | static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm, | 586 | static int dlm_send_one_domain_exit(struct dlm_ctxt *dlm, u32 msg_type, |
558 | unsigned int node) | 587 | unsigned int node) |
559 | { | 588 | { |
560 | int status; | 589 | int status; |
561 | struct dlm_exit_domain leave_msg; | 590 | struct dlm_exit_domain leave_msg; |
562 | 591 | ||
563 | mlog(0, "Asking node %u if we can leave the domain %s me = %u\n", | 592 | mlog(0, "%s: Sending domain exit message %u to node %u\n", dlm->name, |
564 | node, dlm->name, dlm->node_num); | 593 | msg_type, node); |
565 | 594 | ||
566 | memset(&leave_msg, 0, sizeof(leave_msg)); | 595 | memset(&leave_msg, 0, sizeof(leave_msg)); |
567 | leave_msg.node_idx = dlm->node_num; | 596 | leave_msg.node_idx = dlm->node_num; |
568 | 597 | ||
569 | status = o2net_send_message(DLM_EXIT_DOMAIN_MSG, dlm->key, | 598 | status = o2net_send_message(msg_type, dlm->key, &leave_msg, |
570 | &leave_msg, sizeof(leave_msg), node, | 599 | sizeof(leave_msg), node, NULL); |
571 | NULL); | ||
572 | if (status < 0) | 600 | if (status < 0) |
573 | mlog(ML_ERROR, "Error %d when sending message %u (key 0x%x) to " | 601 | mlog(ML_ERROR, "Error %d sending domain exit message %u " |
574 | "node %u\n", status, DLM_EXIT_DOMAIN_MSG, dlm->key, node); | 602 | "to node %u on domain %s\n", status, msg_type, node, |
575 | mlog(0, "status return %d from o2net_send_message\n", status); | 603 | dlm->name); |
576 | 604 | ||
577 | return status; | 605 | return status; |
578 | } | 606 | } |
579 | 607 | ||
608 | static void dlm_begin_exit_domain(struct dlm_ctxt *dlm) | ||
609 | { | ||
610 | int node = -1; | ||
611 | |||
612 | /* Support for begin exit domain was added in 1.2 */ | ||
613 | if (dlm->dlm_locking_proto.pv_major == 1 && | ||
614 | dlm->dlm_locking_proto.pv_minor < 2) | ||
615 | return; | ||
616 | |||
617 | /* | ||
618 | * Unlike DLM_EXIT_DOMAIN_MSG, DLM_BEGIN_EXIT_DOMAIN_MSG is purely | ||
619 | * informational. Meaning if a node does not receive the message, | ||
620 | * so be it. | ||
621 | */ | ||
622 | spin_lock(&dlm->spinlock); | ||
623 | while (1) { | ||
624 | node = find_next_bit(dlm->domain_map, O2NM_MAX_NODES, node + 1); | ||
625 | if (node >= O2NM_MAX_NODES) | ||
626 | break; | ||
627 | if (node == dlm->node_num) | ||
628 | continue; | ||
629 | |||
630 | spin_unlock(&dlm->spinlock); | ||
631 | dlm_send_one_domain_exit(dlm, DLM_BEGIN_EXIT_DOMAIN_MSG, node); | ||
632 | spin_lock(&dlm->spinlock); | ||
633 | } | ||
634 | spin_unlock(&dlm->spinlock); | ||
635 | } | ||
580 | 636 | ||
581 | static void dlm_leave_domain(struct dlm_ctxt *dlm) | 637 | static void dlm_leave_domain(struct dlm_ctxt *dlm) |
582 | { | 638 | { |
@@ -602,7 +658,8 @@ static void dlm_leave_domain(struct dlm_ctxt *dlm) | |||
602 | 658 | ||
603 | clear_node = 1; | 659 | clear_node = 1; |
604 | 660 | ||
605 | status = dlm_send_one_domain_exit(dlm, node); | 661 | status = dlm_send_one_domain_exit(dlm, DLM_EXIT_DOMAIN_MSG, |
662 | node); | ||
606 | if (status < 0 && | 663 | if (status < 0 && |
607 | status != -ENOPROTOOPT && | 664 | status != -ENOPROTOOPT && |
608 | status != -ENOTCONN) { | 665 | status != -ENOTCONN) { |
@@ -677,6 +734,7 @@ void dlm_unregister_domain(struct dlm_ctxt *dlm) | |||
677 | 734 | ||
678 | if (leave) { | 735 | if (leave) { |
679 | mlog(0, "shutting down domain %s\n", dlm->name); | 736 | mlog(0, "shutting down domain %s\n", dlm->name); |
737 | dlm_begin_exit_domain(dlm); | ||
680 | 738 | ||
681 | /* We changed dlm state, notify the thread */ | 739 | /* We changed dlm state, notify the thread */ |
682 | dlm_kick_thread(dlm, NULL); | 740 | dlm_kick_thread(dlm, NULL); |
@@ -909,6 +967,7 @@ static int dlm_assert_joined_handler(struct o2net_msg *msg, u32 len, void *data, | |||
909 | * leftover join state. */ | 967 | * leftover join state. */ |
910 | BUG_ON(dlm->joining_node != assert->node_idx); | 968 | BUG_ON(dlm->joining_node != assert->node_idx); |
911 | set_bit(assert->node_idx, dlm->domain_map); | 969 | set_bit(assert->node_idx, dlm->domain_map); |
970 | clear_bit(assert->node_idx, dlm->exit_domain_map); | ||
912 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); | 971 | __dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN); |
913 | 972 | ||
914 | printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n", | 973 | printk(KERN_NOTICE "o2dlm: Node %u joins domain %s\n", |
@@ -1793,6 +1852,13 @@ static int dlm_register_domain_handlers(struct dlm_ctxt *dlm) | |||
1793 | if (status) | 1852 | if (status) |
1794 | goto bail; | 1853 | goto bail; |
1795 | 1854 | ||
1855 | status = o2net_register_handler(DLM_BEGIN_EXIT_DOMAIN_MSG, dlm->key, | ||
1856 | sizeof(struct dlm_exit_domain), | ||
1857 | dlm_begin_exit_domain_handler, | ||
1858 | dlm, NULL, &dlm->dlm_domain_handlers); | ||
1859 | if (status) | ||
1860 | goto bail; | ||
1861 | |||
1796 | bail: | 1862 | bail: |
1797 | if (status) | 1863 | if (status) |
1798 | dlm_unregister_domain_handlers(dlm); | 1864 | dlm_unregister_domain_handlers(dlm); |
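The new DLM_BEGIN_EXIT_DOMAIN_MSG in dlmdomain.c is gated on the negotiated protocol (nothing is sent to pre-1.2 peers) and is best-effort: dlm_begin_exit_domain() ignores send failures because the message is only a hint that lets other nodes stop migrating resources to the leaving node. The userspace sketch below shows that version-gated, best-effort broadcast shape; the protocol numbers and send_hint() stub are illustrative, not the o2net API.

    /* Sketch of a version-gated, best-effort notification. */
    #include <stdio.h>

    #define MAX_NODES 8

    struct proto { int major, minor; };

    /* Pretend network send: fails for node 3 to show that errors are ignored. */
    static int send_hint(int node)
    {
        if (node == 3)
            return -1;
        printf("hint sent to node %d\n", node);
        return 0;
    }

    static void begin_exit_domain(const struct proto *p, const int *domain_map,
                                  int self)
    {
        int node;

        /* Older peers don't understand the message, so send nothing at all. */
        if (p->major == 1 && p->minor < 2)
            return;

        for (node = 0; node < MAX_NODES; node++) {
            if (!domain_map[node] || node == self)
                continue;
            /* Purely informational: a lost message is tolerated. */
            if (send_hint(node) < 0)
                printf("node %d missed the hint, carrying on\n", node);
        }
    }

    int main(void)
    {
        int domain_map[MAX_NODES] = { 1, 1, 0, 1, 0, 1, 0, 0 };
        struct proto p = { 1, 2 };

        begin_exit_domain(&p, domain_map, 0);
        return 0;
    }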
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c index 84d166328cf7..11eefb8c12e9 100644 --- a/fs/ocfs2/dlm/dlmmaster.c +++ b/fs/ocfs2/dlm/dlmmaster.c | |||
@@ -2339,65 +2339,55 @@ static void dlm_deref_lockres_worker(struct dlm_work_item *item, void *data) | |||
2339 | dlm_lockres_put(res); | 2339 | dlm_lockres_put(res); |
2340 | } | 2340 | } |
2341 | 2341 | ||
2342 | /* Checks whether the lockres can be migrated. Returns 0 if yes, < 0 | 2342 | /* |
2343 | * if not. If 0, numlocks is set to the number of locks in the lockres. | 2343 | * A migrateable resource is one that is : |
2344 | * 1. locally mastered, and, | ||
2345 | * 2. zero local locks, and, | ||
2346 | * 3. one or more non-local locks, or, one or more references | ||
2347 | * Returns 1 if yes, 0 if not. | ||
2344 | */ | 2348 | */ |
2345 | static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm, | 2349 | static int dlm_is_lockres_migrateable(struct dlm_ctxt *dlm, |
2346 | struct dlm_lock_resource *res, | 2350 | struct dlm_lock_resource *res) |
2347 | int *numlocks, | ||
2348 | int *hasrefs) | ||
2349 | { | 2351 | { |
2350 | int ret; | 2352 | enum dlm_lockres_list idx; |
2351 | int i; | 2353 | int nonlocal = 0, node_ref; |
2352 | int count = 0; | ||
2353 | struct list_head *queue; | 2354 | struct list_head *queue; |
2354 | struct dlm_lock *lock; | 2355 | struct dlm_lock *lock; |
2356 | u64 cookie; | ||
2355 | 2357 | ||
2356 | assert_spin_locked(&res->spinlock); | 2358 | assert_spin_locked(&res->spinlock); |
2357 | 2359 | ||
2358 | *numlocks = 0; | 2360 | if (res->owner != dlm->node_num) |
2359 | *hasrefs = 0; | 2361 | return 0; |
2360 | |||
2361 | ret = -EINVAL; | ||
2362 | if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) { | ||
2363 | mlog(0, "cannot migrate lockres with unknown owner!\n"); | ||
2364 | goto leave; | ||
2365 | } | ||
2366 | |||
2367 | if (res->owner != dlm->node_num) { | ||
2368 | mlog(0, "cannot migrate lockres this node doesn't own!\n"); | ||
2369 | goto leave; | ||
2370 | } | ||
2371 | 2362 | ||
2372 | ret = 0; | 2363 | for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { |
2373 | queue = &res->granted; | 2364 | queue = dlm_list_idx_to_ptr(res, idx); |
2374 | for (i = 0; i < 3; i++) { | ||
2375 | list_for_each_entry(lock, queue, list) { | 2365 | list_for_each_entry(lock, queue, list) { |
2376 | ++count; | 2366 | if (lock->ml.node != dlm->node_num) { |
2377 | if (lock->ml.node == dlm->node_num) { | 2367 | nonlocal++; |
2378 | mlog(0, "found a lock owned by this node still " | 2368 | continue; |
2379 | "on the %s queue! will not migrate this " | ||
2380 | "lockres\n", (i == 0 ? "granted" : | ||
2381 | (i == 1 ? "converting" : | ||
2382 | "blocked"))); | ||
2383 | ret = -ENOTEMPTY; | ||
2384 | goto leave; | ||
2385 | } | 2369 | } |
2370 | cookie = be64_to_cpu(lock->ml.cookie); | ||
2371 | mlog(0, "%s: Not migrateable res %.*s, lock %u:%llu on " | ||
2372 | "%s list\n", dlm->name, res->lockname.len, | ||
2373 | res->lockname.name, | ||
2374 | dlm_get_lock_cookie_node(cookie), | ||
2375 | dlm_get_lock_cookie_seq(cookie), | ||
2376 | dlm_list_in_text(idx)); | ||
2377 | return 0; | ||
2386 | } | 2378 | } |
2387 | queue++; | ||
2388 | } | 2379 | } |
2389 | 2380 | ||
2390 | *numlocks = count; | 2381 | if (!nonlocal) { |
2391 | 2382 | node_ref = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); | |
2392 | count = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); | 2383 | if (node_ref >= O2NM_MAX_NODES) |
2393 | if (count < O2NM_MAX_NODES) | 2384 | return 0; |
2394 | *hasrefs = 1; | 2385 | } |
2395 | 2386 | ||
2396 | mlog(0, "%s: res %.*s, Migrateable, locks %d, refs %d\n", dlm->name, | 2387 | mlog(0, "%s: res %.*s, Migrateable\n", dlm->name, res->lockname.len, |
2397 | res->lockname.len, res->lockname.name, *numlocks, *hasrefs); | 2388 | res->lockname.name); |
2398 | 2389 | ||
2399 | leave: | 2390 | return 1; |
2400 | return ret; | ||
2401 | } | 2391 | } |
2402 | 2392 | ||
2403 | /* | 2393 | /* |
@@ -2406,8 +2396,7 @@ leave: | |||
2406 | 2396 | ||
2407 | 2397 | ||
2408 | static int dlm_migrate_lockres(struct dlm_ctxt *dlm, | 2398 | static int dlm_migrate_lockres(struct dlm_ctxt *dlm, |
2409 | struct dlm_lock_resource *res, | 2399 | struct dlm_lock_resource *res, u8 target) |
2410 | u8 target) | ||
2411 | { | 2400 | { |
2412 | struct dlm_master_list_entry *mle = NULL; | 2401 | struct dlm_master_list_entry *mle = NULL; |
2413 | struct dlm_master_list_entry *oldmle = NULL; | 2402 | struct dlm_master_list_entry *oldmle = NULL; |
@@ -2416,37 +2405,20 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm, | |||
2416 | const char *name; | 2405 | const char *name; |
2417 | unsigned int namelen; | 2406 | unsigned int namelen; |
2418 | int mle_added = 0; | 2407 | int mle_added = 0; |
2419 | int numlocks, hasrefs; | ||
2420 | int wake = 0; | 2408 | int wake = 0; |
2421 | 2409 | ||
2422 | if (!dlm_grab(dlm)) | 2410 | if (!dlm_grab(dlm)) |
2423 | return -EINVAL; | 2411 | return -EINVAL; |
2424 | 2412 | ||
2413 | BUG_ON(target == O2NM_MAX_NODES); | ||
2414 | |||
2425 | name = res->lockname.name; | 2415 | name = res->lockname.name; |
2426 | namelen = res->lockname.len; | 2416 | namelen = res->lockname.len; |
2427 | 2417 | ||
2428 | mlog(0, "%s: Migrating %.*s to %u\n", dlm->name, namelen, name, target); | 2418 | mlog(0, "%s: Migrating %.*s to node %u\n", dlm->name, namelen, name, |
2429 | 2419 | target); | |
2430 | /* | ||
2431 | * ensure this lockres is a proper candidate for migration | ||
2432 | */ | ||
2433 | spin_lock(&res->spinlock); | ||
2434 | ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs); | ||
2435 | if (ret < 0) { | ||
2436 | spin_unlock(&res->spinlock); | ||
2437 | goto leave; | ||
2438 | } | ||
2439 | spin_unlock(&res->spinlock); | ||
2440 | |||
2441 | /* no work to do */ | ||
2442 | if (numlocks == 0 && !hasrefs) | ||
2443 | goto leave; | ||
2444 | |||
2445 | /* | ||
2446 | * preallocate up front | ||
2447 | * if this fails, abort | ||
2448 | */ | ||
2449 | 2420 | ||
2421 | /* preallocate up front. if this fails, abort */ | ||
2450 | ret = -ENOMEM; | 2422 | ret = -ENOMEM; |
2451 | mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS); | 2423 | mres = (struct dlm_migratable_lockres *) __get_free_page(GFP_NOFS); |
2452 | if (!mres) { | 2424 | if (!mres) { |
@@ -2462,35 +2434,10 @@ static int dlm_migrate_lockres(struct dlm_ctxt *dlm, | |||
2462 | ret = 0; | 2434 | ret = 0; |
2463 | 2435 | ||
2464 | /* | 2436 | /* |
2465 | * find a node to migrate the lockres to | ||
2466 | */ | ||
2467 | |||
2468 | spin_lock(&dlm->spinlock); | ||
2469 | /* pick a new node */ | ||
2470 | if (!test_bit(target, dlm->domain_map) || | ||
2471 | target >= O2NM_MAX_NODES) { | ||
2472 | target = dlm_pick_migration_target(dlm, res); | ||
2473 | } | ||
2474 | mlog(0, "%s: res %.*s, Node %u chosen for migration\n", dlm->name, | ||
2475 | namelen, name, target); | ||
2476 | |||
2477 | if (target >= O2NM_MAX_NODES || | ||
2478 | !test_bit(target, dlm->domain_map)) { | ||
2479 | /* target chosen is not alive */ | ||
2480 | ret = -EINVAL; | ||
2481 | } | ||
2482 | |||
2483 | if (ret) { | ||
2484 | spin_unlock(&dlm->spinlock); | ||
2485 | goto fail; | ||
2486 | } | ||
2487 | |||
2488 | mlog(0, "continuing with target = %u\n", target); | ||
2489 | |||
2490 | /* | ||
2491 | * clear any existing master requests and | 2437 | * clear any existing master requests and |
2492 | * add the migration mle to the list | 2438 | * add the migration mle to the list |
2493 | */ | 2439 | */ |
2440 | spin_lock(&dlm->spinlock); | ||
2494 | spin_lock(&dlm->master_lock); | 2441 | spin_lock(&dlm->master_lock); |
2495 | ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, | 2442 | ret = dlm_add_migration_mle(dlm, res, mle, &oldmle, name, |
2496 | namelen, target, dlm->node_num); | 2443 | namelen, target, dlm->node_num); |
@@ -2531,6 +2478,7 @@ fail: | |||
2531 | dlm_put_mle(mle); | 2478 | dlm_put_mle(mle); |
2532 | } else if (mle) { | 2479 | } else if (mle) { |
2533 | kmem_cache_free(dlm_mle_cache, mle); | 2480 | kmem_cache_free(dlm_mle_cache, mle); |
2481 | mle = NULL; | ||
2534 | } | 2482 | } |
2535 | goto leave; | 2483 | goto leave; |
2536 | } | 2484 | } |
@@ -2652,69 +2600,52 @@ leave: | |||
2652 | if (wake) | 2600 | if (wake) |
2653 | wake_up(&res->wq); | 2601 | wake_up(&res->wq); |
2654 | 2602 | ||
2655 | /* TODO: cleanup */ | ||
2656 | if (mres) | 2603 | if (mres) |
2657 | free_page((unsigned long)mres); | 2604 | free_page((unsigned long)mres); |
2658 | 2605 | ||
2659 | dlm_put(dlm); | 2606 | dlm_put(dlm); |
2660 | 2607 | ||
2661 | mlog(0, "returning %d\n", ret); | 2608 | mlog(0, "%s: Migrating %.*s to %u, returns %d\n", dlm->name, namelen, |
2609 | name, target, ret); | ||
2662 | return ret; | 2610 | return ret; |
2663 | } | 2611 | } |
2664 | 2612 | ||
2665 | #define DLM_MIGRATION_RETRY_MS 100 | 2613 | #define DLM_MIGRATION_RETRY_MS 100 |
2666 | 2614 | ||
2667 | /* Should be called only after beginning the domain leave process. | 2615 | /* |
2616 | * Should be called only after beginning the domain leave process. | ||
2668 | * There should not be any remaining locks on nonlocal lock resources, | 2617 | * There should not be any remaining locks on nonlocal lock resources, |
2669 | * and there should be no local locks left on locally mastered resources. | 2618 | * and there should be no local locks left on locally mastered resources. |
2670 | * | 2619 | * |
2671 | * Called with the dlm spinlock held, may drop it to do migration, but | 2620 | * Called with the dlm spinlock held, may drop it to do migration, but |
2672 | * will re-acquire before exit. | 2621 | * will re-acquire before exit. |
2673 | * | 2622 | * |
2674 | * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped */ | 2623 | * Returns: 1 if dlm->spinlock was dropped/retaken, 0 if never dropped |
2624 | */ | ||
2675 | int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) | 2625 | int dlm_empty_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res) |
2676 | { | 2626 | { |
2677 | int ret; | 2627 | int ret; |
2678 | int lock_dropped = 0; | 2628 | int lock_dropped = 0; |
2679 | int numlocks, hasrefs; | 2629 | u8 target = O2NM_MAX_NODES; |
2630 | |||
2631 | assert_spin_locked(&dlm->spinlock); | ||
2680 | 2632 | ||
2681 | spin_lock(&res->spinlock); | 2633 | spin_lock(&res->spinlock); |
2682 | if (res->owner != dlm->node_num) { | 2634 | if (dlm_is_lockres_migrateable(dlm, res)) |
2683 | if (!__dlm_lockres_unused(res)) { | 2635 | target = dlm_pick_migration_target(dlm, res); |
2684 | mlog(ML_ERROR, "%s:%.*s: this node is not master, " | 2636 | spin_unlock(&res->spinlock); |
2685 | "trying to free this but locks remain\n", | ||
2686 | dlm->name, res->lockname.len, res->lockname.name); | ||
2687 | } | ||
2688 | spin_unlock(&res->spinlock); | ||
2689 | goto leave; | ||
2690 | } | ||
2691 | 2637 | ||
2692 | /* No need to migrate a lockres having no locks */ | 2638 | if (target == O2NM_MAX_NODES) |
2693 | ret = dlm_is_lockres_migrateable(dlm, res, &numlocks, &hasrefs); | ||
2694 | if (ret >= 0 && numlocks == 0 && !hasrefs) { | ||
2695 | spin_unlock(&res->spinlock); | ||
2696 | goto leave; | 2639 | goto leave; |
2697 | } | ||
2698 | spin_unlock(&res->spinlock); | ||
2699 | 2640 | ||
2700 | /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */ | 2641 | /* Wheee! Migrate lockres here! Will sleep so drop spinlock. */ |
2701 | spin_unlock(&dlm->spinlock); | 2642 | spin_unlock(&dlm->spinlock); |
2702 | lock_dropped = 1; | 2643 | lock_dropped = 1; |
2703 | while (1) { | 2644 | ret = dlm_migrate_lockres(dlm, res, target); |
2704 | ret = dlm_migrate_lockres(dlm, res, O2NM_MAX_NODES); | 2645 | if (ret) |
2705 | if (ret >= 0) | 2646 | mlog(0, "%s: res %.*s, Migrate to node %u failed with %d\n", |
2706 | break; | 2647 | dlm->name, res->lockname.len, res->lockname.name, |
2707 | if (ret == -ENOTEMPTY) { | 2648 | target, ret); |
2708 | mlog(ML_ERROR, "lockres %.*s still has local locks!\n", | ||
2709 | res->lockname.len, res->lockname.name); | ||
2710 | BUG(); | ||
2711 | } | ||
2712 | |||
2713 | mlog(0, "lockres %.*s: migrate failed, " | ||
2714 | "retrying\n", res->lockname.len, | ||
2715 | res->lockname.name); | ||
2716 | msleep(DLM_MIGRATION_RETRY_MS); | ||
2717 | } | ||
2718 | spin_lock(&dlm->spinlock); | 2649 | spin_lock(&dlm->spinlock); |
2719 | leave: | 2650 | leave: |
2720 | return lock_dropped; | 2651 | return lock_dropped; |
@@ -2898,61 +2829,55 @@ static void dlm_remove_nonlocal_locks(struct dlm_ctxt *dlm, | |||
2898 | } | 2829 | } |
2899 | } | 2830 | } |
2900 | 2831 | ||
2901 | /* for now this is not too intelligent. we will | 2832 | /* |
2902 | * need stats to make this do the right thing. | 2833 | * Pick a node to migrate the lock resource to. This function selects a |
2903 | * this just finds the first lock on one of the | 2834 | * potential target based first on the locks and then on refmap. It skips |
2904 | * queues and uses that node as the target. */ | 2835 | * nodes that are in the process of exiting the domain. |
2836 | */ | ||
2905 | static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, | 2837 | static u8 dlm_pick_migration_target(struct dlm_ctxt *dlm, |
2906 | struct dlm_lock_resource *res) | 2838 | struct dlm_lock_resource *res) |
2907 | { | 2839 | { |
2908 | int i; | 2840 | enum dlm_lockres_list idx; |
2909 | struct list_head *queue = &res->granted; | 2841 | struct list_head *queue = &res->granted; |
2910 | struct dlm_lock *lock; | 2842 | struct dlm_lock *lock; |
2911 | int nodenum; | 2843 | int noderef; |
2844 | u8 nodenum = O2NM_MAX_NODES; | ||
2912 | 2845 | ||
2913 | assert_spin_locked(&dlm->spinlock); | 2846 | assert_spin_locked(&dlm->spinlock); |
2847 | assert_spin_locked(&res->spinlock); | ||
2914 | 2848 | ||
2915 | spin_lock(&res->spinlock); | 2849 | /* Go through all the locks */ |
2916 | for (i=0; i<3; i++) { | 2850 | for (idx = DLM_GRANTED_LIST; idx <= DLM_BLOCKED_LIST; idx++) { |
2851 | queue = dlm_list_idx_to_ptr(res, idx); | ||
2917 | list_for_each_entry(lock, queue, list) { | 2852 | list_for_each_entry(lock, queue, list) { |
2918 | /* up to the caller to make sure this node | 2853 | if (lock->ml.node == dlm->node_num) |
2919 | * is alive */ | 2854 | continue; |
2920 | if (lock->ml.node != dlm->node_num) { | 2855 | if (test_bit(lock->ml.node, dlm->exit_domain_map)) |
2921 | spin_unlock(&res->spinlock); | 2856 | continue; |
2922 | return lock->ml.node; | 2857 | nodenum = lock->ml.node; |
2923 | } | 2858 | goto bail; |
2924 | } | 2859 | } |
2925 | queue++; | ||
2926 | } | ||
2927 | |||
2928 | nodenum = find_next_bit(res->refmap, O2NM_MAX_NODES, 0); | ||
2929 | if (nodenum < O2NM_MAX_NODES) { | ||
2930 | spin_unlock(&res->spinlock); | ||
2931 | return nodenum; | ||
2932 | } | 2860 | } |
2933 | spin_unlock(&res->spinlock); | ||
2934 | mlog(0, "have not found a suitable target yet! checking domain map\n"); | ||
2935 | 2861 | ||
2936 | /* ok now we're getting desperate. pick anyone alive. */ | 2862 | /* Go thru the refmap */ |
2937 | nodenum = -1; | 2863 | noderef = -1; |
2938 | while (1) { | 2864 | while (1) { |
2939 | nodenum = find_next_bit(dlm->domain_map, | 2865 | noderef = find_next_bit(res->refmap, O2NM_MAX_NODES, |
2940 | O2NM_MAX_NODES, nodenum+1); | 2866 | noderef + 1); |
2941 | mlog(0, "found %d in domain map\n", nodenum); | 2867 | if (noderef >= O2NM_MAX_NODES) |
2942 | if (nodenum >= O2NM_MAX_NODES) | ||
2943 | break; | 2868 | break; |
2944 | if (nodenum != dlm->node_num) { | 2869 | if (noderef == dlm->node_num) |
2945 | mlog(0, "picking %d\n", nodenum); | 2870 | continue; |
2946 | return nodenum; | 2871 | if (test_bit(noderef, dlm->exit_domain_map)) |
2947 | } | 2872 | continue; |
2873 | nodenum = noderef; | ||
2874 | goto bail; | ||
2948 | } | 2875 | } |
2949 | 2876 | ||
2950 | mlog(0, "giving up. no master to migrate to\n"); | 2877 | bail: |
2951 | return DLM_LOCK_RES_OWNER_UNKNOWN; | 2878 | return nodenum; |
2952 | } | 2879 | } |
2953 | 2880 | ||
2954 | |||
2955 | |||
2956 | /* this is called by the new master once all lockres | 2881 | /* this is called by the new master once all lockres |
2957 | * data has been received */ | 2882 | * data has been received */ |
2958 | static int dlm_do_migrate_request(struct dlm_ctxt *dlm, | 2883 | static int dlm_do_migrate_request(struct dlm_ctxt *dlm, |
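dlm_pick_migration_target() above now prefers a node that already holds a lock on the resource, then falls back to any node with a bit set in the refmap, always skipping the local node and any node in exit_domain_map. A hedged standalone model of that selection order (arrays stand in for the kernel lock lists and bitmaps; names are invented for illustration):

#include <stdio.h>

#define MAX_NODES 8
#define NO_NODE   MAX_NODES      /* mirrors returning O2NM_MAX_NODES */

/* Pick a target: first any remote lock holder, then any remote node
 * holding a reference, skipping self and exiting nodes. */
static int pick_target(const int *lock_holders, int nlocks,
                       const unsigned char *refmap,
                       const unsigned char *exit_map, int self)
{
        for (int i = 0; i < nlocks; i++) {
                int n = lock_holders[i];
                if (n == self || exit_map[n])
                        continue;
                return n;
        }
        for (int n = 0; n < MAX_NODES; n++) {
                if (!refmap[n] || n == self || exit_map[n])
                        continue;
                return n;
        }
        return NO_NODE;          /* caller then skips migration entirely */
}

int main(void)
{
        int locks[] = { 1, 3 };                      /* remote lock holders */
        unsigned char refmap[MAX_NODES]  = { 0, 0, 1, 1, 0, 0, 0, 0 };
        unsigned char exitmap[MAX_NODES] = { 0, 1, 0, 0, 0, 0, 0, 0 };

        /* node 1 is exiting, so node 3 is chosen from the lock holders */
        printf("target = %d\n", pick_target(locks, 2, refmap, exitmap, 0));
        return 0;
}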
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c index f1beb6fc254d..7efab6d28a21 100644 --- a/fs/ocfs2/dlm/dlmrecovery.c +++ b/fs/ocfs2/dlm/dlmrecovery.c | |||
@@ -2393,6 +2393,7 @@ static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx) | |||
2393 | 2393 | ||
2394 | mlog(0, "node %u being removed from domain map!\n", idx); | 2394 | mlog(0, "node %u being removed from domain map!\n", idx); |
2395 | clear_bit(idx, dlm->domain_map); | 2395 | clear_bit(idx, dlm->domain_map); |
2396 | clear_bit(idx, dlm->exit_domain_map); | ||
2396 | /* wake up migration waiters if a node goes down. | 2397 | /* wake up migration waiters if a node goes down. |
2397 | * perhaps later we can genericize this for other waiters. */ | 2398 | * perhaps later we can genericize this for other waiters. */ |
2398 | wake_up(&dlm->migration_wq); | 2399 | wake_up(&dlm->migration_wq); |
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c index 8c5c0eddc365..b42076797049 100644 --- a/fs/ocfs2/dlmfs/dlmfs.c +++ b/fs/ocfs2/dlmfs/dlmfs.c | |||
@@ -88,7 +88,7 @@ struct workqueue_struct *user_dlm_worker; | |||
88 | * signifies a bast fired on the lock. | 88 | * signifies a bast fired on the lock. |
89 | */ | 89 | */ |
90 | #define DLMFS_CAPABILITIES "bast stackglue" | 90 | #define DLMFS_CAPABILITIES "bast stackglue" |
91 | extern int param_set_dlmfs_capabilities(const char *val, | 91 | static int param_set_dlmfs_capabilities(const char *val, |
92 | struct kernel_param *kp) | 92 | struct kernel_param *kp) |
93 | { | 93 | { |
94 | printk(KERN_ERR "%s: readonly parameter\n", kp->name); | 94 | printk(KERN_ERR "%s: readonly parameter\n", kp->name); |
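The dlmfs.c hunk only narrows the linkage of the capabilities parameter setter from extern to static; the callback itself remains a reject-all stub so the module parameter stays read-only. A rough sketch of that pattern for a hypothetical module (the real dlmfs wiring may differ; this only illustrates the param_set/param_get callback shape shown above):

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

#define MYFS_CAPABILITIES "bast stackglue"

/* Writes are rejected: the parameter only advertises capabilities. */
static int param_set_myfs_capabilities(const char *val,
                                       struct kernel_param *kp)
{
        printk(KERN_ERR "%s: readonly parameter\n", kp->name);
        return -EINVAL;
}

static int param_get_myfs_capabilities(char *buffer,
                                       struct kernel_param *kp)
{
        return sprintf(buffer, "%s", MYFS_CAPABILITIES);
}

/* 0444: world-readable via sysfs, never writable. */
module_param_call(capabilities, param_set_myfs_capabilities,
                  param_get_myfs_capabilities, NULL, 0444);
MODULE_PARM_DESC(capabilities, MYFS_CAPABILITIES);
MODULE_LICENSE("GPL");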
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c index 89659d6dc206..b1e35a392ca5 100644 --- a/fs/ocfs2/file.c +++ b/fs/ocfs2/file.c | |||
@@ -2670,6 +2670,7 @@ const struct file_operations ocfs2_fops_no_plocks = { | |||
2670 | .flock = ocfs2_flock, | 2670 | .flock = ocfs2_flock, |
2671 | .splice_read = ocfs2_file_splice_read, | 2671 | .splice_read = ocfs2_file_splice_read, |
2672 | .splice_write = ocfs2_file_splice_write, | 2672 | .splice_write = ocfs2_file_splice_write, |
2673 | .fallocate = ocfs2_fallocate, | ||
2673 | }; | 2674 | }; |
2674 | 2675 | ||
2675 | const struct file_operations ocfs2_dops_no_plocks = { | 2676 | const struct file_operations ocfs2_dops_no_plocks = { |
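With .fallocate wired into ocfs2_fops_no_plocks above, userspace can preallocate space on such mounts through the regular fallocate(2) path. A small hedged usage example (assumes a Linux toolchain providing fallocate(2) and linux/falloc.h; the file path is illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/falloc.h>

int main(int argc, char **argv)
{
        if (argc != 2) {
                fprintf(stderr, "usage: %s <file-on-ocfs2>\n", argv[0]);
                return 1;
        }

        int fd = open(argv[1], O_RDWR | O_CREAT, 0644);
        if (fd < 0) {
                perror("open");
                return 1;
        }

        /* Preallocate 16 MiB without changing the visible file size. */
        if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 16 * 1024 * 1024) < 0)
                perror("fallocate");

        close(fd);
        return 0;
}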
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c index 8f13c5989eae..bc91072b7219 100644 --- a/fs/ocfs2/ioctl.c +++ b/fs/ocfs2/ioctl.c | |||
@@ -22,6 +22,11 @@ | |||
22 | #include "ioctl.h" | 22 | #include "ioctl.h" |
23 | #include "resize.h" | 23 | #include "resize.h" |
24 | #include "refcounttree.h" | 24 | #include "refcounttree.h" |
25 | #include "sysfile.h" | ||
26 | #include "dir.h" | ||
27 | #include "buffer_head_io.h" | ||
28 | #include "suballoc.h" | ||
29 | #include "move_extents.h" | ||
25 | 30 | ||
26 | #include <linux/ext2_fs.h> | 31 | #include <linux/ext2_fs.h> |
27 | 32 | ||
@@ -35,31 +40,27 @@ | |||
35 | * be -EFAULT. The error will be returned from the ioctl(2) call. It's | 40 | * be -EFAULT. The error will be returned from the ioctl(2) call. It's |
36 | * just a best-effort to tell userspace that this request caused the error. | 41 | * just a best-effort to tell userspace that this request caused the error. |
37 | */ | 42 | */ |
38 | static inline void __o2info_set_request_error(struct ocfs2_info_request *kreq, | 43 | static inline void o2info_set_request_error(struct ocfs2_info_request *kreq, |
39 | struct ocfs2_info_request __user *req) | 44 | struct ocfs2_info_request __user *req) |
40 | { | 45 | { |
41 | kreq->ir_flags |= OCFS2_INFO_FL_ERROR; | 46 | kreq->ir_flags |= OCFS2_INFO_FL_ERROR; |
42 | (void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags)); | 47 | (void)put_user(kreq->ir_flags, (__u32 __user *)&(req->ir_flags)); |
43 | } | 48 | } |
44 | 49 | ||
45 | #define o2info_set_request_error(a, b) \ | 50 | static inline void o2info_set_request_filled(struct ocfs2_info_request *req) |
46 | __o2info_set_request_error((struct ocfs2_info_request *)&(a), b) | ||
47 | |||
48 | static inline void __o2info_set_request_filled(struct ocfs2_info_request *req) | ||
49 | { | 51 | { |
50 | req->ir_flags |= OCFS2_INFO_FL_FILLED; | 52 | req->ir_flags |= OCFS2_INFO_FL_FILLED; |
51 | } | 53 | } |
52 | 54 | ||
53 | #define o2info_set_request_filled(a) \ | 55 | static inline void o2info_clear_request_filled(struct ocfs2_info_request *req) |
54 | __o2info_set_request_filled((struct ocfs2_info_request *)&(a)) | ||
55 | |||
56 | static inline void __o2info_clear_request_filled(struct ocfs2_info_request *req) | ||
57 | { | 56 | { |
58 | req->ir_flags &= ~OCFS2_INFO_FL_FILLED; | 57 | req->ir_flags &= ~OCFS2_INFO_FL_FILLED; |
59 | } | 58 | } |
60 | 59 | ||
61 | #define o2info_clear_request_filled(a) \ | 60 | static inline int o2info_coherent(struct ocfs2_info_request *req) |
62 | __o2info_clear_request_filled((struct ocfs2_info_request *)&(a)) | 61 | { |
62 | return (!(req->ir_flags & OCFS2_INFO_FL_NON_COHERENT)); | ||
63 | } | ||
63 | 64 | ||
64 | static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags) | 65 | static int ocfs2_get_inode_attr(struct inode *inode, unsigned *flags) |
65 | { | 66 | { |
@@ -153,7 +154,7 @@ int ocfs2_info_handle_blocksize(struct inode *inode, | |||
153 | 154 | ||
154 | oib.ib_blocksize = inode->i_sb->s_blocksize; | 155 | oib.ib_blocksize = inode->i_sb->s_blocksize; |
155 | 156 | ||
156 | o2info_set_request_filled(oib); | 157 | o2info_set_request_filled(&oib.ib_req); |
157 | 158 | ||
158 | if (o2info_to_user(oib, req)) | 159 | if (o2info_to_user(oib, req)) |
159 | goto bail; | 160 | goto bail; |
@@ -161,7 +162,7 @@ int ocfs2_info_handle_blocksize(struct inode *inode, | |||
161 | status = 0; | 162 | status = 0; |
162 | bail: | 163 | bail: |
163 | if (status) | 164 | if (status) |
164 | o2info_set_request_error(oib, req); | 165 | o2info_set_request_error(&oib.ib_req, req); |
165 | 166 | ||
166 | return status; | 167 | return status; |
167 | } | 168 | } |
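The macro helpers were replaced above by inline functions that take a pointer to the embedded ocfs2_info_request header, so callers now pass &foo.xx_req instead of casting the whole container struct. A hedged toy illustration of why operating on the embedded header directly is safer than the cast-based macros (all names here are made up for the example):

#include <stdio.h>

#define INFO_FL_FILLED 0x1

/* Common header embedded at the start of every request type. */
struct info_request {
        unsigned int ir_flags;
};

struct info_blocksize {
        struct info_request ib_req;   /* the old macros required this to be
                                       * the first member for the cast to work */
        unsigned int ib_blocksize;
};

/* Type-checked: only accepts a pointer to the header itself. */
static inline void set_request_filled(struct info_request *req)
{
        req->ir_flags |= INFO_FL_FILLED;
}

int main(void)
{
        struct info_blocksize oib = { { 0 }, 4096 };

        set_request_filled(&oib.ib_req);  /* explicit, no hidden layout assumption */
        printf("flags=0x%x\n", oib.ib_req.ir_flags);
        return 0;
}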
@@ -178,7 +179,7 @@ int ocfs2_info_handle_clustersize(struct inode *inode, | |||
178 | 179 | ||
179 | oic.ic_clustersize = osb->s_clustersize; | 180 | oic.ic_clustersize = osb->s_clustersize; |
180 | 181 | ||
181 | o2info_set_request_filled(oic); | 182 | o2info_set_request_filled(&oic.ic_req); |
182 | 183 | ||
183 | if (o2info_to_user(oic, req)) | 184 | if (o2info_to_user(oic, req)) |
184 | goto bail; | 185 | goto bail; |
@@ -186,7 +187,7 @@ int ocfs2_info_handle_clustersize(struct inode *inode, | |||
186 | status = 0; | 187 | status = 0; |
187 | bail: | 188 | bail: |
188 | if (status) | 189 | if (status) |
189 | o2info_set_request_error(oic, req); | 190 | o2info_set_request_error(&oic.ic_req, req); |
190 | 191 | ||
191 | return status; | 192 | return status; |
192 | } | 193 | } |
@@ -203,7 +204,7 @@ int ocfs2_info_handle_maxslots(struct inode *inode, | |||
203 | 204 | ||
204 | oim.im_max_slots = osb->max_slots; | 205 | oim.im_max_slots = osb->max_slots; |
205 | 206 | ||
206 | o2info_set_request_filled(oim); | 207 | o2info_set_request_filled(&oim.im_req); |
207 | 208 | ||
208 | if (o2info_to_user(oim, req)) | 209 | if (o2info_to_user(oim, req)) |
209 | goto bail; | 210 | goto bail; |
@@ -211,7 +212,7 @@ int ocfs2_info_handle_maxslots(struct inode *inode, | |||
211 | status = 0; | 212 | status = 0; |
212 | bail: | 213 | bail: |
213 | if (status) | 214 | if (status) |
214 | o2info_set_request_error(oim, req); | 215 | o2info_set_request_error(&oim.im_req, req); |
215 | 216 | ||
216 | return status; | 217 | return status; |
217 | } | 218 | } |
@@ -228,7 +229,7 @@ int ocfs2_info_handle_label(struct inode *inode, | |||
228 | 229 | ||
229 | memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN); | 230 | memcpy(oil.il_label, osb->vol_label, OCFS2_MAX_VOL_LABEL_LEN); |
230 | 231 | ||
231 | o2info_set_request_filled(oil); | 232 | o2info_set_request_filled(&oil.il_req); |
232 | 233 | ||
233 | if (o2info_to_user(oil, req)) | 234 | if (o2info_to_user(oil, req)) |
234 | goto bail; | 235 | goto bail; |
@@ -236,7 +237,7 @@ int ocfs2_info_handle_label(struct inode *inode, | |||
236 | status = 0; | 237 | status = 0; |
237 | bail: | 238 | bail: |
238 | if (status) | 239 | if (status) |
239 | o2info_set_request_error(oil, req); | 240 | o2info_set_request_error(&oil.il_req, req); |
240 | 241 | ||
241 | return status; | 242 | return status; |
242 | } | 243 | } |
@@ -253,7 +254,7 @@ int ocfs2_info_handle_uuid(struct inode *inode, | |||
253 | 254 | ||
254 | memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1); | 255 | memcpy(oiu.iu_uuid_str, osb->uuid_str, OCFS2_TEXT_UUID_LEN + 1); |
255 | 256 | ||
256 | o2info_set_request_filled(oiu); | 257 | o2info_set_request_filled(&oiu.iu_req); |
257 | 258 | ||
258 | if (o2info_to_user(oiu, req)) | 259 | if (o2info_to_user(oiu, req)) |
259 | goto bail; | 260 | goto bail; |
@@ -261,7 +262,7 @@ int ocfs2_info_handle_uuid(struct inode *inode, | |||
261 | status = 0; | 262 | status = 0; |
262 | bail: | 263 | bail: |
263 | if (status) | 264 | if (status) |
264 | o2info_set_request_error(oiu, req); | 265 | o2info_set_request_error(&oiu.iu_req, req); |
265 | 266 | ||
266 | return status; | 267 | return status; |
267 | } | 268 | } |
@@ -280,7 +281,7 @@ int ocfs2_info_handle_fs_features(struct inode *inode, | |||
280 | oif.if_incompat_features = osb->s_feature_incompat; | 281 | oif.if_incompat_features = osb->s_feature_incompat; |
281 | oif.if_ro_compat_features = osb->s_feature_ro_compat; | 282 | oif.if_ro_compat_features = osb->s_feature_ro_compat; |
282 | 283 | ||
283 | o2info_set_request_filled(oif); | 284 | o2info_set_request_filled(&oif.if_req); |
284 | 285 | ||
285 | if (o2info_to_user(oif, req)) | 286 | if (o2info_to_user(oif, req)) |
286 | goto bail; | 287 | goto bail; |
@@ -288,7 +289,7 @@ int ocfs2_info_handle_fs_features(struct inode *inode, | |||
288 | status = 0; | 289 | status = 0; |
289 | bail: | 290 | bail: |
290 | if (status) | 291 | if (status) |
291 | o2info_set_request_error(oif, req); | 292 | o2info_set_request_error(&oif.if_req, req); |
292 | 293 | ||
293 | return status; | 294 | return status; |
294 | } | 295 | } |
@@ -305,7 +306,7 @@ int ocfs2_info_handle_journal_size(struct inode *inode, | |||
305 | 306 | ||
306 | oij.ij_journal_size = osb->journal->j_inode->i_size; | 307 | oij.ij_journal_size = osb->journal->j_inode->i_size; |
307 | 308 | ||
308 | o2info_set_request_filled(oij); | 309 | o2info_set_request_filled(&oij.ij_req); |
309 | 310 | ||
310 | if (o2info_to_user(oij, req)) | 311 | if (o2info_to_user(oij, req)) |
311 | goto bail; | 312 | goto bail; |
@@ -313,7 +314,408 @@ int ocfs2_info_handle_journal_size(struct inode *inode, | |||
313 | status = 0; | 314 | status = 0; |
314 | bail: | 315 | bail: |
315 | if (status) | 316 | if (status) |
316 | o2info_set_request_error(oij, req); | 317 | o2info_set_request_error(&oij.ij_req, req); |
318 | |||
319 | return status; | ||
320 | } | ||
321 | |||
322 | int ocfs2_info_scan_inode_alloc(struct ocfs2_super *osb, | ||
323 | struct inode *inode_alloc, u64 blkno, | ||
324 | struct ocfs2_info_freeinode *fi, u32 slot) | ||
325 | { | ||
326 | int status = 0, unlock = 0; | ||
327 | |||
328 | struct buffer_head *bh = NULL; | ||
329 | struct ocfs2_dinode *dinode_alloc = NULL; | ||
330 | |||
331 | if (inode_alloc) | ||
332 | mutex_lock(&inode_alloc->i_mutex); | ||
333 | |||
334 | if (o2info_coherent(&fi->ifi_req)) { | ||
335 | status = ocfs2_inode_lock(inode_alloc, &bh, 0); | ||
336 | if (status < 0) { | ||
337 | mlog_errno(status); | ||
338 | goto bail; | ||
339 | } | ||
340 | unlock = 1; | ||
341 | } else { | ||
342 | status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh); | ||
343 | if (status < 0) { | ||
344 | mlog_errno(status); | ||
345 | goto bail; | ||
346 | } | ||
347 | } | ||
348 | |||
349 | dinode_alloc = (struct ocfs2_dinode *)bh->b_data; | ||
350 | |||
351 | fi->ifi_stat[slot].lfi_total = | ||
352 | le32_to_cpu(dinode_alloc->id1.bitmap1.i_total); | ||
353 | fi->ifi_stat[slot].lfi_free = | ||
354 | le32_to_cpu(dinode_alloc->id1.bitmap1.i_total) - | ||
355 | le32_to_cpu(dinode_alloc->id1.bitmap1.i_used); | ||
356 | |||
357 | bail: | ||
358 | if (unlock) | ||
359 | ocfs2_inode_unlock(inode_alloc, 0); | ||
360 | |||
361 | if (inode_alloc) | ||
362 | mutex_unlock(&inode_alloc->i_mutex); | ||
363 | |||
364 | brelse(bh); | ||
365 | |||
366 | return status; | ||
367 | } | ||
368 | |||
369 | int ocfs2_info_handle_freeinode(struct inode *inode, | ||
370 | struct ocfs2_info_request __user *req) | ||
371 | { | ||
372 | u32 i; | ||
373 | u64 blkno = -1; | ||
374 | char namebuf[40]; | ||
375 | int status = -EFAULT, type = INODE_ALLOC_SYSTEM_INODE; | ||
376 | struct ocfs2_info_freeinode *oifi = NULL; | ||
377 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
378 | struct inode *inode_alloc = NULL; | ||
379 | |||
380 | oifi = kzalloc(sizeof(struct ocfs2_info_freeinode), GFP_KERNEL); | ||
381 | if (!oifi) { | ||
382 | status = -ENOMEM; | ||
383 | mlog_errno(status); | ||
384 | goto bail; | ||
385 | } | ||
386 | |||
387 | if (o2info_from_user(*oifi, req)) | ||
388 | goto bail; | ||
389 | |||
390 | oifi->ifi_slotnum = osb->max_slots; | ||
391 | |||
392 | for (i = 0; i < oifi->ifi_slotnum; i++) { | ||
393 | if (o2info_coherent(&oifi->ifi_req)) { | ||
394 | inode_alloc = ocfs2_get_system_file_inode(osb, type, i); | ||
395 | if (!inode_alloc) { | ||
396 | mlog(ML_ERROR, "unable to get alloc inode in " | ||
397 | "slot %u\n", i); | ||
398 | status = -EIO; | ||
399 | goto bail; | ||
400 | } | ||
401 | } else { | ||
402 | ocfs2_sprintf_system_inode_name(namebuf, | ||
403 | sizeof(namebuf), | ||
404 | type, i); | ||
405 | status = ocfs2_lookup_ino_from_name(osb->sys_root_inode, | ||
406 | namebuf, | ||
407 | strlen(namebuf), | ||
408 | &blkno); | ||
409 | if (status < 0) { | ||
410 | status = -ENOENT; | ||
411 | goto bail; | ||
412 | } | ||
413 | } | ||
414 | |||
415 | status = ocfs2_info_scan_inode_alloc(osb, inode_alloc, blkno, oifi, i); | ||
416 | if (status < 0) | ||
417 | goto bail; | ||
418 | |||
419 | iput(inode_alloc); | ||
420 | inode_alloc = NULL; | ||
421 | } | ||
422 | |||
423 | o2info_set_request_filled(&oifi->ifi_req); | ||
424 | |||
425 | if (o2info_to_user(*oifi, req)) | ||
426 | goto bail; | ||
427 | |||
428 | status = 0; | ||
429 | bail: | ||
430 | if (status) | ||
431 | o2info_set_request_error(&oifi->ifi_req, req); | ||
432 | |||
433 | kfree(oifi); | ||
434 | |||
435 | return status; | ||
436 | } | ||
437 | |||
438 | static void o2ffg_update_histogram(struct ocfs2_info_free_chunk_list *hist, | ||
439 | unsigned int chunksize) | ||
440 | { | ||
441 | int index; | ||
442 | |||
443 | index = __ilog2_u32(chunksize); | ||
444 | if (index >= OCFS2_INFO_MAX_HIST) | ||
445 | index = OCFS2_INFO_MAX_HIST - 1; | ||
446 | |||
447 | hist->fc_chunks[index]++; | ||
448 | hist->fc_clusters[index] += chunksize; | ||
449 | } | ||
450 | |||
451 | static void o2ffg_update_stats(struct ocfs2_info_freefrag_stats *stats, | ||
452 | unsigned int chunksize) | ||
453 | { | ||
454 | if (chunksize > stats->ffs_max) | ||
455 | stats->ffs_max = chunksize; | ||
456 | |||
457 | if (chunksize < stats->ffs_min) | ||
458 | stats->ffs_min = chunksize; | ||
459 | |||
460 | stats->ffs_avg += chunksize; | ||
461 | stats->ffs_free_chunks_real++; | ||
462 | } | ||
463 | |||
464 | void ocfs2_info_update_ffg(struct ocfs2_info_freefrag *ffg, | ||
465 | unsigned int chunksize) | ||
466 | { | ||
467 | o2ffg_update_histogram(&(ffg->iff_ffs.ffs_fc_hist), chunksize); | ||
468 | o2ffg_update_stats(&(ffg->iff_ffs), chunksize); | ||
469 | } | ||
470 | |||
471 | int ocfs2_info_freefrag_scan_chain(struct ocfs2_super *osb, | ||
472 | struct inode *gb_inode, | ||
473 | struct ocfs2_dinode *gb_dinode, | ||
474 | struct ocfs2_chain_rec *rec, | ||
475 | struct ocfs2_info_freefrag *ffg, | ||
476 | u32 chunks_in_group) | ||
477 | { | ||
478 | int status = 0, used; | ||
479 | u64 blkno; | ||
480 | |||
481 | struct buffer_head *bh = NULL; | ||
482 | struct ocfs2_group_desc *bg = NULL; | ||
483 | |||
484 | unsigned int max_bits, num_clusters; | ||
485 | unsigned int offset = 0, cluster, chunk; | ||
486 | unsigned int chunk_free, last_chunksize = 0; | ||
487 | |||
488 | if (!le32_to_cpu(rec->c_free)) | ||
489 | goto bail; | ||
490 | |||
491 | do { | ||
492 | if (!bg) | ||
493 | blkno = le64_to_cpu(rec->c_blkno); | ||
494 | else | ||
495 | blkno = le64_to_cpu(bg->bg_next_group); | ||
496 | |||
497 | if (bh) { | ||
498 | brelse(bh); | ||
499 | bh = NULL; | ||
500 | } | ||
501 | |||
502 | if (o2info_coherent(&ffg->iff_req)) | ||
503 | status = ocfs2_read_group_descriptor(gb_inode, | ||
504 | gb_dinode, | ||
505 | blkno, &bh); | ||
506 | else | ||
507 | status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh); | ||
508 | |||
509 | if (status < 0) { | ||
510 | mlog(ML_ERROR, "Can't read the group descriptor # " | ||
511 | "%llu from device.", (unsigned long long)blkno); | ||
512 | status = -EIO; | ||
513 | goto bail; | ||
514 | } | ||
515 | |||
516 | bg = (struct ocfs2_group_desc *)bh->b_data; | ||
517 | |||
518 | if (!le16_to_cpu(bg->bg_free_bits_count)) | ||
519 | continue; | ||
520 | |||
521 | max_bits = le16_to_cpu(bg->bg_bits); | ||
522 | offset = 0; | ||
523 | |||
524 | for (chunk = 0; chunk < chunks_in_group; chunk++) { | ||
525 | /* | ||
526 | * the last chunk may not be an entire one. | ||
527 | */ | ||
528 | if ((offset + ffg->iff_chunksize) > max_bits) | ||
529 | num_clusters = max_bits - offset; | ||
530 | else | ||
531 | num_clusters = ffg->iff_chunksize; | ||
532 | |||
533 | chunk_free = 0; | ||
534 | for (cluster = 0; cluster < num_clusters; cluster++) { | ||
535 | used = ocfs2_test_bit(offset, | ||
536 | (unsigned long *)bg->bg_bitmap); | ||
537 | /* | ||
538 | * - chunk_free counts free clusters in #N chunk. | ||
539 | * - last_chunksize records the size (in clusters) | ||
540 | * of the last real free chunk being counted. | ||
541 | */ | ||
542 | if (!used) { | ||
543 | last_chunksize++; | ||
544 | chunk_free++; | ||
545 | } | ||
546 | |||
547 | if (used && last_chunksize) { | ||
548 | ocfs2_info_update_ffg(ffg, | ||
549 | last_chunksize); | ||
550 | last_chunksize = 0; | ||
551 | } | ||
552 | |||
553 | offset++; | ||
554 | } | ||
555 | |||
556 | if (chunk_free == ffg->iff_chunksize) | ||
557 | ffg->iff_ffs.ffs_free_chunks++; | ||
558 | } | ||
559 | |||
560 | /* | ||
561 | * need to update the info for the last free chunk. | ||
562 | */ | ||
563 | if (last_chunksize) | ||
564 | ocfs2_info_update_ffg(ffg, last_chunksize); | ||
565 | |||
566 | } while (le64_to_cpu(bg->bg_next_group)); | ||
567 | |||
568 | bail: | ||
569 | brelse(bh); | ||
570 | |||
571 | return status; | ||
572 | } | ||
573 | |||
574 | int ocfs2_info_freefrag_scan_bitmap(struct ocfs2_super *osb, | ||
575 | struct inode *gb_inode, u64 blkno, | ||
576 | struct ocfs2_info_freefrag *ffg) | ||
577 | { | ||
578 | u32 chunks_in_group; | ||
579 | int status = 0, unlock = 0, i; | ||
580 | |||
581 | struct buffer_head *bh = NULL; | ||
582 | struct ocfs2_chain_list *cl = NULL; | ||
583 | struct ocfs2_chain_rec *rec = NULL; | ||
584 | struct ocfs2_dinode *gb_dinode = NULL; | ||
585 | |||
586 | if (gb_inode) | ||
587 | mutex_lock(&gb_inode->i_mutex); | ||
588 | |||
589 | if (o2info_coherent(&ffg->iff_req)) { | ||
590 | status = ocfs2_inode_lock(gb_inode, &bh, 0); | ||
591 | if (status < 0) { | ||
592 | mlog_errno(status); | ||
593 | goto bail; | ||
594 | } | ||
595 | unlock = 1; | ||
596 | } else { | ||
597 | status = ocfs2_read_blocks_sync(osb, blkno, 1, &bh); | ||
598 | if (status < 0) { | ||
599 | mlog_errno(status); | ||
600 | goto bail; | ||
601 | } | ||
602 | } | ||
603 | |||
604 | gb_dinode = (struct ocfs2_dinode *)bh->b_data; | ||
605 | cl = &(gb_dinode->id2.i_chain); | ||
606 | |||
607 | /* | ||
608 | * Chunksize (in clusters) from userspace should be | ||
609 | * less than the number of clusters in a group. | ||
610 | */ | ||
611 | if (ffg->iff_chunksize > le16_to_cpu(cl->cl_cpg)) { | ||
612 | status = -EINVAL; | ||
613 | goto bail; | ||
614 | } | ||
615 | |||
616 | memset(&ffg->iff_ffs, 0, sizeof(struct ocfs2_info_freefrag_stats)); | ||
617 | |||
618 | ffg->iff_ffs.ffs_min = ~0U; | ||
619 | ffg->iff_ffs.ffs_clusters = | ||
620 | le32_to_cpu(gb_dinode->id1.bitmap1.i_total); | ||
621 | ffg->iff_ffs.ffs_free_clusters = ffg->iff_ffs.ffs_clusters - | ||
622 | le32_to_cpu(gb_dinode->id1.bitmap1.i_used); | ||
623 | |||
624 | chunks_in_group = le16_to_cpu(cl->cl_cpg) / ffg->iff_chunksize + 1; | ||
625 | |||
626 | for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) { | ||
627 | rec = &(cl->cl_recs[i]); | ||
628 | status = ocfs2_info_freefrag_scan_chain(osb, gb_inode, | ||
629 | gb_dinode, | ||
630 | rec, ffg, | ||
631 | chunks_in_group); | ||
632 | if (status) | ||
633 | goto bail; | ||
634 | } | ||
635 | |||
636 | if (ffg->iff_ffs.ffs_free_chunks_real) | ||
637 | ffg->iff_ffs.ffs_avg = (ffg->iff_ffs.ffs_avg / | ||
638 | ffg->iff_ffs.ffs_free_chunks_real); | ||
639 | bail: | ||
640 | if (unlock) | ||
641 | ocfs2_inode_unlock(gb_inode, 0); | ||
642 | |||
643 | if (gb_inode) | ||
644 | mutex_unlock(&gb_inode->i_mutex); | ||
645 | |||
646 | if (gb_inode) | ||
647 | iput(gb_inode); | ||
648 | |||
649 | brelse(bh); | ||
650 | |||
651 | return status; | ||
652 | } | ||
653 | |||
654 | int ocfs2_info_handle_freefrag(struct inode *inode, | ||
655 | struct ocfs2_info_request __user *req) | ||
656 | { | ||
657 | u64 blkno = -1; | ||
658 | char namebuf[40]; | ||
659 | int status = -EFAULT, type = GLOBAL_BITMAP_SYSTEM_INODE; | ||
660 | |||
661 | struct ocfs2_info_freefrag *oiff; | ||
662 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
663 | struct inode *gb_inode = NULL; | ||
664 | |||
665 | oiff = kzalloc(sizeof(struct ocfs2_info_freefrag), GFP_KERNEL); | ||
666 | if (!oiff) { | ||
667 | status = -ENOMEM; | ||
668 | mlog_errno(status); | ||
669 | goto bail; | ||
670 | } | ||
671 | |||
672 | if (o2info_from_user(*oiff, req)) | ||
673 | goto bail; | ||
674 | /* | ||
675 | * chunksize from userspace should be a power of 2. | ||
676 | */ | ||
677 | if ((oiff->iff_chunksize & (oiff->iff_chunksize - 1)) || | ||
678 | (!oiff->iff_chunksize)) { | ||
679 | status = -EINVAL; | ||
680 | goto bail; | ||
681 | } | ||
682 | |||
683 | if (o2info_coherent(&oiff->iff_req)) { | ||
684 | gb_inode = ocfs2_get_system_file_inode(osb, type, | ||
685 | OCFS2_INVALID_SLOT); | ||
686 | if (!gb_inode) { | ||
687 | mlog(ML_ERROR, "unable to get global_bitmap inode\n"); | ||
688 | status = -EIO; | ||
689 | goto bail; | ||
690 | } | ||
691 | } else { | ||
692 | ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, | ||
693 | OCFS2_INVALID_SLOT); | ||
694 | status = ocfs2_lookup_ino_from_name(osb->sys_root_inode, | ||
695 | namebuf, | ||
696 | strlen(namebuf), | ||
697 | &blkno); | ||
698 | if (status < 0) { | ||
699 | status = -ENOENT; | ||
700 | goto bail; | ||
701 | } | ||
702 | } | ||
703 | |||
704 | status = ocfs2_info_freefrag_scan_bitmap(osb, gb_inode, blkno, oiff); | ||
705 | if (status < 0) | ||
706 | goto bail; | ||
707 | |||
708 | o2info_set_request_filled(&oiff->iff_req); | ||
709 | |||
710 | if (o2info_to_user(*oiff, req)) | ||
711 | goto bail; | ||
712 | |||
713 | status = 0; | ||
714 | bail: | ||
715 | if (status) | ||
716 | o2info_set_request_error(&oiff->iff_req, req); | ||
717 | |||
718 | kfree(oiff); | ||
317 | 719 | ||
318 | return status; | 720 | return status; |
319 | } | 721 | } |
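ocfs2_info_handle_freefrag() above rejects a chunksize that is not a power of two using the (x & (x - 1)) test, and o2ffg_update_histogram() buckets each free run by the integer log2 of its length. A small standalone sketch of both ideas (a userspace model, not the kernel helpers; the bucket limit is a stand-in for OCFS2_INFO_MAX_HIST):

#include <stdio.h>

#define MAX_HIST 32

/* Non-zero power of two: exactly one bit set. */
static int is_pow2(unsigned int x)
{
        return x && !(x & (x - 1));
}

/* Integer log2, i.e. index of the highest set bit (x must be non-zero). */
static int ilog2_u32(unsigned int x)
{
        int i = -1;
        while (x) {
                x >>= 1;
                i++;
        }
        return i;
}

int main(void)
{
        unsigned int chunks[] = { 1, 4, 12, 100 };
        unsigned int hist[MAX_HIST] = { 0 };

        printf("is_pow2(8)=%d is_pow2(12)=%d\n", is_pow2(8), is_pow2(12));

        for (int i = 0; i < 4; i++) {
                int idx = ilog2_u32(chunks[i]);
                if (idx >= MAX_HIST)
                        idx = MAX_HIST - 1;
                hist[idx]++;      /* e.g. a 12-cluster run lands in bucket 3 */
        }

        for (int b = 0; b < 8; b++)
                printf("bucket %d: %u\n", b, hist[b]);
        return 0;
}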
@@ -327,7 +729,7 @@ int ocfs2_info_handle_unknown(struct inode *inode, | |||
327 | if (o2info_from_user(oir, req)) | 729 | if (o2info_from_user(oir, req)) |
328 | goto bail; | 730 | goto bail; |
329 | 731 | ||
330 | o2info_clear_request_filled(oir); | 732 | o2info_clear_request_filled(&oir); |
331 | 733 | ||
332 | if (o2info_to_user(oir, req)) | 734 | if (o2info_to_user(oir, req)) |
333 | goto bail; | 735 | goto bail; |
@@ -335,7 +737,7 @@ int ocfs2_info_handle_unknown(struct inode *inode, | |||
335 | status = 0; | 737 | status = 0; |
336 | bail: | 738 | bail: |
337 | if (status) | 739 | if (status) |
338 | o2info_set_request_error(oir, req); | 740 | o2info_set_request_error(&oir, req); |
339 | 741 | ||
340 | return status; | 742 | return status; |
341 | } | 743 | } |
@@ -389,6 +791,14 @@ int ocfs2_info_handle_request(struct inode *inode, | |||
389 | if (oir.ir_size == sizeof(struct ocfs2_info_journal_size)) | 791 | if (oir.ir_size == sizeof(struct ocfs2_info_journal_size)) |
390 | status = ocfs2_info_handle_journal_size(inode, req); | 792 | status = ocfs2_info_handle_journal_size(inode, req); |
391 | break; | 793 | break; |
794 | case OCFS2_INFO_FREEINODE: | ||
795 | if (oir.ir_size == sizeof(struct ocfs2_info_freeinode)) | ||
796 | status = ocfs2_info_handle_freeinode(inode, req); | ||
797 | break; | ||
798 | case OCFS2_INFO_FREEFRAG: | ||
799 | if (oir.ir_size == sizeof(struct ocfs2_info_freefrag)) | ||
800 | status = ocfs2_info_handle_freefrag(inode, req); | ||
801 | break; | ||
392 | default: | 802 | default: |
393 | status = ocfs2_info_handle_unknown(inode, req); | 803 | status = ocfs2_info_handle_unknown(inode, req); |
394 | break; | 804 | break; |
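ocfs2_info_handle_request() dispatches on the request code and only services a request whose ir_size matches the structure for that code; unknown codes simply get the FILLED bit cleared rather than producing an error. A hedged sketch of that dispatch-and-validate pattern with invented structures (this does not reproduce the real OCFS2_IOC_INFO UAPI layout):

#include <stdio.h>
#include <stddef.h>

enum { INFO_BLOCKSIZE = 1, INFO_LABEL = 2 };

struct info_request {                 /* common header, invented layout */
        unsigned int ir_code;
        unsigned int ir_size;
        unsigned int ir_flags;
#define INFO_FL_FILLED 0x1
};

struct info_blocksize { struct info_request ib_req; unsigned int ib_blocksize; };
struct info_label     { struct info_request il_req; char il_label[64]; };

/* Accept the request only if the declared size matches the code. */
static int handle_request(struct info_request *req)
{
        switch (req->ir_code) {
        case INFO_BLOCKSIZE:
                if (req->ir_size == sizeof(struct info_blocksize))
                        req->ir_flags |= INFO_FL_FILLED;
                break;
        case INFO_LABEL:
                if (req->ir_size == sizeof(struct info_label))
                        req->ir_flags |= INFO_FL_FILLED;
                break;
        default:
                req->ir_flags &= ~INFO_FL_FILLED;  /* unknown: not an error */
                break;
        }
        return 0;
}

int main(void)
{
        struct info_blocksize oib = { { INFO_BLOCKSIZE,
                                        sizeof(struct info_blocksize), 0 }, 0 };
        handle_request(&oib.ib_req);
        printf("filled=%d\n", !!(oib.ib_req.ir_flags & INFO_FL_FILLED));
        return 0;
}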
@@ -542,6 +952,31 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) | |||
542 | return -EFAULT; | 952 | return -EFAULT; |
543 | 953 | ||
544 | return ocfs2_info_handle(inode, &info, 0); | 954 | return ocfs2_info_handle(inode, &info, 0); |
955 | case FITRIM: | ||
956 | { | ||
957 | struct super_block *sb = inode->i_sb; | ||
958 | struct fstrim_range range; | ||
959 | int ret = 0; | ||
960 | |||
961 | if (!capable(CAP_SYS_ADMIN)) | ||
962 | return -EPERM; | ||
963 | |||
964 | if (copy_from_user(&range, (struct fstrim_range *)arg, | ||
965 | sizeof(range))) | ||
966 | return -EFAULT; | ||
967 | |||
968 | ret = ocfs2_trim_fs(sb, &range); | ||
969 | if (ret < 0) | ||
970 | return ret; | ||
971 | |||
972 | if (copy_to_user((struct fstrim_range *)arg, &range, | ||
973 | sizeof(range))) | ||
974 | return -EFAULT; | ||
975 | |||
976 | return 0; | ||
977 | } | ||
978 | case OCFS2_IOC_MOVE_EXT: | ||
979 | return ocfs2_ioctl_move_extents(filp, (void __user *)arg); | ||
545 | default: | 980 | default: |
546 | return -ENOTTY; | 981 | return -ENOTTY; |
547 | } | 982 | } |
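The new FITRIM case copies an fstrim_range from userspace, runs ocfs2_trim_fs(), and copies the (possibly updated) range back. FITRIM and struct fstrim_range are the standard Linux interfaces, so from userspace the call would look roughly like this (the mount point path is illustrative):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>        /* FITRIM, struct fstrim_range */

int main(void)
{
        struct fstrim_range range = {
                .start  = 0,
                .len    = ~0ULL,      /* whole filesystem */
                .minlen = 0,
        };

        int fd = open("/mnt/ocfs2", O_RDONLY);   /* any file/dir on the fs */
        if (fd < 0) {
                perror("open");
                return 1;
        }

        if (ioctl(fd, FITRIM, &range) < 0)
                perror("ioctl(FITRIM)");
        else
                printf("trimmed %llu bytes\n",
                       (unsigned long long)range.len);

        close(fd);
        return 0;
}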
@@ -569,6 +1004,7 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
569 | case OCFS2_IOC_GROUP_EXTEND: | 1004 | case OCFS2_IOC_GROUP_EXTEND: |
570 | case OCFS2_IOC_GROUP_ADD: | 1005 | case OCFS2_IOC_GROUP_ADD: |
571 | case OCFS2_IOC_GROUP_ADD64: | 1006 | case OCFS2_IOC_GROUP_ADD64: |
1007 | case FITRIM: | ||
572 | break; | 1008 | break; |
573 | case OCFS2_IOC_REFLINK: | 1009 | case OCFS2_IOC_REFLINK: |
574 | if (copy_from_user(&args, (struct reflink_arguments *)arg, | 1010 | if (copy_from_user(&args, (struct reflink_arguments *)arg, |
@@ -584,6 +1020,8 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg) | |||
584 | return -EFAULT; | 1020 | return -EFAULT; |
585 | 1021 | ||
586 | return ocfs2_info_handle(inode, &info, 1); | 1022 | return ocfs2_info_handle(inode, &info, 1); |
1023 | case OCFS2_IOC_MOVE_EXT: | ||
1024 | break; | ||
587 | default: | 1025 | default: |
588 | return -ENOIOCTLCMD; | 1026 | return -ENOIOCTLCMD; |
589 | } | 1027 | } |
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c new file mode 100644 index 000000000000..cd9427023d2e --- /dev/null +++ b/fs/ocfs2/move_extents.c | |||
@@ -0,0 +1,1152 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * move_extents.c | ||
5 | * | ||
6 | * Copyright (C) 2011 Oracle. All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public | ||
10 | * License version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | */ | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/mount.h> | ||
20 | #include <linux/swap.h> | ||
21 | |||
22 | #include <cluster/masklog.h> | ||
23 | |||
24 | #include "ocfs2.h" | ||
25 | #include "ocfs2_ioctl.h" | ||
26 | |||
27 | #include "alloc.h" | ||
28 | #include "aops.h" | ||
29 | #include "dlmglue.h" | ||
30 | #include "extent_map.h" | ||
31 | #include "inode.h" | ||
32 | #include "journal.h" | ||
33 | #include "suballoc.h" | ||
34 | #include "uptodate.h" | ||
35 | #include "super.h" | ||
36 | #include "dir.h" | ||
37 | #include "buffer_head_io.h" | ||
38 | #include "sysfile.h" | ||
39 | #include "suballoc.h" | ||
40 | #include "refcounttree.h" | ||
41 | #include "move_extents.h" | ||
42 | |||
43 | struct ocfs2_move_extents_context { | ||
44 | struct inode *inode; | ||
45 | struct file *file; | ||
46 | int auto_defrag; | ||
47 | int partial; | ||
48 | int credits; | ||
49 | u32 new_phys_cpos; | ||
50 | u32 clusters_moved; | ||
51 | u64 refcount_loc; | ||
52 | struct ocfs2_move_extents *range; | ||
53 | struct ocfs2_extent_tree et; | ||
54 | struct ocfs2_alloc_context *meta_ac; | ||
55 | struct ocfs2_alloc_context *data_ac; | ||
56 | struct ocfs2_cached_dealloc_ctxt dealloc; | ||
57 | }; | ||
58 | |||
59 | static int __ocfs2_move_extent(handle_t *handle, | ||
60 | struct ocfs2_move_extents_context *context, | ||
61 | u32 cpos, u32 len, u32 p_cpos, u32 new_p_cpos, | ||
62 | int ext_flags) | ||
63 | { | ||
64 | int ret = 0, index; | ||
65 | struct inode *inode = context->inode; | ||
66 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
67 | struct ocfs2_extent_rec *rec, replace_rec; | ||
68 | struct ocfs2_path *path = NULL; | ||
69 | struct ocfs2_extent_list *el; | ||
70 | u64 ino = ocfs2_metadata_cache_owner(context->et.et_ci); | ||
71 | u64 old_blkno = ocfs2_clusters_to_blocks(inode->i_sb, p_cpos); | ||
72 | |||
73 | ret = ocfs2_duplicate_clusters_by_page(handle, context->file, cpos, | ||
74 | p_cpos, new_p_cpos, len); | ||
75 | if (ret) { | ||
76 | mlog_errno(ret); | ||
77 | goto out; | ||
78 | } | ||
79 | |||
80 | memset(&replace_rec, 0, sizeof(replace_rec)); | ||
81 | replace_rec.e_cpos = cpu_to_le32(cpos); | ||
82 | replace_rec.e_leaf_clusters = cpu_to_le16(len); | ||
83 | replace_rec.e_blkno = cpu_to_le64(ocfs2_clusters_to_blocks(inode->i_sb, | ||
84 | new_p_cpos)); | ||
85 | |||
86 | path = ocfs2_new_path_from_et(&context->et); | ||
87 | if (!path) { | ||
88 | ret = -ENOMEM; | ||
89 | mlog_errno(ret); | ||
90 | goto out; | ||
91 | } | ||
92 | |||
93 | ret = ocfs2_find_path(INODE_CACHE(inode), path, cpos); | ||
94 | if (ret) { | ||
95 | mlog_errno(ret); | ||
96 | goto out; | ||
97 | } | ||
98 | |||
99 | el = path_leaf_el(path); | ||
100 | |||
101 | index = ocfs2_search_extent_list(el, cpos); | ||
102 | if (index == -1 || index >= le16_to_cpu(el->l_next_free_rec)) { | ||
103 | ocfs2_error(inode->i_sb, | ||
104 | "Inode %llu has an extent at cpos %u which can no " | ||
105 | "longer be found.\n", | ||
106 | (unsigned long long)ino, cpos); | ||
107 | ret = -EROFS; | ||
108 | goto out; | ||
109 | } | ||
110 | |||
111 | rec = &el->l_recs[index]; | ||
112 | |||
113 | BUG_ON(ext_flags != rec->e_flags); | ||
114 | /* | ||
115 | * after moving/defragging to the new location, the extent is not going | ||
116 | * to be refcounted anymore. | ||
117 | */ | ||
118 | replace_rec.e_flags = ext_flags & ~OCFS2_EXT_REFCOUNTED; | ||
119 | |||
120 | ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), | ||
121 | context->et.et_root_bh, | ||
122 | OCFS2_JOURNAL_ACCESS_WRITE); | ||
123 | if (ret) { | ||
124 | mlog_errno(ret); | ||
125 | goto out; | ||
126 | } | ||
127 | |||
128 | ret = ocfs2_split_extent(handle, &context->et, path, index, | ||
129 | &replace_rec, context->meta_ac, | ||
130 | &context->dealloc); | ||
131 | if (ret) { | ||
132 | mlog_errno(ret); | ||
133 | goto out; | ||
134 | } | ||
135 | |||
136 | ocfs2_journal_dirty(handle, context->et.et_root_bh); | ||
137 | |||
138 | context->new_phys_cpos = new_p_cpos; | ||
139 | |||
140 | /* | ||
141 | * do we need to append to the truncate log for the old clusters? | ||
142 | */ | ||
143 | if (old_blkno) { | ||
144 | if (ext_flags & OCFS2_EXT_REFCOUNTED) | ||
145 | ret = ocfs2_decrease_refcount(inode, handle, | ||
146 | ocfs2_blocks_to_clusters(osb->sb, | ||
147 | old_blkno), | ||
148 | len, context->meta_ac, | ||
149 | &context->dealloc, 1); | ||
150 | else | ||
151 | ret = ocfs2_truncate_log_append(osb, handle, | ||
152 | old_blkno, len); | ||
153 | } | ||
154 | |||
155 | out: | ||
156 | return ret; | ||
157 | } | ||
158 | |||
159 | /* | ||
160 | * Lock the allocators and reserve the appropriate number of bits for | ||
161 | * metadata blocks and data clusters. | ||
162 | * | ||
163 | * in some cases, we don't need to reserve clusters, just let data_ac | ||
164 | * be NULL. | ||
165 | */ | ||
166 | static int ocfs2_lock_allocators_move_extents(struct inode *inode, | ||
167 | struct ocfs2_extent_tree *et, | ||
168 | u32 clusters_to_move, | ||
169 | u32 extents_to_split, | ||
170 | struct ocfs2_alloc_context **meta_ac, | ||
171 | struct ocfs2_alloc_context **data_ac, | ||
172 | int extra_blocks, | ||
173 | int *credits) | ||
174 | { | ||
175 | int ret, num_free_extents; | ||
176 | unsigned int max_recs_needed = 2 * extents_to_split + clusters_to_move; | ||
177 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
178 | |||
179 | num_free_extents = ocfs2_num_free_extents(osb, et); | ||
180 | if (num_free_extents < 0) { | ||
181 | ret = num_free_extents; | ||
182 | mlog_errno(ret); | ||
183 | goto out; | ||
184 | } | ||
185 | |||
186 | if (!num_free_extents || | ||
187 | (ocfs2_sparse_alloc(osb) && num_free_extents < max_recs_needed)) | ||
188 | extra_blocks += ocfs2_extend_meta_needed(et->et_root_el); | ||
189 | |||
190 | ret = ocfs2_reserve_new_metadata_blocks(osb, extra_blocks, meta_ac); | ||
191 | if (ret) { | ||
192 | mlog_errno(ret); | ||
193 | goto out; | ||
194 | } | ||
195 | |||
196 | if (data_ac) { | ||
197 | ret = ocfs2_reserve_clusters(osb, clusters_to_move, data_ac); | ||
198 | if (ret) { | ||
199 | mlog_errno(ret); | ||
200 | goto out; | ||
201 | } | ||
202 | } | ||
203 | |||
204 | *credits += ocfs2_calc_extend_credits(osb->sb, et->et_root_el, | ||
205 | clusters_to_move + 2); | ||
206 | |||
207 | mlog(0, "reserve metadata_blocks: %d, data_clusters: %u, credits: %d\n", | ||
208 | extra_blocks, clusters_to_move, *credits); | ||
209 | out: | ||
210 | if (ret) { | ||
211 | if (*meta_ac) { | ||
212 | ocfs2_free_alloc_context(*meta_ac); | ||
213 | *meta_ac = NULL; | ||
214 | } | ||
215 | } | ||
216 | |||
217 | return ret; | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * Using one journal handle to guarantee the data consistency in case | ||
222 | * crash happens anywhere. | ||
223 | * | ||
224 | * XXX: defrag can end up with finishing partial extent as requested, | ||
225 | * due to not enough contiguous clusters can be found in allocator. | ||
226 | */ | ||
227 | static int ocfs2_defrag_extent(struct ocfs2_move_extents_context *context, | ||
228 | u32 cpos, u32 phys_cpos, u32 *len, int ext_flags) | ||
229 | { | ||
230 | int ret, credits = 0, extra_blocks = 0, partial = context->partial; | ||
231 | handle_t *handle; | ||
232 | struct inode *inode = context->inode; | ||
233 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
234 | struct inode *tl_inode = osb->osb_tl_inode; | ||
235 | struct ocfs2_refcount_tree *ref_tree = NULL; | ||
236 | u32 new_phys_cpos, new_len; | ||
237 | u64 phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); | ||
238 | |||
239 | if ((ext_flags & OCFS2_EXT_REFCOUNTED) && *len) { | ||
240 | |||
241 | BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & | ||
242 | OCFS2_HAS_REFCOUNT_FL)); | ||
243 | |||
244 | BUG_ON(!context->refcount_loc); | ||
245 | |||
246 | ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1, | ||
247 | &ref_tree, NULL); | ||
248 | if (ret) { | ||
249 | mlog_errno(ret); | ||
250 | return ret; | ||
251 | } | ||
252 | |||
253 | ret = ocfs2_prepare_refcount_change_for_del(inode, | ||
254 | context->refcount_loc, | ||
255 | phys_blkno, | ||
256 | *len, | ||
257 | &credits, | ||
258 | &extra_blocks); | ||
259 | if (ret) { | ||
260 | mlog_errno(ret); | ||
261 | goto out; | ||
262 | } | ||
263 | } | ||
264 | |||
265 | ret = ocfs2_lock_allocators_move_extents(inode, &context->et, *len, 1, | ||
266 | &context->meta_ac, | ||
267 | &context->data_ac, | ||
268 | extra_blocks, &credits); | ||
269 | if (ret) { | ||
270 | mlog_errno(ret); | ||
271 | goto out; | ||
272 | } | ||
273 | |||
274 | /* | ||
275 | * should we be using the allocation reservation strategy here? | ||
276 | * | ||
277 | * if (context->data_ac) | ||
278 | * context->data_ac->ac_resv = &OCFS2_I(inode)->ip_la_data_resv; | ||
279 | */ | ||
280 | |||
281 | mutex_lock(&tl_inode->i_mutex); | ||
282 | |||
283 | if (ocfs2_truncate_log_needs_flush(osb)) { | ||
284 | ret = __ocfs2_flush_truncate_log(osb); | ||
285 | if (ret < 0) { | ||
286 | mlog_errno(ret); | ||
287 | goto out_unlock_mutex; | ||
288 | } | ||
289 | } | ||
290 | |||
291 | handle = ocfs2_start_trans(osb, credits); | ||
292 | if (IS_ERR(handle)) { | ||
293 | ret = PTR_ERR(handle); | ||
294 | mlog_errno(ret); | ||
295 | goto out_unlock_mutex; | ||
296 | } | ||
297 | |||
298 | ret = __ocfs2_claim_clusters(handle, context->data_ac, 1, *len, | ||
299 | &new_phys_cpos, &new_len); | ||
300 | if (ret) { | ||
301 | mlog_errno(ret); | ||
302 | goto out_commit; | ||
303 | } | ||
304 | |||
305 | /* | ||
306 | * allowing partial extent moving is a trade-off: it makes the whole | ||
307 | * defragmentation less likely to fail, but it may also leave the fs | ||
308 | * even more fragmented after moving, so let userspace make the | ||
309 | * decision here. | ||
310 | */ | ||
311 | if (new_len != *len) { | ||
312 | mlog(0, "len_claimed: %u, len: %u\n", new_len, *len); | ||
313 | if (!partial) { | ||
314 | context->range->me_flags &= ~OCFS2_MOVE_EXT_FL_COMPLETE; | ||
315 | ret = -ENOSPC; | ||
316 | goto out_commit; | ||
317 | } | ||
318 | } | ||
319 | |||
320 | mlog(0, "cpos: %u, phys_cpos: %u, new_phys_cpos: %u\n", cpos, | ||
321 | phys_cpos, new_phys_cpos); | ||
322 | |||
323 | ret = __ocfs2_move_extent(handle, context, cpos, new_len, phys_cpos, | ||
324 | new_phys_cpos, ext_flags); | ||
325 | if (ret) | ||
326 | mlog_errno(ret); | ||
327 | |||
328 | if (partial && (new_len != *len)) | ||
329 | *len = new_len; | ||
330 | |||
331 | /* | ||
332 | * Here we should write the new page out first if we are | ||
333 | * in write-back mode. | ||
334 | */ | ||
335 | ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, *len); | ||
336 | if (ret) | ||
337 | mlog_errno(ret); | ||
338 | |||
339 | out_commit: | ||
340 | ocfs2_commit_trans(osb, handle); | ||
341 | |||
342 | out_unlock_mutex: | ||
343 | mutex_unlock(&tl_inode->i_mutex); | ||
344 | |||
345 | if (context->data_ac) { | ||
346 | ocfs2_free_alloc_context(context->data_ac); | ||
347 | context->data_ac = NULL; | ||
348 | } | ||
349 | |||
350 | if (context->meta_ac) { | ||
351 | ocfs2_free_alloc_context(context->meta_ac); | ||
352 | context->meta_ac = NULL; | ||
353 | } | ||
354 | |||
355 | out: | ||
356 | if (ref_tree) | ||
357 | ocfs2_unlock_refcount_tree(osb, ref_tree, 1); | ||
358 | |||
359 | return ret; | ||
360 | } | ||
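ocfs2_defrag_extent() above claims up to *len contiguous clusters; if fewer are granted it either aborts with -ENOSPC (clearing the COMPLETE flag for userspace) or, when partial moves are allowed, simply moves what the allocator could give. A standalone model of just that decision (constants and names invented for illustration, not the kernel flags):

#include <stdio.h>
#include <errno.h>

#define FL_COMPLETE 0x1

/* Decide how many clusters to actually move.
 * Returns 0 on success, -ENOSPC if a full move was required but impossible. */
static int decide_move_len(unsigned int wanted, unsigned int granted,
                           int allow_partial, unsigned int *move_len,
                           unsigned int *flags)
{
        if (granted == wanted) {
                *move_len = wanted;
                return 0;
        }
        if (!allow_partial) {
                *flags &= ~FL_COMPLETE;   /* tell userspace the move is incomplete */
                return -ENOSPC;
        }
        *move_len = granted;              /* move what the allocator could give us */
        return 0;
}

int main(void)
{
        unsigned int move_len = 0, flags = FL_COMPLETE;

        int ret = decide_move_len(64, 40, 1, &move_len, &flags);
        printf("partial allowed: ret=%d move_len=%u\n", ret, move_len);

        flags = FL_COMPLETE;
        ret = decide_move_len(64, 40, 0, &move_len, &flags);
        printf("partial denied:  ret=%d complete=%d\n", ret,
               !!(flags & FL_COMPLETE));
        return 0;
}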
361 | |||
362 | /* | ||
363 | * find the victim alloc group, where #blkno fits. | ||
364 | */ | ||
365 | static int ocfs2_find_victim_alloc_group(struct inode *inode, | ||
366 | u64 vict_blkno, | ||
367 | int type, int slot, | ||
368 | int *vict_bit, | ||
369 | struct buffer_head **ret_bh) | ||
370 | { | ||
371 | int ret, i, bits_per_unit = 0; | ||
372 | u64 blkno; | ||
373 | char namebuf[40]; | ||
374 | |||
375 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
376 | struct buffer_head *ac_bh = NULL, *gd_bh = NULL; | ||
377 | struct ocfs2_chain_list *cl; | ||
378 | struct ocfs2_chain_rec *rec; | ||
379 | struct ocfs2_dinode *ac_dinode; | ||
380 | struct ocfs2_group_desc *bg; | ||
381 | |||
382 | ocfs2_sprintf_system_inode_name(namebuf, sizeof(namebuf), type, slot); | ||
383 | ret = ocfs2_lookup_ino_from_name(osb->sys_root_inode, namebuf, | ||
384 | strlen(namebuf), &blkno); | ||
385 | if (ret) { | ||
386 | ret = -ENOENT; | ||
387 | goto out; | ||
388 | } | ||
389 | |||
390 | ret = ocfs2_read_blocks_sync(osb, blkno, 1, &ac_bh); | ||
391 | if (ret) { | ||
392 | mlog_errno(ret); | ||
393 | goto out; | ||
394 | } | ||
395 | |||
396 | ac_dinode = (struct ocfs2_dinode *)ac_bh->b_data; | ||
397 | cl = &(ac_dinode->id2.i_chain); | ||
398 | rec = &(cl->cl_recs[0]); | ||
399 | |||
400 | if (type == GLOBAL_BITMAP_SYSTEM_INODE) | ||
401 | bits_per_unit = osb->s_clustersize_bits - | ||
402 | inode->i_sb->s_blocksize_bits; | ||
403 | /* | ||
404 | * bail out if 'vict_blkno' is out of the valid range. | ||
405 | */ | ||
406 | if ((vict_blkno < le64_to_cpu(rec->c_blkno)) || | ||
407 | (vict_blkno >= (le32_to_cpu(ac_dinode->id1.bitmap1.i_total) << | ||
408 | bits_per_unit))) { | ||
409 | ret = -EINVAL; | ||
410 | goto out; | ||
411 | } | ||
412 | |||
413 | for (i = 0; i < le16_to_cpu(cl->cl_next_free_rec); i++) { | ||
414 | |||
415 | rec = &(cl->cl_recs[i]); | ||
416 | if (!rec) | ||
417 | continue; | ||
418 | |||
419 | bg = NULL; | ||
420 | |||
421 | do { | ||
422 | if (!bg) | ||
423 | blkno = le64_to_cpu(rec->c_blkno); | ||
424 | else | ||
425 | blkno = le64_to_cpu(bg->bg_next_group); | ||
426 | |||
427 | if (gd_bh) { | ||
428 | brelse(gd_bh); | ||
429 | gd_bh = NULL; | ||
430 | } | ||
431 | |||
432 | ret = ocfs2_read_blocks_sync(osb, blkno, 1, &gd_bh); | ||
433 | if (ret) { | ||
434 | mlog_errno(ret); | ||
435 | goto out; | ||
436 | } | ||
437 | |||
438 | bg = (struct ocfs2_group_desc *)gd_bh->b_data; | ||
439 | |||
440 | if (vict_blkno < (le64_to_cpu(bg->bg_blkno) + | ||
441 | le16_to_cpu(bg->bg_bits))) { | ||
442 | |||
443 | *ret_bh = gd_bh; | ||
444 | *vict_bit = (vict_blkno - blkno) >> | ||
445 | bits_per_unit; | ||
446 | mlog(0, "find the victim group: #%llu, " | ||
447 | "total_bits: %u, vict_bit: %u\n", | ||
448 | blkno, le16_to_cpu(bg->bg_bits), | ||
449 | *vict_bit); | ||
450 | goto out; | ||
451 | } | ||
452 | |||
453 | } while (le64_to_cpu(bg->bg_next_group)); | ||
454 | } | ||
455 | |||
456 | ret = -EINVAL; | ||
457 | out: | ||
458 | brelse(ac_bh); | ||
459 | |||
460 | /* | ||
461 | * caller has to release the gd_bh properly. | ||
462 | */ | ||
463 | return ret; | ||
464 | } | ||
465 | |||
466 | /* | ||
467 | * XXX: helper to validate and adjust moving goal. | ||
468 | */ | ||
469 | static int ocfs2_validate_and_adjust_move_goal(struct inode *inode, | ||
470 | struct ocfs2_move_extents *range) | ||
471 | { | ||
472 | int ret, goal_bit = 0; | ||
473 | |||
474 | struct buffer_head *gd_bh = NULL; | ||
475 | struct ocfs2_group_desc *bg = NULL; | ||
476 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
477 | int c_to_b = 1 << (osb->s_clustersize_bits - | ||
478 | inode->i_sb->s_blocksize_bits); | ||
479 | |||
480 | /* | ||
481 | * make goal become cluster aligned. | ||
482 | */ | ||
483 | range->me_goal = ocfs2_block_to_cluster_start(inode->i_sb, | ||
484 | range->me_goal); | ||
485 | /* | ||
486 | * the moving goal is not allowed to start at a group descriptor | ||
487 | * block (block #0 of a group), so fall back to the next cluster. | ||
488 | */ | ||
489 | if (range->me_goal == le64_to_cpu(bg->bg_blkno)) | ||
490 | range->me_goal += c_to_b; | ||
491 | |||
492 | /* | ||
493 | * validate goal sits within global_bitmap, and return the victim | ||
494 | * group desc | ||
495 | */ | ||
496 | ret = ocfs2_find_victim_alloc_group(inode, range->me_goal, | ||
497 | GLOBAL_BITMAP_SYSTEM_INODE, | ||
498 | OCFS2_INVALID_SLOT, | ||
499 | &goal_bit, &gd_bh); | ||
500 | if (ret) | ||
501 | goto out; | ||
502 | |||
503 | bg = (struct ocfs2_group_desc *)gd_bh->b_data; | ||
504 | |||
505 | /* | ||
506 | * the movement is not allowed to cross two groups. | ||
507 | */ | ||
508 | if ((le16_to_cpu(bg->bg_bits) - goal_bit) * osb->s_clustersize < | ||
509 | range->me_len) { | ||
510 | ret = -EINVAL; | ||
511 | goto out; | ||
512 | } | ||
513 | /* | ||
514 | * more exact validations/adjustments will be performed later during | ||
515 | * the moving operation for each extent range. | ||
516 | */ | ||
517 | mlog(0, "extents get ready to be moved to #%llu block\n", | ||
518 | range->me_goal); | ||
519 | |||
520 | out: | ||
521 | brelse(gd_bh); | ||
522 | |||
523 | return ret; | ||
524 | } | ||
525 | |||
526 | static void ocfs2_probe_alloc_group(struct inode *inode, struct buffer_head *bh, | ||
527 | int *goal_bit, u32 move_len, u32 max_hop, | ||
528 | u32 *phys_cpos) | ||
529 | { | ||
530 | int i, used, last_free_bits = 0, base_bit = *goal_bit; | ||
531 | struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data; | ||
532 | u32 base_cpos = ocfs2_blocks_to_clusters(inode->i_sb, | ||
533 | le64_to_cpu(gd->bg_blkno)); | ||
534 | |||
535 | for (i = base_bit; i < le16_to_cpu(gd->bg_bits); i++) { | ||
536 | |||
537 | used = ocfs2_test_bit(i, (unsigned long *)gd->bg_bitmap); | ||
538 | if (used) { | ||
539 | /* | ||
540 | * we have searched up to 'max_hop' bits beyond the base bit, | ||
541 | * but still found no free chunk large enough. | ||
542 | */ | ||
543 | if ((i - base_bit) > max_hop) { | ||
544 | *phys_cpos = 0; | ||
545 | break; | ||
546 | } | ||
547 | |||
548 | if (last_free_bits) | ||
549 | last_free_bits = 0; | ||
550 | |||
551 | continue; | ||
552 | } else | ||
553 | last_free_bits++; | ||
554 | |||
555 | if (last_free_bits == move_len) { | ||
556 | *goal_bit = i; | ||
557 | *phys_cpos = base_cpos + i; | ||
558 | break; | ||
559 | } | ||
560 | } | ||
561 | |||
562 | mlog(0, "found phys_cpos: %u to fit the wanted moving.\n", *phys_cpos); | ||
563 | } | ||
564 | |||
565 | static int ocfs2_alloc_dinode_update_counts(struct inode *inode, | ||
566 | handle_t *handle, | ||
567 | struct buffer_head *di_bh, | ||
568 | u32 num_bits, | ||
569 | u16 chain) | ||
570 | { | ||
571 | int ret; | ||
572 | u32 tmp_used; | ||
573 | struct ocfs2_dinode *di = (struct ocfs2_dinode *) di_bh->b_data; | ||
574 | struct ocfs2_chain_list *cl = | ||
575 | (struct ocfs2_chain_list *) &di->id2.i_chain; | ||
576 | |||
577 | ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, | ||
578 | OCFS2_JOURNAL_ACCESS_WRITE); | ||
579 | if (ret < 0) { | ||
580 | mlog_errno(ret); | ||
581 | goto out; | ||
582 | } | ||
583 | |||
584 | tmp_used = le32_to_cpu(di->id1.bitmap1.i_used); | ||
585 | di->id1.bitmap1.i_used = cpu_to_le32(num_bits + tmp_used); | ||
586 | le32_add_cpu(&cl->cl_recs[chain].c_free, -num_bits); | ||
587 | ocfs2_journal_dirty(handle, di_bh); | ||
588 | |||
589 | out: | ||
590 | return ret; | ||
591 | } | ||
592 | |||
593 | static inline int ocfs2_block_group_set_bits(handle_t *handle, | ||
594 | struct inode *alloc_inode, | ||
595 | struct ocfs2_group_desc *bg, | ||
596 | struct buffer_head *group_bh, | ||
597 | unsigned int bit_off, | ||
598 | unsigned int num_bits) | ||
599 | { | ||
600 | int status; | ||
601 | void *bitmap = bg->bg_bitmap; | ||
602 | int journal_type = OCFS2_JOURNAL_ACCESS_WRITE; | ||
603 | |||
604 | /* All callers get the descriptor via | ||
605 | * ocfs2_read_group_descriptor(). Any corruption is a code bug. */ | ||
606 | BUG_ON(!OCFS2_IS_VALID_GROUP_DESC(bg)); | ||
607 | BUG_ON(le16_to_cpu(bg->bg_free_bits_count) < num_bits); | ||
608 | |||
609 | mlog(0, "block_group_set_bits: off = %u, num = %u\n", bit_off, | ||
610 | num_bits); | ||
611 | |||
612 | if (ocfs2_is_cluster_bitmap(alloc_inode)) | ||
613 | journal_type = OCFS2_JOURNAL_ACCESS_UNDO; | ||
614 | |||
615 | status = ocfs2_journal_access_gd(handle, | ||
616 | INODE_CACHE(alloc_inode), | ||
617 | group_bh, | ||
618 | journal_type); | ||
619 | if (status < 0) { | ||
620 | mlog_errno(status); | ||
621 | goto bail; | ||
622 | } | ||
623 | |||
624 | le16_add_cpu(&bg->bg_free_bits_count, -num_bits); | ||
625 | if (le16_to_cpu(bg->bg_free_bits_count) > le16_to_cpu(bg->bg_bits)) { | ||
626 | ocfs2_error(alloc_inode->i_sb, "Group descriptor # %llu has bit" | ||
627 | " count %u but claims %u are freed. num_bits %d", | ||
628 | (unsigned long long)le64_to_cpu(bg->bg_blkno), | ||
629 | le16_to_cpu(bg->bg_bits), | ||
630 | le16_to_cpu(bg->bg_free_bits_count), num_bits); | ||
631 | return -EROFS; | ||
632 | } | ||
633 | while (num_bits--) | ||
634 | ocfs2_set_bit(bit_off++, bitmap); | ||
635 | |||
636 | ocfs2_journal_dirty(handle, group_bh); | ||
637 | |||
638 | bail: | ||
639 | return status; | ||
640 | } | ||
641 | |||
642 | static int ocfs2_move_extent(struct ocfs2_move_extents_context *context, | ||
643 | u32 cpos, u32 phys_cpos, u32 *new_phys_cpos, | ||
644 | u32 len, int ext_flags) | ||
645 | { | ||
646 | int ret, credits = 0, extra_blocks = 0, goal_bit = 0; | ||
647 | handle_t *handle; | ||
648 | struct inode *inode = context->inode; | ||
649 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
650 | struct inode *tl_inode = osb->osb_tl_inode; | ||
651 | struct inode *gb_inode = NULL; | ||
652 | struct buffer_head *gb_bh = NULL; | ||
653 | struct buffer_head *gd_bh = NULL; | ||
654 | struct ocfs2_group_desc *gd; | ||
655 | struct ocfs2_refcount_tree *ref_tree = NULL; | ||
656 | u32 move_max_hop = ocfs2_blocks_to_clusters(inode->i_sb, | ||
657 | context->range->me_threshold); | ||
658 | u64 phys_blkno, new_phys_blkno; | ||
659 | |||
660 | phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, phys_cpos); | ||
661 | |||
662 | if ((ext_flags & OCFS2_EXT_REFCOUNTED) && len) { | ||
663 | |||
664 | BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & | ||
665 | OCFS2_HAS_REFCOUNT_FL)); | ||
666 | |||
667 | BUG_ON(!context->refcount_loc); | ||
668 | |||
669 | ret = ocfs2_lock_refcount_tree(osb, context->refcount_loc, 1, | ||
670 | &ref_tree, NULL); | ||
671 | if (ret) { | ||
672 | mlog_errno(ret); | ||
673 | return ret; | ||
674 | } | ||
675 | |||
676 | ret = ocfs2_prepare_refcount_change_for_del(inode, | ||
677 | context->refcount_loc, | ||
678 | phys_blkno, | ||
679 | len, | ||
680 | &credits, | ||
681 | &extra_blocks); | ||
682 | if (ret) { | ||
683 | mlog_errno(ret); | ||
684 | goto out; | ||
685 | } | ||
686 | } | ||
687 | |||
688 | ret = ocfs2_lock_allocators_move_extents(inode, &context->et, len, 1, | ||
689 | &context->meta_ac, | ||
690 | NULL, extra_blocks, &credits); | ||
691 | if (ret) { | ||
692 | mlog_errno(ret); | ||
693 | goto out; | ||
694 | } | ||
695 | |||
696 | /* | ||
697 | * need to count 2 extra credits for global_bitmap inode and | ||
698 | * group descriptor. | ||
699 | */ | ||
700 | credits += OCFS2_INODE_UPDATE_CREDITS + 1; | ||
701 | |||
702 | /* | ||
703 | * ocfs2_move_extent() does not reserve any clusters via | ||
704 | * lock_allocators(), but we still need to lock the global_bitmap. | ||
705 | */ | ||
706 | gb_inode = ocfs2_get_system_file_inode(osb, GLOBAL_BITMAP_SYSTEM_INODE, | ||
707 | OCFS2_INVALID_SLOT); | ||
708 | if (!gb_inode) { | ||
709 | mlog(ML_ERROR, "unable to get global_bitmap inode\n"); | ||
710 | ret = -EIO; | ||
711 | goto out; | ||
712 | } | ||
713 | |||
714 | mutex_lock(&gb_inode->i_mutex); | ||
715 | |||
716 | ret = ocfs2_inode_lock(gb_inode, &gb_bh, 1); | ||
717 | if (ret) { | ||
718 | mlog_errno(ret); | ||
719 | goto out_unlock_gb_mutex; | ||
720 | } | ||
721 | |||
722 | mutex_lock(&tl_inode->i_mutex); | ||
723 | |||
724 | handle = ocfs2_start_trans(osb, credits); | ||
725 | if (IS_ERR(handle)) { | ||
726 | ret = PTR_ERR(handle); | ||
727 | mlog_errno(ret); | ||
728 | goto out_unlock_tl_inode; | ||
729 | } | ||
730 | |||
731 | new_phys_blkno = ocfs2_clusters_to_blocks(inode->i_sb, *new_phys_cpos); | ||
732 | ret = ocfs2_find_victim_alloc_group(inode, new_phys_blkno, | ||
733 | GLOBAL_BITMAP_SYSTEM_INODE, | ||
734 | OCFS2_INVALID_SLOT, | ||
735 | &goal_bit, &gd_bh); | ||
736 | if (ret) { | ||
737 | mlog_errno(ret); | ||
738 | goto out_commit; | ||
739 | } | ||
740 | |||
741 | /* | ||
742 | * probe the victim cluster group to find a region that fits | ||
743 | * the wanted movement; it will even make a best-effort | ||
744 | * attempt by compromising to a 'threshold' distance around | ||
745 | * the goal. | ||
746 | */ | ||
747 | ocfs2_probe_alloc_group(inode, gd_bh, &goal_bit, len, move_max_hop, | ||
748 | new_phys_cpos); | ||
749 | if (!*new_phys_cpos) { | ||
750 | ret = -ENOSPC; | ||
751 | goto out_commit; | ||
752 | } | ||
753 | |||
754 | ret = __ocfs2_move_extent(handle, context, cpos, len, phys_cpos, | ||
755 | *new_phys_cpos, ext_flags); | ||
756 | if (ret) { | ||
757 | mlog_errno(ret); | ||
758 | goto out_commit; | ||
759 | } | ||
760 | |||
761 | gd = (struct ocfs2_group_desc *)gd_bh->b_data; | ||
762 | ret = ocfs2_alloc_dinode_update_counts(gb_inode, handle, gb_bh, len, | ||
763 | le16_to_cpu(gd->bg_chain)); | ||
764 | if (ret) { | ||
765 | mlog_errno(ret); | ||
766 | goto out_commit; | ||
767 | } | ||
768 | |||
769 | ret = ocfs2_block_group_set_bits(handle, gb_inode, gd, gd_bh, | ||
770 | goal_bit, len); | ||
771 | if (ret) | ||
772 | mlog_errno(ret); | ||
773 | |||
774 | /* | ||
775 | * Here we should write the new page out first if we are | ||
776 | * in write-back mode. | ||
777 | */ | ||
778 | ret = ocfs2_cow_sync_writeback(inode->i_sb, context->inode, cpos, len); | ||
779 | if (ret) | ||
780 | mlog_errno(ret); | ||
781 | |||
782 | out_commit: | ||
783 | ocfs2_commit_trans(osb, handle); | ||
784 | brelse(gd_bh); | ||
785 | |||
786 | out_unlock_tl_inode: | ||
787 | mutex_unlock(&tl_inode->i_mutex); | ||
788 | |||
789 | ocfs2_inode_unlock(gb_inode, 1); | ||
790 | out_unlock_gb_mutex: | ||
791 | mutex_unlock(&gb_inode->i_mutex); | ||
792 | brelse(gb_bh); | ||
793 | iput(gb_inode); | ||
794 | |||
795 | out: | ||
796 | if (context->meta_ac) { | ||
797 | ocfs2_free_alloc_context(context->meta_ac); | ||
798 | context->meta_ac = NULL; | ||
799 | } | ||
800 | |||
801 | if (ref_tree) | ||
802 | ocfs2_unlock_refcount_tree(osb, ref_tree, 1); | ||
803 | |||
804 | return ret; | ||
805 | } | ||
806 | |||
807 | /* | ||
808 | * Helper to calculate the defraging length in one run according to threshold. | ||
809 | */ | ||
810 | static void ocfs2_calc_extent_defrag_len(u32 *alloc_size, u32 *len_defraged, | ||
811 | u32 threshold, int *skip) | ||
812 | { | ||
813 | if ((*alloc_size + *len_defraged) < threshold) { | ||
814 | /* | ||
815 | * keep defragmenting until we reach the threshold | ||
816 | */ | ||
817 | *len_defraged += *alloc_size; | ||
818 | } else if (*len_defraged == 0) { | ||
819 | /* | ||
820 | * XXX: skip a large extent. | ||
821 | */ | ||
822 | *skip = 1; | ||
823 | } else { | ||
824 | /* | ||
825 | * split this extent so that, together with the former | ||
826 | * pieces, it reaches the threshold. | ||
827 | * | ||
828 | * we're done here with one defragmentation cycle of size | ||
829 | * 'threshold'; resetting 'len_defraged' forces a new | ||
830 | * cycle. | ||
831 | */ | ||
832 | *alloc_size = threshold - *len_defraged; | ||
833 | *len_defraged = 0; | ||
834 | } | ||
835 | } | ||
836 | |||
837 | static int __ocfs2_move_extents_range(struct buffer_head *di_bh, | ||
838 | struct ocfs2_move_extents_context *context) | ||
839 | { | ||
840 | int ret = 0, flags, do_defrag, skip = 0; | ||
841 | u32 cpos, phys_cpos, move_start, len_to_move, alloc_size; | ||
842 | u32 len_defraged = 0, defrag_thresh = 0, new_phys_cpos = 0; | ||
843 | |||
844 | struct inode *inode = context->inode; | ||
845 | struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data; | ||
846 | struct ocfs2_move_extents *range = context->range; | ||
847 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
848 | |||
849 | if ((inode->i_size == 0) || (range->me_len == 0)) | ||
850 | return 0; | ||
851 | |||
852 | if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) | ||
853 | return 0; | ||
854 | |||
855 | context->refcount_loc = le64_to_cpu(di->i_refcount_loc); | ||
856 | |||
857 | ocfs2_init_dinode_extent_tree(&context->et, INODE_CACHE(inode), di_bh); | ||
858 | ocfs2_init_dealloc_ctxt(&context->dealloc); | ||
859 | |||
860 | /* | ||
861 | * TO-DO XXX: | ||
862 | * | ||
863 | * - xattr extents. | ||
864 | */ | ||
865 | |||
866 | do_defrag = context->auto_defrag; | ||
867 | |||
868 | /* | ||
869 | * extent moving happens in units of clusters; for the sake | ||
870 | * of simplicity, we may ignore the two partial clusters that | ||
871 | * 'byte_start' and 'byte_start + len' fall within. | ||
872 | */ | ||
873 | move_start = ocfs2_clusters_for_bytes(osb->sb, range->me_start); | ||
874 | len_to_move = (range->me_start + range->me_len) >> | ||
875 | osb->s_clustersize_bits; | ||
876 | if (len_to_move >= move_start) | ||
877 | len_to_move -= move_start; | ||
878 | else | ||
879 | len_to_move = 0; | ||
880 | |||
881 | if (do_defrag) { | ||
882 | defrag_thresh = range->me_threshold >> osb->s_clustersize_bits; | ||
883 | if (defrag_thresh <= 1) | ||
884 | goto done; | ||
885 | } else | ||
886 | new_phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, | ||
887 | range->me_goal); | ||
888 | |||
889 | mlog(0, "Inode: %llu, start: %llu, len: %llu, cstart: %u, clen: %u, " | ||
890 | "thresh: %u\n", | ||
891 | (unsigned long long)OCFS2_I(inode)->ip_blkno, | ||
892 | (unsigned long long)range->me_start, | ||
893 | (unsigned long long)range->me_len, | ||
894 | move_start, len_to_move, defrag_thresh); | ||
895 | |||
896 | cpos = move_start; | ||
897 | while (len_to_move) { | ||
898 | ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &alloc_size, | ||
899 | &flags); | ||
900 | if (ret) { | ||
901 | mlog_errno(ret); | ||
902 | goto out; | ||
903 | } | ||
904 | |||
905 | if (alloc_size > len_to_move) | ||
906 | alloc_size = len_to_move; | ||
907 | |||
908 | /* | ||
909 | * XXX: how to deal with a hole: | ||
910 | * | ||
911 | * - skip the hole of course | ||
912 | * - force a new defragmentation | ||
913 | */ | ||
914 | if (!phys_cpos) { | ||
915 | if (do_defrag) | ||
916 | len_defraged = 0; | ||
917 | |||
918 | goto next; | ||
919 | } | ||
920 | |||
921 | if (do_defrag) { | ||
922 | ocfs2_calc_extent_defrag_len(&alloc_size, &len_defraged, | ||
923 | defrag_thresh, &skip); | ||
924 | /* | ||
925 | * skip large extents | ||
926 | */ | ||
927 | if (skip) { | ||
928 | skip = 0; | ||
929 | goto next; | ||
930 | } | ||
931 | |||
932 | mlog(0, "#Defrag: cpos: %u, phys_cpos: %u, " | ||
933 | "alloc_size: %u, len_defraged: %u\n", | ||
934 | cpos, phys_cpos, alloc_size, len_defraged); | ||
935 | |||
936 | ret = ocfs2_defrag_extent(context, cpos, phys_cpos, | ||
937 | &alloc_size, flags); | ||
938 | } else { | ||
939 | ret = ocfs2_move_extent(context, cpos, phys_cpos, | ||
940 | &new_phys_cpos, alloc_size, | ||
941 | flags); | ||
942 | |||
943 | new_phys_cpos += alloc_size; | ||
944 | } | ||
945 | |||
946 | if (ret < 0) { | ||
947 | mlog_errno(ret); | ||
948 | goto out; | ||
949 | } | ||
950 | |||
951 | context->clusters_moved += alloc_size; | ||
952 | next: | ||
953 | cpos += alloc_size; | ||
954 | len_to_move -= alloc_size; | ||
955 | } | ||
956 | |||
957 | done: | ||
958 | range->me_flags |= OCFS2_MOVE_EXT_FL_COMPLETE; | ||
959 | |||
960 | out: | ||
961 | range->me_moved_len = ocfs2_clusters_to_bytes(osb->sb, | ||
962 | context->clusters_moved); | ||
963 | range->me_new_offset = ocfs2_clusters_to_bytes(osb->sb, | ||
964 | context->new_phys_cpos); | ||
965 | |||
966 | ocfs2_schedule_truncate_log_flush(osb, 1); | ||
967 | ocfs2_run_deallocs(osb, &context->dealloc); | ||
968 | |||
969 | return ret; | ||
970 | } | ||
971 | |||
972 | static int ocfs2_move_extents(struct ocfs2_move_extents_context *context) | ||
973 | { | ||
974 | int status; | ||
975 | handle_t *handle; | ||
976 | struct inode *inode = context->inode; | ||
977 | struct ocfs2_dinode *di; | ||
978 | struct buffer_head *di_bh = NULL; | ||
979 | struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); | ||
980 | |||
981 | if (!inode) | ||
982 | return -ENOENT; | ||
983 | |||
984 | if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb)) | ||
985 | return -EROFS; | ||
986 | |||
987 | mutex_lock(&inode->i_mutex); | ||
988 | |||
989 | /* | ||
990 | * This prevents concurrent writes from other nodes | ||
991 | */ | ||
992 | status = ocfs2_rw_lock(inode, 1); | ||
993 | if (status) { | ||
994 | mlog_errno(status); | ||
995 | goto out; | ||
996 | } | ||
997 | |||
998 | status = ocfs2_inode_lock(inode, &di_bh, 1); | ||
999 | if (status) { | ||
1000 | mlog_errno(status); | ||
1001 | goto out_rw_unlock; | ||
1002 | } | ||
1003 | |||
1004 | /* | ||
1005 | * remember that ip_xattr_sem also needs to be held if necessary | ||
1006 | */ | ||
1007 | down_write(&OCFS2_I(inode)->ip_alloc_sem); | ||
1008 | |||
1009 | status = __ocfs2_move_extents_range(di_bh, context); | ||
1010 | |||
1011 | up_write(&OCFS2_I(inode)->ip_alloc_sem); | ||
1012 | if (status) { | ||
1013 | mlog_errno(status); | ||
1014 | goto out_inode_unlock; | ||
1015 | } | ||
1016 | |||
1017 | /* | ||
1018 | * We update ctime for these changes | ||
1019 | */ | ||
1020 | handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS); | ||
1021 | if (IS_ERR(handle)) { | ||
1022 | status = PTR_ERR(handle); | ||
1023 | mlog_errno(status); | ||
1024 | goto out_inode_unlock; | ||
1025 | } | ||
1026 | |||
1027 | status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh, | ||
1028 | OCFS2_JOURNAL_ACCESS_WRITE); | ||
1029 | if (status) { | ||
1030 | mlog_errno(status); | ||
1031 | goto out_commit; | ||
1032 | } | ||
1033 | |||
1034 | di = (struct ocfs2_dinode *)di_bh->b_data; | ||
1035 | inode->i_ctime = CURRENT_TIME; | ||
1036 | di->i_ctime = cpu_to_le64(inode->i_ctime.tv_sec); | ||
1037 | di->i_ctime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec); | ||
1038 | |||
1039 | ocfs2_journal_dirty(handle, di_bh); | ||
1040 | |||
1041 | out_commit: | ||
1042 | ocfs2_commit_trans(osb, handle); | ||
1043 | |||
1044 | out_inode_unlock: | ||
1045 | brelse(di_bh); | ||
1046 | ocfs2_inode_unlock(inode, 1); | ||
1047 | out_rw_unlock: | ||
1048 | ocfs2_rw_unlock(inode, 1); | ||
1049 | out: | ||
1050 | mutex_unlock(&inode->i_mutex); | ||
1051 | |||
1052 | return status; | ||
1053 | } | ||
1054 | |||
1055 | int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp) | ||
1056 | { | ||
1057 | int status; | ||
1058 | |||
1059 | struct inode *inode = filp->f_path.dentry->d_inode; | ||
1060 | struct ocfs2_move_extents range; | ||
1061 | struct ocfs2_move_extents_context *context = NULL; | ||
1062 | |||
1063 | status = mnt_want_write(filp->f_path.mnt); | ||
1064 | if (status) | ||
1065 | return status; | ||
1066 | |||
1067 | if ((!S_ISREG(inode->i_mode)) || !(filp->f_mode & FMODE_WRITE)) | ||
1068 | goto out; | ||
1069 | |||
1070 | if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) { | ||
1071 | status = -EPERM; | ||
1072 | goto out; | ||
1073 | } | ||
1074 | |||
1075 | context = kzalloc(sizeof(struct ocfs2_move_extents_context), GFP_NOFS); | ||
1076 | if (!context) { | ||
1077 | status = -ENOMEM; | ||
1078 | mlog_errno(status); | ||
1079 | goto out; | ||
1080 | } | ||
1081 | |||
1082 | context->inode = inode; | ||
1083 | context->file = filp; | ||
1084 | |||
1085 | if (argp) { | ||
1086 | if (copy_from_user(&range, (struct ocfs2_move_extents *)argp, | ||
1087 | sizeof(range))) { | ||
1088 | status = -EFAULT; | ||
1089 | goto out; | ||
1090 | } | ||
1091 | } else { | ||
1092 | status = -EINVAL; | ||
1093 | goto out; | ||
1094 | } | ||
1095 | |||
1096 | if (range.me_start > i_size_read(inode)) | ||
1097 | goto out; | ||
1098 | |||
1099 | if (range.me_start + range.me_len > i_size_read(inode)) | ||
1100 | range.me_len = i_size_read(inode) - range.me_start; | ||
1101 | |||
1102 | context->range = ⦥ | ||
1103 | |||
1104 | if (range.me_flags & OCFS2_MOVE_EXT_FL_AUTO_DEFRAG) { | ||
1105 | context->auto_defrag = 1; | ||
1106 | /* | ||
1107 | * ok, the default threshold for defragmentation | ||
1108 | * is 1M, since our maximum cluster size is 1M as well. | ||
1109 | * any thoughts? | ||
1110 | */ | ||
1111 | if (!range.me_threshold) | ||
1112 | range.me_threshold = 1024 * 1024; | ||
1113 | |||
1114 | if (range.me_threshold > i_size_read(inode)) | ||
1115 | range.me_threshold = i_size_read(inode); | ||
1116 | |||
1117 | if (range.me_flags & OCFS2_MOVE_EXT_FL_PART_DEFRAG) | ||
1118 | context->partial = 1; | ||
1119 | } else { | ||
1120 | /* | ||
1121 | * first make a best-effort attempt to validate and adjust the | ||
1122 | * goal (a physical address in blocks); this cannot guarantee | ||
1123 | * that the later operation will always succeed, since the | ||
1124 | * global_bitmap may change a bit over time. | ||
1125 | */ | ||
1126 | |||
1127 | status = ocfs2_validate_and_adjust_move_goal(inode, &range); | ||
1128 | if (status) | ||
1129 | goto out; | ||
1130 | } | ||
1131 | |||
1132 | status = ocfs2_move_extents(context); | ||
1133 | if (status) | ||
1134 | mlog_errno(status); | ||
1135 | out: | ||
1136 | /* | ||
1137 | * movement/defragmentation may end up only partially completed; | ||
1138 | * that is why we need to return the finished length and the new | ||
1139 | * offset to userspace even if a failure happens somewhere. | ||
1140 | */ | ||
1141 | if (argp) { | ||
1142 | if (copy_to_user((struct ocfs2_move_extents *)argp, &range, | ||
1143 | sizeof(range))) | ||
1144 | status = -EFAULT; | ||
1145 | } | ||
1146 | |||
1147 | kfree(context); | ||
1148 | |||
1149 | mnt_drop_write(filp->f_path.mnt); | ||
1150 | |||
1151 | return status; | ||
1152 | } | ||
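
As a reading aid (not part of the patch): the threshold logic in ocfs2_calc_extent_defrag_len() above is easier to see outside the kernel context. Below is a minimal, self-contained user-space sketch that mirrors that helper and traces how a run of extent lengths is chopped against a threshold; the extent lengths, the threshold of 8 clusters, and the program itself are made-up illustrations, not part of ocfs2.

	#include <stdio.h>

	/* Stand-alone copy of the defrag-length logic, for illustration only. */
	static void calc_defrag_len(unsigned *alloc_size, unsigned *len_defraged,
				    unsigned threshold, int *skip)
	{
		if ((*alloc_size + *len_defraged) < threshold) {
			/* keep accumulating until the run reaches the threshold */
			*len_defraged += *alloc_size;
		} else if (*len_defraged == 0) {
			/* a lone extent at or above the threshold is skipped */
			*skip = 1;
		} else {
			/* truncate this extent so the run totals exactly 'threshold',
			   then reset to start a new defrag cycle */
			*alloc_size = threshold - *len_defraged;
			*len_defraged = 0;
		}
	}

	int main(void)
	{
		/* made-up extent lengths (in clusters) and a made-up threshold */
		unsigned extents[] = { 3, 2, 5, 9, 1 };
		unsigned threshold = 8, len_defraged = 0;
		unsigned i;

		for (i = 0; i < sizeof(extents) / sizeof(extents[0]); i++) {
			unsigned alloc_size = extents[i];
			int skip = 0;

			calc_defrag_len(&alloc_size, &len_defraged, threshold, &skip);
			printf("extent %u: move %u cluster(s), accumulated %u, skip=%d\n",
			       i, skip ? 0 : alloc_size, len_defraged, skip);
		}
		return 0;
	}

Running it shows the three cases the helper distinguishes: small extents accumulate, an extent that would overshoot is truncated so the cycle totals exactly the threshold, and a single extent at or above the threshold with nothing accumulated is skipped.
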
diff --git a/fs/ocfs2/move_extents.h b/fs/ocfs2/move_extents.h new file mode 100644 index 000000000000..4e143e811441 --- /dev/null +++ b/fs/ocfs2/move_extents.h | |||
@@ -0,0 +1,22 @@ | |||
1 | /* -*- mode: c; c-basic-offset: 8; -*- | ||
2 | * vim: noexpandtab sw=8 ts=8 sts=0: | ||
3 | * | ||
4 | * move_extents.h | ||
5 | * | ||
6 | * Copyright (C) 2011 Oracle. All rights reserved. | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public | ||
10 | * License version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | */ | ||
17 | #ifndef OCFS2_MOVE_EXTENTS_H | ||
18 | #define OCFS2_MOVE_EXTENTS_H | ||
19 | |||
20 | int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp); | ||
21 | |||
22 | #endif /* OCFS2_MOVE_EXTENTS_H */ | ||
diff --git a/fs/ocfs2/ocfs2_ioctl.h b/fs/ocfs2/ocfs2_ioctl.h index b46f39bf7438..5b27ff1fa577 100644 --- a/fs/ocfs2/ocfs2_ioctl.h +++ b/fs/ocfs2/ocfs2_ioctl.h | |||
@@ -142,6 +142,38 @@ struct ocfs2_info_journal_size { | |||
142 | __u64 ij_journal_size; | 142 | __u64 ij_journal_size; |
143 | }; | 143 | }; |
144 | 144 | ||
145 | struct ocfs2_info_freeinode { | ||
146 | struct ocfs2_info_request ifi_req; | ||
147 | struct ocfs2_info_local_freeinode { | ||
148 | __u64 lfi_total; | ||
149 | __u64 lfi_free; | ||
150 | } ifi_stat[OCFS2_MAX_SLOTS]; | ||
151 | __u32 ifi_slotnum; /* out */ | ||
152 | __u32 ifi_pad; | ||
153 | }; | ||
154 | |||
155 | #define OCFS2_INFO_MAX_HIST (32) | ||
156 | |||
157 | struct ocfs2_info_freefrag { | ||
158 | struct ocfs2_info_request iff_req; | ||
159 | struct ocfs2_info_freefrag_stats { /* (out) */ | ||
160 | struct ocfs2_info_free_chunk_list { | ||
161 | __u32 fc_chunks[OCFS2_INFO_MAX_HIST]; | ||
162 | __u32 fc_clusters[OCFS2_INFO_MAX_HIST]; | ||
163 | } ffs_fc_hist; | ||
164 | __u32 ffs_clusters; | ||
165 | __u32 ffs_free_clusters; | ||
166 | __u32 ffs_free_chunks; | ||
167 | __u32 ffs_free_chunks_real; | ||
168 | __u32 ffs_min; /* Minimum free chunksize in clusters */ | ||
169 | __u32 ffs_max; | ||
170 | __u32 ffs_avg; | ||
171 | __u32 ffs_pad; | ||
172 | } iff_ffs; | ||
173 | __u32 iff_chunksize; /* chunksize in clusters(in) */ | ||
174 | __u32 iff_pad; | ||
175 | }; | ||
176 | |||
145 | /* Codes for ocfs2_info_request */ | 177 | /* Codes for ocfs2_info_request */ |
146 | enum ocfs2_info_type { | 178 | enum ocfs2_info_type { |
147 | OCFS2_INFO_CLUSTERSIZE = 1, | 179 | OCFS2_INFO_CLUSTERSIZE = 1, |
@@ -151,6 +183,8 @@ enum ocfs2_info_type { | |||
151 | OCFS2_INFO_UUID, | 183 | OCFS2_INFO_UUID, |
152 | OCFS2_INFO_FS_FEATURES, | 184 | OCFS2_INFO_FS_FEATURES, |
153 | OCFS2_INFO_JOURNAL_SIZE, | 185 | OCFS2_INFO_JOURNAL_SIZE, |
186 | OCFS2_INFO_FREEINODE, | ||
187 | OCFS2_INFO_FREEFRAG, | ||
154 | OCFS2_INFO_NUM_TYPES | 188 | OCFS2_INFO_NUM_TYPES |
155 | }; | 189 | }; |
156 | 190 | ||
@@ -171,4 +205,38 @@ enum ocfs2_info_type { | |||
171 | 205 | ||
172 | #define OCFS2_IOC_INFO _IOR('o', 5, struct ocfs2_info) | 206 | #define OCFS2_IOC_INFO _IOR('o', 5, struct ocfs2_info) |
173 | 207 | ||
208 | struct ocfs2_move_extents { | ||
209 | /* All values are in bytes */ | ||
210 | /* in */ | ||
211 | __u64 me_start; /* Virtual start in the file to move */ | ||
212 | __u64 me_len; /* Length of the extents to be moved */ | ||
213 | __u64 me_goal; /* Physical offset of the goal, | ||
214 | it is in block units */ | ||
215 | __u64 me_threshold; /* Maximum distance from goal or threshold | ||
216 | for auto defragmentation */ | ||
217 | __u64 me_flags; /* Flags for the operation: | ||
218 | * - auto defragmentation. | ||
219 | * - refcount,xattr cases. | ||
220 | */ | ||
221 | /* out */ | ||
222 | __u64 me_moved_len; /* Moved/defraged length */ | ||
223 | __u64 me_new_offset; /* Resulting physical location */ | ||
224 | __u32 me_reserved[2]; /* Reserved for future use */ | ||
225 | }; | ||
226 | |||
227 | #define OCFS2_MOVE_EXT_FL_AUTO_DEFRAG (0x00000001) /* Kernel itself claims | ||
228 | new clusters as the | ||
229 | destination for the | ||
230 | extents being moved */ | ||
231 | #define OCFS2_MOVE_EXT_FL_PART_DEFRAG (0x00000002) /* Allow partial extent | ||
232 | moving; this makes the | ||
233 | movement less likely to | ||
234 | fail, but may leave the | ||
235 | fs more fragmented */ | ||
236 | #define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmentation | ||
237 | fully completed. | ||
238 | */ | ||
239 | |||
240 | #define OCFS2_IOC_MOVE_EXT _IOW('o', 6, struct ocfs2_move_extents) | ||
241 | |||
174 | #endif /* OCFS2_IOCTL_H */ | 242 | #endif /* OCFS2_IOCTL_H */ |
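
For context on how the new interface above is meant to be driven (not part of the patch): a user-space program fills in struct ocfs2_move_extents and issues OCFS2_IOC_MOVE_EXT on an open, writable regular file. The sketch below is a minimal illustration; the 16MB length, the command-line handling, and the assumption that a local copy of ocfs2_ioctl.h with these definitions is available to user space are all assumptions, not something this patch ships.

	#include <stdio.h>
	#include <string.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include "ocfs2_ioctl.h"	/* assumed local copy providing the definitions above */

	int main(int argc, char **argv)
	{
		struct ocfs2_move_extents range;
		int fd;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <file-on-ocfs2>\n", argv[0]);
			return 1;
		}

		fd = open(argv[1], O_RDWR);
		if (fd < 0) {
			perror("open");
			return 1;
		}

		memset(&range, 0, sizeof(range));
		range.me_start = 0;			/* byte offset into the file */
		range.me_len = 16 * 1024 * 1024;	/* defragment the first 16MB (made-up size) */
		range.me_flags = OCFS2_MOVE_EXT_FL_AUTO_DEFRAG |
				 OCFS2_MOVE_EXT_FL_PART_DEFRAG;
		/* me_threshold left at 0: the kernel falls back to the 1MB default */

		if (ioctl(fd, OCFS2_IOC_MOVE_EXT, &range) < 0)
			perror("OCFS2_IOC_MOVE_EXT");

		printf("moved %llu bytes, complete=%d\n",
		       (unsigned long long)range.me_moved_len,
		       !!(range.me_flags & OCFS2_MOVE_EXT_FL_COMPLETE));

		close(fd);
		return 0;
	}

The flags used here match the patch: AUTO_DEFRAG asks the kernel to pick the new clusters itself, PART_DEFRAG permits partial completion, and the kernel reports back through me_moved_len, me_new_offset and the COMPLETE flag.
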
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h index a1dae5bb54ac..3b481f490633 100644 --- a/fs/ocfs2/ocfs2_trace.h +++ b/fs/ocfs2/ocfs2_trace.h | |||
@@ -688,6 +688,31 @@ TRACE_EVENT(ocfs2_cache_block_dealloc, | |||
688 | __entry->blkno, __entry->bit) | 688 | __entry->blkno, __entry->bit) |
689 | ); | 689 | ); |
690 | 690 | ||
691 | TRACE_EVENT(ocfs2_trim_extent, | ||
692 | TP_PROTO(struct super_block *sb, unsigned long long blk, | ||
693 | unsigned long long count), | ||
694 | TP_ARGS(sb, blk, count), | ||
695 | TP_STRUCT__entry( | ||
696 | __field(int, dev_major) | ||
697 | __field(int, dev_minor) | ||
698 | __field(unsigned long long, blk) | ||
699 | __field(__u64, count) | ||
700 | ), | ||
701 | TP_fast_assign( | ||
702 | __entry->dev_major = MAJOR(sb->s_dev); | ||
703 | __entry->dev_minor = MINOR(sb->s_dev); | ||
704 | __entry->blk = blk; | ||
705 | __entry->count = count; | ||
706 | ), | ||
707 | TP_printk("%d %d %llu %llu", | ||
708 | __entry->dev_major, __entry->dev_minor, | ||
709 | __entry->blk, __entry->count) | ||
710 | ); | ||
711 | |||
712 | DEFINE_OCFS2_ULL_UINT_UINT_UINT_EVENT(ocfs2_trim_group); | ||
713 | |||
714 | DEFINE_OCFS2_ULL_ULL_ULL_EVENT(ocfs2_trim_fs); | ||
715 | |||
691 | /* End of trace events for fs/ocfs2/alloc.c. */ | 716 | /* End of trace events for fs/ocfs2/alloc.c. */ |
692 | 717 | ||
693 | /* Trace events for fs/ocfs2/localalloc.c. */ | 718 | /* Trace events for fs/ocfs2/localalloc.c. */ |
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c index 3c7606cff1ab..ebfd3825f12a 100644 --- a/fs/ocfs2/refcounttree.c +++ b/fs/ocfs2/refcounttree.c | |||
@@ -66,7 +66,7 @@ struct ocfs2_cow_context { | |||
66 | u32 *num_clusters, | 66 | u32 *num_clusters, |
67 | unsigned int *extent_flags); | 67 | unsigned int *extent_flags); |
68 | int (*cow_duplicate_clusters)(handle_t *handle, | 68 | int (*cow_duplicate_clusters)(handle_t *handle, |
69 | struct ocfs2_cow_context *context, | 69 | struct file *file, |
70 | u32 cpos, u32 old_cluster, | 70 | u32 cpos, u32 old_cluster, |
71 | u32 new_cluster, u32 new_len); | 71 | u32 new_cluster, u32 new_len); |
72 | }; | 72 | }; |
@@ -2921,20 +2921,21 @@ static int ocfs2_clear_cow_buffer(handle_t *handle, struct buffer_head *bh) | |||
2921 | return 0; | 2921 | return 0; |
2922 | } | 2922 | } |
2923 | 2923 | ||
2924 | static int ocfs2_duplicate_clusters_by_page(handle_t *handle, | 2924 | int ocfs2_duplicate_clusters_by_page(handle_t *handle, |
2925 | struct ocfs2_cow_context *context, | 2925 | struct file *file, |
2926 | u32 cpos, u32 old_cluster, | 2926 | u32 cpos, u32 old_cluster, |
2927 | u32 new_cluster, u32 new_len) | 2927 | u32 new_cluster, u32 new_len) |
2928 | { | 2928 | { |
2929 | int ret = 0, partial; | 2929 | int ret = 0, partial; |
2930 | struct ocfs2_caching_info *ci = context->data_et.et_ci; | 2930 | struct inode *inode = file->f_path.dentry->d_inode; |
2931 | struct ocfs2_caching_info *ci = INODE_CACHE(inode); | ||
2931 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); | 2932 | struct super_block *sb = ocfs2_metadata_cache_get_super(ci); |
2932 | u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); | 2933 | u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); |
2933 | struct page *page; | 2934 | struct page *page; |
2934 | pgoff_t page_index; | 2935 | pgoff_t page_index; |
2935 | unsigned int from, to, readahead_pages; | 2936 | unsigned int from, to, readahead_pages; |
2936 | loff_t offset, end, map_end; | 2937 | loff_t offset, end, map_end; |
2937 | struct address_space *mapping = context->inode->i_mapping; | 2938 | struct address_space *mapping = inode->i_mapping; |
2938 | 2939 | ||
2939 | trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, | 2940 | trace_ocfs2_duplicate_clusters_by_page(cpos, old_cluster, |
2940 | new_cluster, new_len); | 2941 | new_cluster, new_len); |
@@ -2948,8 +2949,8 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, | |||
2948 | * We only duplicate pages until we reach the page contains i_size - 1. | 2949 | * We only duplicate pages until we reach the page contains i_size - 1. |
2949 | * So trim 'end' to i_size. | 2950 | * So trim 'end' to i_size. |
2950 | */ | 2951 | */ |
2951 | if (end > i_size_read(context->inode)) | 2952 | if (end > i_size_read(inode)) |
2952 | end = i_size_read(context->inode); | 2953 | end = i_size_read(inode); |
2953 | 2954 | ||
2954 | while (offset < end) { | 2955 | while (offset < end) { |
2955 | page_index = offset >> PAGE_CACHE_SHIFT; | 2956 | page_index = offset >> PAGE_CACHE_SHIFT; |
@@ -2972,10 +2973,9 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, | |||
2972 | if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) | 2973 | if (PAGE_CACHE_SIZE <= OCFS2_SB(sb)->s_clustersize) |
2973 | BUG_ON(PageDirty(page)); | 2974 | BUG_ON(PageDirty(page)); |
2974 | 2975 | ||
2975 | if (PageReadahead(page) && context->file) { | 2976 | if (PageReadahead(page)) { |
2976 | page_cache_async_readahead(mapping, | 2977 | page_cache_async_readahead(mapping, |
2977 | &context->file->f_ra, | 2978 | &file->f_ra, file, |
2978 | context->file, | ||
2979 | page, page_index, | 2979 | page, page_index, |
2980 | readahead_pages); | 2980 | readahead_pages); |
2981 | } | 2981 | } |
@@ -2999,8 +2999,7 @@ static int ocfs2_duplicate_clusters_by_page(handle_t *handle, | |||
2999 | } | 2999 | } |
3000 | } | 3000 | } |
3001 | 3001 | ||
3002 | ocfs2_map_and_dirty_page(context->inode, | 3002 | ocfs2_map_and_dirty_page(inode, handle, from, to, |
3003 | handle, from, to, | ||
3004 | page, 0, &new_block); | 3003 | page, 0, &new_block); |
3005 | mark_page_accessed(page); | 3004 | mark_page_accessed(page); |
3006 | unlock: | 3005 | unlock: |
@@ -3015,14 +3014,15 @@ unlock: | |||
3015 | return ret; | 3014 | return ret; |
3016 | } | 3015 | } |
3017 | 3016 | ||
3018 | static int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, | 3017 | int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, |
3019 | struct ocfs2_cow_context *context, | 3018 | struct file *file, |
3020 | u32 cpos, u32 old_cluster, | 3019 | u32 cpos, u32 old_cluster, |
3021 | u32 new_cluster, u32 new_len) | 3020 | u32 new_cluster, u32 new_len) |
3022 | { | 3021 | { |
3023 | int ret = 0; | 3022 | int ret = 0; |
3024 | struct super_block *sb = context->inode->i_sb; | 3023 | struct inode *inode = file->f_path.dentry->d_inode; |
3025 | struct ocfs2_caching_info *ci = context->data_et.et_ci; | 3024 | struct super_block *sb = inode->i_sb; |
3025 | struct ocfs2_caching_info *ci = INODE_CACHE(inode); | ||
3026 | int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); | 3026 | int i, blocks = ocfs2_clusters_to_blocks(sb, new_len); |
3027 | u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster); | 3027 | u64 old_block = ocfs2_clusters_to_blocks(sb, old_cluster); |
3028 | u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); | 3028 | u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster); |
@@ -3145,8 +3145,8 @@ static int ocfs2_replace_clusters(handle_t *handle, | |||
3145 | 3145 | ||
3146 | /*If the old clusters is unwritten, no need to duplicate. */ | 3146 | /*If the old clusters is unwritten, no need to duplicate. */ |
3147 | if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { | 3147 | if (!(ext_flags & OCFS2_EXT_UNWRITTEN)) { |
3148 | ret = context->cow_duplicate_clusters(handle, context, cpos, | 3148 | ret = context->cow_duplicate_clusters(handle, context->file, |
3149 | old, new, len); | 3149 | cpos, old, new, len); |
3150 | if (ret) { | 3150 | if (ret) { |
3151 | mlog_errno(ret); | 3151 | mlog_errno(ret); |
3152 | goto out; | 3152 | goto out; |
@@ -3162,22 +3162,22 @@ out: | |||
3162 | return ret; | 3162 | return ret; |
3163 | } | 3163 | } |
3164 | 3164 | ||
3165 | static int ocfs2_cow_sync_writeback(struct super_block *sb, | 3165 | int ocfs2_cow_sync_writeback(struct super_block *sb, |
3166 | struct ocfs2_cow_context *context, | 3166 | struct inode *inode, |
3167 | u32 cpos, u32 num_clusters) | 3167 | u32 cpos, u32 num_clusters) |
3168 | { | 3168 | { |
3169 | int ret = 0; | 3169 | int ret = 0; |
3170 | loff_t offset, end, map_end; | 3170 | loff_t offset, end, map_end; |
3171 | pgoff_t page_index; | 3171 | pgoff_t page_index; |
3172 | struct page *page; | 3172 | struct page *page; |
3173 | 3173 | ||
3174 | if (ocfs2_should_order_data(context->inode)) | 3174 | if (ocfs2_should_order_data(inode)) |
3175 | return 0; | 3175 | return 0; |
3176 | 3176 | ||
3177 | offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; | 3177 | offset = ((loff_t)cpos) << OCFS2_SB(sb)->s_clustersize_bits; |
3178 | end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits); | 3178 | end = offset + (num_clusters << OCFS2_SB(sb)->s_clustersize_bits); |
3179 | 3179 | ||
3180 | ret = filemap_fdatawrite_range(context->inode->i_mapping, | 3180 | ret = filemap_fdatawrite_range(inode->i_mapping, |
3181 | offset, end - 1); | 3181 | offset, end - 1); |
3182 | if (ret < 0) { | 3182 | if (ret < 0) { |
3183 | mlog_errno(ret); | 3183 | mlog_errno(ret); |
@@ -3190,7 +3190,7 @@ static int ocfs2_cow_sync_writeback(struct super_block *sb, | |||
3190 | if (map_end > end) | 3190 | if (map_end > end) |
3191 | map_end = end; | 3191 | map_end = end; |
3192 | 3192 | ||
3193 | page = find_or_create_page(context->inode->i_mapping, | 3193 | page = find_or_create_page(inode->i_mapping, |
3194 | page_index, GFP_NOFS); | 3194 | page_index, GFP_NOFS); |
3195 | BUG_ON(!page); | 3195 | BUG_ON(!page); |
3196 | 3196 | ||
@@ -3349,7 +3349,7 @@ static int ocfs2_make_clusters_writable(struct super_block *sb, | |||
3349 | * in write-back mode. | 3349 | * in write-back mode. |
3350 | */ | 3350 | */ |
3351 | if (context->get_clusters == ocfs2_di_get_clusters) { | 3351 | if (context->get_clusters == ocfs2_di_get_clusters) { |
3352 | ret = ocfs2_cow_sync_writeback(sb, context, cpos, | 3352 | ret = ocfs2_cow_sync_writeback(sb, context->inode, cpos, |
3353 | orig_num_clusters); | 3353 | orig_num_clusters); |
3354 | if (ret) | 3354 | if (ret) |
3355 | mlog_errno(ret); | 3355 | mlog_errno(ret); |
diff --git a/fs/ocfs2/refcounttree.h b/fs/ocfs2/refcounttree.h index c8ce46f7d8e3..7754608c83a4 100644 --- a/fs/ocfs2/refcounttree.h +++ b/fs/ocfs2/refcounttree.h | |||
@@ -84,6 +84,17 @@ int ocfs2_refcount_cow_xattr(struct inode *inode, | |||
84 | struct buffer_head *ref_root_bh, | 84 | struct buffer_head *ref_root_bh, |
85 | u32 cpos, u32 write_len, | 85 | u32 cpos, u32 write_len, |
86 | struct ocfs2_post_refcount *post); | 86 | struct ocfs2_post_refcount *post); |
87 | int ocfs2_duplicate_clusters_by_page(handle_t *handle, | ||
88 | struct file *file, | ||
89 | u32 cpos, u32 old_cluster, | ||
90 | u32 new_cluster, u32 new_len); | ||
91 | int ocfs2_duplicate_clusters_by_jbd(handle_t *handle, | ||
92 | struct file *file, | ||
93 | u32 cpos, u32 old_cluster, | ||
94 | u32 new_cluster, u32 new_len); | ||
95 | int ocfs2_cow_sync_writeback(struct super_block *sb, | ||
96 | struct inode *inode, | ||
97 | u32 cpos, u32 num_clusters); | ||
87 | int ocfs2_add_refcount_flag(struct inode *inode, | 98 | int ocfs2_add_refcount_flag(struct inode *inode, |
88 | struct ocfs2_extent_tree *data_et, | 99 | struct ocfs2_extent_tree *data_et, |
89 | struct ocfs2_caching_info *ref_ci, | 100 | struct ocfs2_caching_info *ref_ci, |
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c index 5a521c748859..cdbaf5e97308 100644 --- a/fs/ocfs2/super.c +++ b/fs/ocfs2/super.c | |||
@@ -41,6 +41,7 @@ | |||
41 | #include <linux/mount.h> | 41 | #include <linux/mount.h> |
42 | #include <linux/seq_file.h> | 42 | #include <linux/seq_file.h> |
43 | #include <linux/quotaops.h> | 43 | #include <linux/quotaops.h> |
44 | #include <linux/cleancache.h> | ||
44 | 45 | ||
45 | #define CREATE_TRACE_POINTS | 46 | #define CREATE_TRACE_POINTS |
46 | #include "ocfs2_trace.h" | 47 | #include "ocfs2_trace.h" |
@@ -1566,7 +1567,7 @@ static int ocfs2_show_options(struct seq_file *s, struct vfsmount *mnt) | |||
1566 | if (osb->preferred_slot != OCFS2_INVALID_SLOT) | 1567 | if (osb->preferred_slot != OCFS2_INVALID_SLOT) |
1567 | seq_printf(s, ",preferred_slot=%d", osb->preferred_slot); | 1568 | seq_printf(s, ",preferred_slot=%d", osb->preferred_slot); |
1568 | 1569 | ||
1569 | if (osb->s_atime_quantum != OCFS2_DEFAULT_ATIME_QUANTUM) | 1570 | if (!(mnt->mnt_flags & MNT_NOATIME) && !(mnt->mnt_flags & MNT_RELATIME)) |
1570 | seq_printf(s, ",atime_quantum=%u", osb->s_atime_quantum); | 1571 | seq_printf(s, ",atime_quantum=%u", osb->s_atime_quantum); |
1571 | 1572 | ||
1572 | if (osb->osb_commit_interval) | 1573 | if (osb->osb_commit_interval) |
@@ -2352,6 +2353,7 @@ static int ocfs2_initialize_super(struct super_block *sb, | |||
2352 | mlog_errno(status); | 2353 | mlog_errno(status); |
2353 | goto bail; | 2354 | goto bail; |
2354 | } | 2355 | } |
2356 | cleancache_init_shared_fs((char *)&uuid_net_key, sb); | ||
2355 | 2357 | ||
2356 | bail: | 2358 | bail: |
2357 | return status; | 2359 | return status; |
diff --git a/fs/omfs/dir.c b/fs/omfs/dir.c index de4ff29f1e05..c368360c35a1 100644 --- a/fs/omfs/dir.c +++ b/fs/omfs/dir.c | |||
@@ -240,8 +240,12 @@ static int omfs_remove(struct inode *dir, struct dentry *dentry) | |||
240 | struct inode *inode = dentry->d_inode; | 240 | struct inode *inode = dentry->d_inode; |
241 | int ret; | 241 | int ret; |
242 | 242 | ||
243 | if (S_ISDIR(inode->i_mode) && !omfs_dir_is_empty(inode)) | 243 | |
244 | return -ENOTEMPTY; | 244 | if (S_ISDIR(inode->i_mode)) { |
245 | dentry_unhash(dentry); | ||
246 | if (!omfs_dir_is_empty(inode)) | ||
247 | return -ENOTEMPTY; | ||
248 | } | ||
245 | 249 | ||
246 | ret = omfs_delete_entry(dentry); | 250 | ret = omfs_delete_entry(dentry); |
247 | if (ret) | 251 | if (ret) |
@@ -378,6 +382,9 @@ static int omfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
378 | int err; | 382 | int err; |
379 | 383 | ||
380 | if (new_inode) { | 384 | if (new_inode) { |
385 | if (S_ISDIR(new_inode->i_mode)) | ||
386 | dentry_unhash(new_dentry); | ||
387 | |||
381 | /* overwriting existing file/dir */ | 388 | /* overwriting existing file/dir */ |
382 | err = omfs_remove(new_dir, new_dentry); | 389 | err = omfs_remove(new_dir, new_dentry); |
383 | if (err) | 390 | if (err) |
diff --git a/fs/partitions/check.c b/fs/partitions/check.c index 8ed4d3433199..f82e762eeca2 100644 --- a/fs/partitions/check.c +++ b/fs/partitions/check.c | |||
@@ -256,10 +256,12 @@ ssize_t part_discard_alignment_show(struct device *dev, | |||
256 | { | 256 | { |
257 | struct hd_struct *p = dev_to_part(dev); | 257 | struct hd_struct *p = dev_to_part(dev); |
258 | struct gendisk *disk = dev_to_disk(dev); | 258 | struct gendisk *disk = dev_to_disk(dev); |
259 | unsigned int alignment = 0; | ||
259 | 260 | ||
260 | return sprintf(buf, "%u\n", | 261 | if (disk->queue) |
261 | queue_limit_discard_alignment(&disk->queue->limits, | 262 | alignment = queue_limit_discard_alignment(&disk->queue->limits, |
262 | p->start_sect)); | 263 | p->start_sect); |
264 | return sprintf(buf, "%u\n", alignment); | ||
263 | } | 265 | } |
264 | 266 | ||
265 | ssize_t part_stat_show(struct device *dev, | 267 | ssize_t part_stat_show(struct device *dev, |
diff --git a/fs/partitions/efi.c b/fs/partitions/efi.c index 19d6750d1d6c..6296b403c67a 100644 --- a/fs/partitions/efi.c +++ b/fs/partitions/efi.c | |||
@@ -310,6 +310,15 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba, | |||
310 | goto fail; | 310 | goto fail; |
311 | } | 311 | } |
312 | 312 | ||
313 | /* Check the GUID Partition Table header size */ | ||
314 | if (le32_to_cpu((*gpt)->header_size) > | ||
315 | bdev_logical_block_size(state->bdev)) { | ||
316 | pr_debug("GUID Partition Table Header size is wrong: %u > %u\n", | ||
317 | le32_to_cpu((*gpt)->header_size), | ||
318 | bdev_logical_block_size(state->bdev)); | ||
319 | goto fail; | ||
320 | } | ||
321 | |||
313 | /* Check the GUID Partition Table CRC */ | 322 | /* Check the GUID Partition Table CRC */ |
314 | origcrc = le32_to_cpu((*gpt)->header_crc32); | 323 | origcrc = le32_to_cpu((*gpt)->header_crc32); |
315 | (*gpt)->header_crc32 = 0; | 324 | (*gpt)->header_crc32 = 0; |
diff --git a/fs/proc/array.c b/fs/proc/array.c index 5e4f776b0917..9b45ee84fbcc 100644 --- a/fs/proc/array.c +++ b/fs/proc/array.c | |||
@@ -131,7 +131,7 @@ static inline void task_name(struct seq_file *m, struct task_struct *p) | |||
131 | * you can test for combinations of others with | 131 | * you can test for combinations of others with |
132 | * simple bit tests. | 132 | * simple bit tests. |
133 | */ | 133 | */ |
134 | static const char *task_state_array[] = { | 134 | static const char * const task_state_array[] = { |
135 | "R (running)", /* 0 */ | 135 | "R (running)", /* 0 */ |
136 | "S (sleeping)", /* 1 */ | 136 | "S (sleeping)", /* 1 */ |
137 | "D (disk sleep)", /* 2 */ | 137 | "D (disk sleep)", /* 2 */ |
@@ -147,7 +147,7 @@ static const char *task_state_array[] = { | |||
147 | static inline const char *get_task_state(struct task_struct *tsk) | 147 | static inline const char *get_task_state(struct task_struct *tsk) |
148 | { | 148 | { |
149 | unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state; | 149 | unsigned int state = (tsk->state & TASK_REPORT) | tsk->exit_state; |
150 | const char **p = &task_state_array[0]; | 150 | const char * const *p = &task_state_array[0]; |
151 | 151 | ||
152 | BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array)); | 152 | BUILD_BUG_ON(1 + ilog2(TASK_STATE_MAX) != ARRAY_SIZE(task_state_array)); |
153 | 153 | ||
diff --git a/fs/proc/base.c b/fs/proc/base.c index dc8bca72b002..4ede550517a6 100644 --- a/fs/proc/base.c +++ b/fs/proc/base.c | |||
@@ -894,20 +894,20 @@ static ssize_t mem_write(struct file * file, const char __user *buf, | |||
894 | if (!task) | 894 | if (!task) |
895 | goto out_no_task; | 895 | goto out_no_task; |
896 | 896 | ||
897 | copied = -ENOMEM; | ||
898 | page = (char *)__get_free_page(GFP_TEMPORARY); | ||
899 | if (!page) | ||
900 | goto out_task; | ||
901 | |||
897 | mm = check_mem_permission(task); | 902 | mm = check_mem_permission(task); |
898 | copied = PTR_ERR(mm); | 903 | copied = PTR_ERR(mm); |
899 | if (IS_ERR(mm)) | 904 | if (IS_ERR(mm)) |
900 | goto out_task; | 905 | goto out_free; |
901 | 906 | ||
902 | copied = -EIO; | 907 | copied = -EIO; |
903 | if (file->private_data != (void *)((long)current->self_exec_id)) | 908 | if (file->private_data != (void *)((long)current->self_exec_id)) |
904 | goto out_mm; | 909 | goto out_mm; |
905 | 910 | ||
906 | copied = -ENOMEM; | ||
907 | page = (char *)__get_free_page(GFP_TEMPORARY); | ||
908 | if (!page) | ||
909 | goto out_mm; | ||
910 | |||
911 | copied = 0; | 911 | copied = 0; |
912 | while (count > 0) { | 912 | while (count > 0) { |
913 | int this_len, retval; | 913 | int this_len, retval; |
@@ -929,9 +929,11 @@ static ssize_t mem_write(struct file * file, const char __user *buf, | |||
929 | count -= retval; | 929 | count -= retval; |
930 | } | 930 | } |
931 | *ppos = dst; | 931 | *ppos = dst; |
932 | free_page((unsigned long) page); | 932 | |
933 | out_mm: | 933 | out_mm: |
934 | mmput(mm); | 934 | mmput(mm); |
935 | out_free: | ||
936 | free_page((unsigned long) page); | ||
935 | out_task: | 937 | out_task: |
936 | put_task_struct(task); | 938 | put_task_struct(task); |
937 | out_no_task: | 939 | out_no_task: |
@@ -1059,7 +1061,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf, | |||
1059 | { | 1061 | { |
1060 | struct task_struct *task; | 1062 | struct task_struct *task; |
1061 | char buffer[PROC_NUMBUF]; | 1063 | char buffer[PROC_NUMBUF]; |
1062 | long oom_adjust; | 1064 | int oom_adjust; |
1063 | unsigned long flags; | 1065 | unsigned long flags; |
1064 | int err; | 1066 | int err; |
1065 | 1067 | ||
@@ -1071,7 +1073,7 @@ static ssize_t oom_adjust_write(struct file *file, const char __user *buf, | |||
1071 | goto out; | 1073 | goto out; |
1072 | } | 1074 | } |
1073 | 1075 | ||
1074 | err = strict_strtol(strstrip(buffer), 0, &oom_adjust); | 1076 | err = kstrtoint(strstrip(buffer), 0, &oom_adjust); |
1075 | if (err) | 1077 | if (err) |
1076 | goto out; | 1078 | goto out; |
1077 | if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) && | 1079 | if ((oom_adjust < OOM_ADJUST_MIN || oom_adjust > OOM_ADJUST_MAX) && |
@@ -1168,7 +1170,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf, | |||
1168 | struct task_struct *task; | 1170 | struct task_struct *task; |
1169 | char buffer[PROC_NUMBUF]; | 1171 | char buffer[PROC_NUMBUF]; |
1170 | unsigned long flags; | 1172 | unsigned long flags; |
1171 | long oom_score_adj; | 1173 | int oom_score_adj; |
1172 | int err; | 1174 | int err; |
1173 | 1175 | ||
1174 | memset(buffer, 0, sizeof(buffer)); | 1176 | memset(buffer, 0, sizeof(buffer)); |
@@ -1179,7 +1181,7 @@ static ssize_t oom_score_adj_write(struct file *file, const char __user *buf, | |||
1179 | goto out; | 1181 | goto out; |
1180 | } | 1182 | } |
1181 | 1183 | ||
1182 | err = strict_strtol(strstrip(buffer), 0, &oom_score_adj); | 1184 | err = kstrtoint(strstrip(buffer), 0, &oom_score_adj); |
1183 | if (err) | 1185 | if (err) |
1184 | goto out; | 1186 | goto out; |
1185 | if (oom_score_adj < OOM_SCORE_ADJ_MIN || | 1187 | if (oom_score_adj < OOM_SCORE_ADJ_MIN || |
@@ -1468,7 +1470,7 @@ sched_autogroup_write(struct file *file, const char __user *buf, | |||
1468 | struct inode *inode = file->f_path.dentry->d_inode; | 1470 | struct inode *inode = file->f_path.dentry->d_inode; |
1469 | struct task_struct *p; | 1471 | struct task_struct *p; |
1470 | char buffer[PROC_NUMBUF]; | 1472 | char buffer[PROC_NUMBUF]; |
1471 | long nice; | 1473 | int nice; |
1472 | int err; | 1474 | int err; |
1473 | 1475 | ||
1474 | memset(buffer, 0, sizeof(buffer)); | 1476 | memset(buffer, 0, sizeof(buffer)); |
@@ -1477,9 +1479,9 @@ sched_autogroup_write(struct file *file, const char __user *buf, | |||
1477 | if (copy_from_user(buffer, buf, count)) | 1479 | if (copy_from_user(buffer, buf, count)) |
1478 | return -EFAULT; | 1480 | return -EFAULT; |
1479 | 1481 | ||
1480 | err = strict_strtol(strstrip(buffer), 0, &nice); | 1482 | err = kstrtoint(strstrip(buffer), 0, &nice); |
1481 | if (err) | 1483 | if (err < 0) |
1482 | return -EINVAL; | 1484 | return err; |
1483 | 1485 | ||
1484 | p = get_proc_task(inode); | 1486 | p = get_proc_task(inode); |
1485 | if (!p) | 1487 | if (!p) |
@@ -1576,57 +1578,6 @@ static const struct file_operations proc_pid_set_comm_operations = { | |||
1576 | .release = single_release, | 1578 | .release = single_release, |
1577 | }; | 1579 | }; |
1578 | 1580 | ||
1579 | /* | ||
1580 | * We added or removed a vma mapping the executable. The vmas are only mapped | ||
1581 | * during exec and are not mapped with the mmap system call. | ||
1582 | * Callers must hold down_write() on the mm's mmap_sem for these | ||
1583 | */ | ||
1584 | void added_exe_file_vma(struct mm_struct *mm) | ||
1585 | { | ||
1586 | mm->num_exe_file_vmas++; | ||
1587 | } | ||
1588 | |||
1589 | void removed_exe_file_vma(struct mm_struct *mm) | ||
1590 | { | ||
1591 | mm->num_exe_file_vmas--; | ||
1592 | if ((mm->num_exe_file_vmas == 0) && mm->exe_file){ | ||
1593 | fput(mm->exe_file); | ||
1594 | mm->exe_file = NULL; | ||
1595 | } | ||
1596 | |||
1597 | } | ||
1598 | |||
1599 | void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) | ||
1600 | { | ||
1601 | if (new_exe_file) | ||
1602 | get_file(new_exe_file); | ||
1603 | if (mm->exe_file) | ||
1604 | fput(mm->exe_file); | ||
1605 | mm->exe_file = new_exe_file; | ||
1606 | mm->num_exe_file_vmas = 0; | ||
1607 | } | ||
1608 | |||
1609 | struct file *get_mm_exe_file(struct mm_struct *mm) | ||
1610 | { | ||
1611 | struct file *exe_file; | ||
1612 | |||
1613 | /* We need mmap_sem to protect against races with removal of | ||
1614 | * VM_EXECUTABLE vmas */ | ||
1615 | down_read(&mm->mmap_sem); | ||
1616 | exe_file = mm->exe_file; | ||
1617 | if (exe_file) | ||
1618 | get_file(exe_file); | ||
1619 | up_read(&mm->mmap_sem); | ||
1620 | return exe_file; | ||
1621 | } | ||
1622 | |||
1623 | void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm) | ||
1624 | { | ||
1625 | /* It's safe to write the exe_file pointer without exe_file_lock because | ||
1626 | * this is called during fork when the task is not yet in /proc */ | ||
1627 | newmm->exe_file = get_mm_exe_file(oldmm); | ||
1628 | } | ||
1629 | |||
1630 | static int proc_exe_link(struct inode *inode, struct path *exe_path) | 1581 | static int proc_exe_link(struct inode *inode, struct path *exe_path) |
1631 | { | 1582 | { |
1632 | struct task_struct *task; | 1583 | struct task_struct *task; |
diff --git a/fs/proc/stat.c b/fs/proc/stat.c index 1cffa2b8a2fc..9758b654a1bc 100644 --- a/fs/proc/stat.c +++ b/fs/proc/stat.c | |||
@@ -138,9 +138,9 @@ static int stat_open(struct inode *inode, struct file *file) | |||
138 | struct seq_file *m; | 138 | struct seq_file *m; |
139 | int res; | 139 | int res; |
140 | 140 | ||
141 | /* don't ask for more than the kmalloc() max size, currently 128 KB */ | 141 | /* don't ask for more than the kmalloc() max size */ |
142 | if (size > 128 * 1024) | 142 | if (size > KMALLOC_MAX_SIZE) |
143 | size = 128 * 1024; | 143 | size = KMALLOC_MAX_SIZE; |
144 | buf = kmalloc(size, GFP_KERNEL); | 144 | buf = kmalloc(size, GFP_KERNEL); |
145 | if (!buf) | 145 | if (!buf) |
146 | return -ENOMEM; | 146 | return -ENOMEM; |
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c index 2c9db29ea358..25b6a887adb9 100644 --- a/fs/proc/task_mmu.c +++ b/fs/proc/task_mmu.c | |||
@@ -211,7 +211,7 @@ static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) | |||
211 | { | 211 | { |
212 | struct mm_struct *mm = vma->vm_mm; | 212 | struct mm_struct *mm = vma->vm_mm; |
213 | struct file *file = vma->vm_file; | 213 | struct file *file = vma->vm_file; |
214 | int flags = vma->vm_flags; | 214 | vm_flags_t flags = vma->vm_flags; |
215 | unsigned long ino = 0; | 215 | unsigned long ino = 0; |
216 | unsigned long long pgoff = 0; | 216 | unsigned long long pgoff = 0; |
217 | unsigned long start, end; | 217 | unsigned long start, end; |
@@ -536,15 +536,17 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf, | |||
536 | char buffer[PROC_NUMBUF]; | 536 | char buffer[PROC_NUMBUF]; |
537 | struct mm_struct *mm; | 537 | struct mm_struct *mm; |
538 | struct vm_area_struct *vma; | 538 | struct vm_area_struct *vma; |
539 | long type; | 539 | int type; |
540 | int rv; | ||
540 | 541 | ||
541 | memset(buffer, 0, sizeof(buffer)); | 542 | memset(buffer, 0, sizeof(buffer)); |
542 | if (count > sizeof(buffer) - 1) | 543 | if (count > sizeof(buffer) - 1) |
543 | count = sizeof(buffer) - 1; | 544 | count = sizeof(buffer) - 1; |
544 | if (copy_from_user(buffer, buf, count)) | 545 | if (copy_from_user(buffer, buf, count)) |
545 | return -EFAULT; | 546 | return -EFAULT; |
546 | if (strict_strtol(strstrip(buffer), 10, &type)) | 547 | rv = kstrtoint(strstrip(buffer), 10, &type); |
547 | return -EINVAL; | 548 | if (rv < 0) |
549 | return rv; | ||
548 | if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED) | 550 | if (type < CLEAR_REFS_ALL || type > CLEAR_REFS_MAPPED) |
549 | return -EINVAL; | 551 | return -EINVAL; |
550 | task = get_proc_task(file->f_path.dentry->d_inode); | 552 | task = get_proc_task(file->f_path.dentry->d_inode); |
@@ -769,18 +771,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, | |||
769 | if (!task) | 771 | if (!task) |
770 | goto out; | 772 | goto out; |
771 | 773 | ||
772 | mm = mm_for_maps(task); | ||
773 | ret = PTR_ERR(mm); | ||
774 | if (!mm || IS_ERR(mm)) | ||
775 | goto out_task; | ||
776 | |||
777 | ret = -EINVAL; | 774 | ret = -EINVAL; |
778 | /* file position must be aligned */ | 775 | /* file position must be aligned */ |
779 | if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES)) | 776 | if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES)) |
780 | goto out_task; | 777 | goto out_task; |
781 | 778 | ||
782 | ret = 0; | 779 | ret = 0; |
783 | |||
784 | if (!count) | 780 | if (!count) |
785 | goto out_task; | 781 | goto out_task; |
786 | 782 | ||
@@ -788,7 +784,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, | |||
788 | pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); | 784 | pm.buffer = kmalloc(pm.len, GFP_TEMPORARY); |
789 | ret = -ENOMEM; | 785 | ret = -ENOMEM; |
790 | if (!pm.buffer) | 786 | if (!pm.buffer) |
791 | goto out_mm; | 787 | goto out_task; |
788 | |||
789 | mm = mm_for_maps(task); | ||
790 | ret = PTR_ERR(mm); | ||
791 | if (!mm || IS_ERR(mm)) | ||
792 | goto out_free; | ||
792 | 793 | ||
793 | pagemap_walk.pmd_entry = pagemap_pte_range; | 794 | pagemap_walk.pmd_entry = pagemap_pte_range; |
794 | pagemap_walk.pte_hole = pagemap_pte_hole; | 795 | pagemap_walk.pte_hole = pagemap_pte_hole; |
@@ -831,7 +832,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, | |||
831 | len = min(count, PM_ENTRY_BYTES * pm.pos); | 832 | len = min(count, PM_ENTRY_BYTES * pm.pos); |
832 | if (copy_to_user(buf, pm.buffer, len)) { | 833 | if (copy_to_user(buf, pm.buffer, len)) { |
833 | ret = -EFAULT; | 834 | ret = -EFAULT; |
834 | goto out_free; | 835 | goto out_mm; |
835 | } | 836 | } |
836 | copied += len; | 837 | copied += len; |
837 | buf += len; | 838 | buf += len; |
@@ -841,10 +842,10 @@ static ssize_t pagemap_read(struct file *file, char __user *buf, | |||
841 | if (!ret || ret == PM_END_OF_BUFFER) | 842 | if (!ret || ret == PM_END_OF_BUFFER) |
842 | ret = copied; | 843 | ret = copied; |
843 | 844 | ||
844 | out_free: | ||
845 | kfree(pm.buffer); | ||
846 | out_mm: | 845 | out_mm: |
847 | mmput(mm); | 846 | mmput(mm); |
847 | out_free: | ||
848 | kfree(pm.buffer); | ||
848 | out_task: | 849 | out_task: |
849 | put_task_struct(task); | 850 | put_task_struct(task); |
850 | out: | 851 | out: |
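In pagemap_read() the mm reference is now taken only after the cheap argument checks and the buffer allocation have succeeded, and the unwind labels are reordered so each one releases exactly what was acquired before the failure point. The shape of that "acquire in order, release in reverse" pattern, sketched with hypothetical surroundings:

    /* Illustrative only: error labels mirror the acquisition order. */
    buf = kmalloc(len, GFP_TEMPORARY);
    if (!buf) {
        ret = -ENOMEM;
        goto out_task;                  /* nothing else held yet */
    }

    mm = mm_for_maps(task);
    if (!mm || IS_ERR(mm)) {
        ret = mm ? PTR_ERR(mm) : 0;
        goto out_free;                  /* only the buffer is held */
    }

    /* ... walk the page tables, copy entries out, set ret to bytes copied ... */

    mmput(mm);
out_free:
    kfree(buf);
out_task:
    put_task_struct(task);
    return ret;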
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c index 74802bc5ded9..cd99bf557650 100644 --- a/fs/proc/vmcore.c +++ b/fs/proc/vmcore.c | |||
@@ -35,6 +35,46 @@ static u64 vmcore_size; | |||
35 | 35 | ||
36 | static struct proc_dir_entry *proc_vmcore = NULL; | 36 | static struct proc_dir_entry *proc_vmcore = NULL; |
37 | 37 | ||
38 | /* | ||
39 | * Returns > 0 for RAM pages, 0 for non-RAM pages, < 0 on error | ||
40 | * The called function has to take care of module refcounting. | ||
41 | */ | ||
42 | static int (*oldmem_pfn_is_ram)(unsigned long pfn); | ||
43 | |||
44 | int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn)) | ||
45 | { | ||
46 | if (oldmem_pfn_is_ram) | ||
47 | return -EBUSY; | ||
48 | oldmem_pfn_is_ram = fn; | ||
49 | return 0; | ||
50 | } | ||
51 | EXPORT_SYMBOL_GPL(register_oldmem_pfn_is_ram); | ||
52 | |||
53 | void unregister_oldmem_pfn_is_ram(void) | ||
54 | { | ||
55 | oldmem_pfn_is_ram = NULL; | ||
56 | wmb(); | ||
57 | } | ||
58 | EXPORT_SYMBOL_GPL(unregister_oldmem_pfn_is_ram); | ||
59 | |||
60 | static int pfn_is_ram(unsigned long pfn) | ||
61 | { | ||
62 | int (*fn)(unsigned long pfn); | ||
63 | /* pfn is ram unless fn() checks pagetype */ | ||
64 | int ret = 1; | ||
65 | |||
66 | /* | ||
67 | * Ask hypervisor if the pfn is really ram. | ||
68 | * A ballooned page contains no data and reading from such a page | ||
69 | * will cause high load in the hypervisor. | ||
70 | */ | ||
71 | fn = oldmem_pfn_is_ram; | ||
72 | if (fn) | ||
73 | ret = fn(pfn); | ||
74 | |||
75 | return ret; | ||
76 | } | ||
77 | |||
38 | /* Reads a page from the oldmem device from given offset. */ | 78 | /* Reads a page from the oldmem device from given offset. */ |
39 | static ssize_t read_from_oldmem(char *buf, size_t count, | 79 | static ssize_t read_from_oldmem(char *buf, size_t count, |
40 | u64 *ppos, int userbuf) | 80 | u64 *ppos, int userbuf) |
@@ -55,9 +95,15 @@ static ssize_t read_from_oldmem(char *buf, size_t count, | |||
55 | else | 95 | else |
56 | nr_bytes = count; | 96 | nr_bytes = count; |
57 | 97 | ||
58 | tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf); | 98 | /* If pfn is not ram, return zeros for sparse dump files */ |
59 | if (tmp < 0) | 99 | if (pfn_is_ram(pfn) == 0) |
60 | return tmp; | 100 | memset(buf, 0, nr_bytes); |
101 | else { | ||
102 | tmp = copy_oldmem_page(pfn, buf, nr_bytes, | ||
103 | offset, userbuf); | ||
104 | if (tmp < 0) | ||
105 | return tmp; | ||
106 | } | ||
61 | *ppos += nr_bytes; | 107 | *ppos += nr_bytes; |
62 | count -= nr_bytes; | 108 | count -= nr_bytes; |
63 | buf += nr_bytes; | 109 | buf += nr_bytes; |
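register_oldmem_pfn_is_ram() lets a hypervisor backend tell the vmcore reader which old-kernel PFNs are really backed by RAM, so ballooned pages are filled with zeros instead of being fetched through the hypervisor. A sketch of how a backend module might hook in; the check_pfn_ballooned() helper and the module names are hypothetical:

    /* Hypothetical backend: report ballooned PFNs as non-RAM to /proc/vmcore. */
    static int my_oldmem_pfn_is_ram(unsigned long pfn)
    {
        if (check_pfn_ballooned(pfn))   /* hypothetical lookup */
            return 0;                   /* not RAM: vmcore substitutes zeros */
        return 1;                       /* RAM: copy_oldmem_page() as usual */
    }

    static int __init my_backend_init(void)
    {
        return register_oldmem_pfn_is_ram(&my_oldmem_pfn_is_ram);
    }

    static void __exit my_backend_exit(void)
    {
        unregister_oldmem_pfn_is_ram();
    }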
diff --git a/fs/reiserfs/namei.c b/fs/reiserfs/namei.c index 118662690cdf..76c8164d5651 100644 --- a/fs/reiserfs/namei.c +++ b/fs/reiserfs/namei.c | |||
@@ -831,6 +831,8 @@ static int reiserfs_rmdir(struct inode *dir, struct dentry *dentry) | |||
831 | INITIALIZE_PATH(path); | 831 | INITIALIZE_PATH(path); |
832 | struct reiserfs_dir_entry de; | 832 | struct reiserfs_dir_entry de; |
833 | 833 | ||
834 | dentry_unhash(dentry); | ||
835 | |||
834 | /* we will be doing 2 balancings and update 2 stat data, we change quotas | 836 | /* we will be doing 2 balancings and update 2 stat data, we change quotas |
835 | * of the owner of the directory and of the owner of the parent directory. | 837 | * of the owner of the directory and of the owner of the parent directory. |
836 | * The quota structure is possibly deleted only on last iput => outside | 838 | * The quota structure is possibly deleted only on last iput => outside |
@@ -1225,6 +1227,9 @@ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1225 | unsigned long savelink = 1; | 1227 | unsigned long savelink = 1; |
1226 | struct timespec ctime; | 1228 | struct timespec ctime; |
1227 | 1229 | ||
1230 | if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode)) | ||
1231 | dentry_unhash(new_dentry); | ||
1232 | |||
1228 | /* three balancings: (1) old name removal, (2) new name insertion | 1233 | /* three balancings: (1) old name removal, (2) new name insertion |
1229 | and (3) maybe "save" link insertion | 1234 | and (3) maybe "save" link insertion |
1230 | stat data updates: (1) old directory, | 1235 | stat data updates: (1) old directory, |
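reiserfs_rmdir() now unhashes the victim dentry itself, and reiserfs_rename() unhashes the target dentry when a directory is being replaced; the same two-line pattern recurs in the sysv, ubifs, udf and ufs hunks later in this diff. The shape of the change, with a hypothetical filesystem name:

    /* Illustrative shape only: rmdir/rename after this series. */
    static int myfs_rmdir(struct inode *dir, struct dentry *dentry)
    {
        dentry_unhash(dentry);                  /* drop from the dcache hash first */
        /* ... filesystem-specific directory removal ... */
        return 0;
    }

    static int myfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                           struct inode *new_dir, struct dentry *new_dentry)
    {
        if (new_dentry->d_inode && S_ISDIR(new_dentry->d_inode->i_mode))
            dentry_unhash(new_dentry);          /* only when replacing a directory */
        /* ... filesystem-specific rename ... */
        return 0;
    }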
diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index 47d2a4498b03..50f1abccd1cd 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c | |||
@@ -105,7 +105,6 @@ static int xattr_rmdir(struct inode *dir, struct dentry *dentry) | |||
105 | mutex_unlock(&dentry->d_inode->i_mutex); | 105 | mutex_unlock(&dentry->d_inode->i_mutex); |
106 | if (!error) | 106 | if (!error) |
107 | d_delete(dentry); | 107 | d_delete(dentry); |
108 | dput(dentry); | ||
109 | 108 | ||
110 | return error; | 109 | return error; |
111 | } | 110 | } |
diff --git a/fs/squashfs/block.c b/fs/squashfs/block.c index 8ab48bc2fa7d..ed0eb2a921f4 100644 --- a/fs/squashfs/block.c +++ b/fs/squashfs/block.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/squashfs/cache.c b/fs/squashfs/cache.c index 4b5a3fbb1f1f..f744be98cd5a 100644 --- a/fs/squashfs/cache.c +++ b/fs/squashfs/cache.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
@@ -393,19 +393,36 @@ struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *sb, | |||
393 | /* | 393 | /* |
394 | * Read a filesystem table (uncompressed sequence of bytes) from disk | 394 | * Read a filesystem table (uncompressed sequence of bytes) from disk |
395 | */ | 395 | */ |
396 | int squashfs_read_table(struct super_block *sb, void *buffer, u64 block, | 396 | void *squashfs_read_table(struct super_block *sb, u64 block, int length) |
397 | int length) | ||
398 | { | 397 | { |
399 | int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; | 398 | int pages = (length + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; |
400 | int i, res; | 399 | int i, res; |
401 | void **data = kcalloc(pages, sizeof(void *), GFP_KERNEL); | 400 | void *table, *buffer, **data; |
402 | if (data == NULL) | 401 | |
403 | return -ENOMEM; | 402 | table = buffer = kmalloc(length, GFP_KERNEL); |
403 | if (table == NULL) | ||
404 | return ERR_PTR(-ENOMEM); | ||
405 | |||
406 | data = kcalloc(pages, sizeof(void *), GFP_KERNEL); | ||
407 | if (data == NULL) { | ||
408 | res = -ENOMEM; | ||
409 | goto failed; | ||
410 | } | ||
404 | 411 | ||
405 | for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) | 412 | for (i = 0; i < pages; i++, buffer += PAGE_CACHE_SIZE) |
406 | data[i] = buffer; | 413 | data[i] = buffer; |
414 | |||
407 | res = squashfs_read_data(sb, data, block, length | | 415 | res = squashfs_read_data(sb, data, block, length | |
408 | SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages); | 416 | SQUASHFS_COMPRESSED_BIT_BLOCK, NULL, length, pages); |
417 | |||
409 | kfree(data); | 418 | kfree(data); |
410 | return res; | 419 | |
420 | if (res < 0) | ||
421 | goto failed; | ||
422 | |||
423 | return table; | ||
424 | |||
425 | failed: | ||
426 | kfree(table); | ||
427 | return ERR_PTR(res); | ||
411 | } | 428 | } |
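squashfs_read_table() now allocates the buffer itself and returns either the table or an ERR_PTR, so callers stop juggling a separate kmalloc() and an int return code. The new calling convention, as a sketch:

    /* Illustrative only: new squashfs_read_table() calling convention. */
    __le64 *table;

    table = squashfs_read_table(sb, table_start, length);
    if (IS_ERR(table))
        return PTR_ERR(table);      /* or ERR_CAST(table) from a pointer-returning caller */

    /* ... use table ... */
    kfree(table);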
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c index e921bd213738..9f1b0bb96f13 100644 --- a/fs/squashfs/decompressor.c +++ b/fs/squashfs/decompressor.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h index 099745ad5691..8ba70cff09a6 100644 --- a/fs/squashfs/decompressor.h +++ b/fs/squashfs/decompressor.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Squashfs - a compressed read only filesystem for Linux | 4 | * Squashfs - a compressed read only filesystem for Linux |
5 | * | 5 | * |
6 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 | 6 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 |
7 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 7 | * Phillip Lougher <phillip@squashfs.org.uk> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/squashfs/dir.c b/fs/squashfs/dir.c index 3f79cd1d0c19..9dfe2ce0fb70 100644 --- a/fs/squashfs/dir.c +++ b/fs/squashfs/dir.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/squashfs/export.c b/fs/squashfs/export.c index 7f93d5a9ee05..730c56248c9b 100644 --- a/fs/squashfs/export.c +++ b/fs/squashfs/export.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
@@ -121,30 +121,38 @@ static struct dentry *squashfs_get_parent(struct dentry *child) | |||
121 | * Read uncompressed inode lookup table indexes off disk into memory | 121 | * Read uncompressed inode lookup table indexes off disk into memory |
122 | */ | 122 | */ |
123 | __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, | 123 | __le64 *squashfs_read_inode_lookup_table(struct super_block *sb, |
124 | u64 lookup_table_start, unsigned int inodes) | 124 | u64 lookup_table_start, u64 next_table, unsigned int inodes) |
125 | { | 125 | { |
126 | unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes); | 126 | unsigned int length = SQUASHFS_LOOKUP_BLOCK_BYTES(inodes); |
127 | __le64 *inode_lookup_table; | 127 | __le64 *table; |
128 | int err; | ||
129 | 128 | ||
130 | TRACE("In read_inode_lookup_table, length %d\n", length); | 129 | TRACE("In read_inode_lookup_table, length %d\n", length); |
131 | 130 | ||
132 | /* Allocate inode lookup table indexes */ | 131 | /* Sanity check values */ |
133 | inode_lookup_table = kmalloc(length, GFP_KERNEL); | 132 | |
134 | if (inode_lookup_table == NULL) { | 133 | /* there should always be at least one inode */ |
135 | ERROR("Failed to allocate inode lookup table\n"); | 134 | if (inodes == 0) |
136 | return ERR_PTR(-ENOMEM); | 135 | return ERR_PTR(-EINVAL); |
137 | } | 136 | |
137 | /* length bytes should not extend into the next table - this check | ||
138 | * also traps instances where lookup_table_start is incorrectly larger | ||
139 | * than the next table start | ||
140 | */ | ||
141 | if (lookup_table_start + length > next_table) | ||
142 | return ERR_PTR(-EINVAL); | ||
143 | |||
144 | table = squashfs_read_table(sb, lookup_table_start, length); | ||
138 | 145 | ||
139 | err = squashfs_read_table(sb, inode_lookup_table, lookup_table_start, | 146 | /* |
140 | length); | 147 | * table[0] points to the first inode lookup table metadata block, |
141 | if (err < 0) { | 148 | * this should be less than lookup_table_start |
142 | ERROR("unable to read inode lookup table\n"); | 149 | */ |
143 | kfree(inode_lookup_table); | 150 | if (!IS_ERR(table) && table[0] >= lookup_table_start) { |
144 | return ERR_PTR(err); | 151 | kfree(table); |
152 | return ERR_PTR(-EINVAL); | ||
145 | } | 153 | } |
146 | 154 | ||
147 | return inode_lookup_table; | 155 | return table; |
148 | } | 156 | } |
149 | 157 | ||
150 | 158 | ||
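The lookup-table reader now refuses on-disk values it cannot trust: there must be at least one inode, the table must not run into the table that follows it, and the first metadata block pointer it contains must lie below the table start. The fragment.c and id.c hunks below apply the same checks to their tables. The checks reduce to plain bounds arithmetic (sketch only; the endian conversion is spelled out here):

    /* Illustrative only: validate an index table before trusting it. */
    if (inodes == 0)                                /* at least one entry expected */
        return ERR_PTR(-EINVAL);
    if (lookup_table_start + length > next_table)   /* must not overlap the next table */
        return ERR_PTR(-EINVAL);

    table = squashfs_read_table(sb, lookup_table_start, length);
    if (!IS_ERR(table) && le64_to_cpu(table[0]) >= lookup_table_start) {
        kfree(table);                               /* first block must precede the table */
        return ERR_PTR(-EINVAL);
    }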
diff --git a/fs/squashfs/file.c b/fs/squashfs/file.c index a25c5060bdcb..38bb1c640559 100644 --- a/fs/squashfs/file.c +++ b/fs/squashfs/file.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/squashfs/fragment.c b/fs/squashfs/fragment.c index 7eef571443c6..1516a6490bfb 100644 --- a/fs/squashfs/fragment.c +++ b/fs/squashfs/fragment.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
@@ -71,26 +71,29 @@ int squashfs_frag_lookup(struct super_block *sb, unsigned int fragment, | |||
71 | * Read the uncompressed fragment lookup table indexes off disk into memory | 71 | * Read the uncompressed fragment lookup table indexes off disk into memory |
72 | */ | 72 | */ |
73 | __le64 *squashfs_read_fragment_index_table(struct super_block *sb, | 73 | __le64 *squashfs_read_fragment_index_table(struct super_block *sb, |
74 | u64 fragment_table_start, unsigned int fragments) | 74 | u64 fragment_table_start, u64 next_table, unsigned int fragments) |
75 | { | 75 | { |
76 | unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments); | 76 | unsigned int length = SQUASHFS_FRAGMENT_INDEX_BYTES(fragments); |
77 | __le64 *fragment_index; | 77 | __le64 *table; |
78 | int err; | ||
79 | 78 | ||
80 | /* Allocate fragment lookup table indexes */ | 79 | /* |
81 | fragment_index = kmalloc(length, GFP_KERNEL); | 80 | * Sanity check, length bytes should not extend into the next table - |
82 | if (fragment_index == NULL) { | 81 | * this check also traps instances where fragment_table_start is |
83 | ERROR("Failed to allocate fragment index table\n"); | 82 | * incorrectly larger than the next table start |
84 | return ERR_PTR(-ENOMEM); | 83 | */ |
85 | } | 84 | if (fragment_table_start + length > next_table) |
85 | return ERR_PTR(-EINVAL); | ||
86 | |||
87 | table = squashfs_read_table(sb, fragment_table_start, length); | ||
86 | 88 | ||
87 | err = squashfs_read_table(sb, fragment_index, fragment_table_start, | 89 | /* |
88 | length); | 90 | * table[0] points to the first fragment table metadata block, this |
89 | if (err < 0) { | 91 | * should be less than fragment_table_start |
90 | ERROR("unable to read fragment index table\n"); | 92 | */ |
91 | kfree(fragment_index); | 93 | if (!IS_ERR(table) && table[0] >= fragment_table_start) { |
92 | return ERR_PTR(err); | 94 | kfree(table); |
95 | return ERR_PTR(-EINVAL); | ||
93 | } | 96 | } |
94 | 97 | ||
95 | return fragment_index; | 98 | return table; |
96 | } | 99 | } |
diff --git a/fs/squashfs/id.c b/fs/squashfs/id.c index d8f32452638e..a70858e0fb44 100644 --- a/fs/squashfs/id.c +++ b/fs/squashfs/id.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
@@ -66,27 +66,37 @@ int squashfs_get_id(struct super_block *sb, unsigned int index, | |||
66 | * Read uncompressed id lookup table indexes from disk into memory | 66 | * Read uncompressed id lookup table indexes from disk into memory |
67 | */ | 67 | */ |
68 | __le64 *squashfs_read_id_index_table(struct super_block *sb, | 68 | __le64 *squashfs_read_id_index_table(struct super_block *sb, |
69 | u64 id_table_start, unsigned short no_ids) | 69 | u64 id_table_start, u64 next_table, unsigned short no_ids) |
70 | { | 70 | { |
71 | unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids); | 71 | unsigned int length = SQUASHFS_ID_BLOCK_BYTES(no_ids); |
72 | __le64 *id_table; | 72 | __le64 *table; |
73 | int err; | ||
74 | 73 | ||
75 | TRACE("In read_id_index_table, length %d\n", length); | 74 | TRACE("In read_id_index_table, length %d\n", length); |
76 | 75 | ||
77 | /* Allocate id lookup table indexes */ | 76 | /* Sanity check values */ |
78 | id_table = kmalloc(length, GFP_KERNEL); | 77 | |
79 | if (id_table == NULL) { | 78 | /* there should always be at least one id */ |
80 | ERROR("Failed to allocate id index table\n"); | 79 | if (no_ids == 0) |
81 | return ERR_PTR(-ENOMEM); | 80 | return ERR_PTR(-EINVAL); |
82 | } | 81 | |
82 | /* | ||
83 | * length bytes should not extend into the next table - this check | ||
84 | * also traps instances where id_table_start is incorrectly larger | ||
85 | * than the next table start | ||
86 | */ | ||
87 | if (id_table_start + length > next_table) | ||
88 | return ERR_PTR(-EINVAL); | ||
89 | |||
90 | table = squashfs_read_table(sb, id_table_start, length); | ||
83 | 91 | ||
84 | err = squashfs_read_table(sb, id_table, id_table_start, length); | 92 | /* |
85 | if (err < 0) { | 93 | * table[0] points to the first id lookup table metadata block, this |
86 | ERROR("unable to read id index table\n"); | 94 | * should be less than id_table_start |
87 | kfree(id_table); | 95 | */ |
88 | return ERR_PTR(err); | 96 | if (!IS_ERR(table) && table[0] >= id_table_start) { |
97 | kfree(table); | ||
98 | return ERR_PTR(-EINVAL); | ||
89 | } | 99 | } |
90 | 100 | ||
91 | return id_table; | 101 | return table; |
92 | } | 102 | } |
diff --git a/fs/squashfs/inode.c b/fs/squashfs/inode.c index 62e63ad25075..04bebcaa2373 100644 --- a/fs/squashfs/inode.c +++ b/fs/squashfs/inode.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/squashfs/namei.c b/fs/squashfs/namei.c index 5d922a6701ab..4bc63ac64bc0 100644 --- a/fs/squashfs/namei.c +++ b/fs/squashfs/namei.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/squashfs/squashfs.h b/fs/squashfs/squashfs.h index 1f2e608b8785..e3be6a71cfa7 100644 --- a/fs/squashfs/squashfs.h +++ b/fs/squashfs/squashfs.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
@@ -44,24 +44,24 @@ extern struct squashfs_cache_entry *squashfs_get_fragment(struct super_block *, | |||
44 | u64, int); | 44 | u64, int); |
45 | extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *, | 45 | extern struct squashfs_cache_entry *squashfs_get_datablock(struct super_block *, |
46 | u64, int); | 46 | u64, int); |
47 | extern int squashfs_read_table(struct super_block *, void *, u64, int); | 47 | extern void *squashfs_read_table(struct super_block *, u64, int); |
48 | 48 | ||
49 | /* decompressor.c */ | 49 | /* decompressor.c */ |
50 | extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int); | 50 | extern const struct squashfs_decompressor *squashfs_lookup_decompressor(int); |
51 | extern void *squashfs_decompressor_init(struct super_block *, unsigned short); | 51 | extern void *squashfs_decompressor_init(struct super_block *, unsigned short); |
52 | 52 | ||
53 | /* export.c */ | 53 | /* export.c */ |
54 | extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, | 54 | extern __le64 *squashfs_read_inode_lookup_table(struct super_block *, u64, u64, |
55 | unsigned int); | 55 | unsigned int); |
56 | 56 | ||
57 | /* fragment.c */ | 57 | /* fragment.c */ |
58 | extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *); | 58 | extern int squashfs_frag_lookup(struct super_block *, unsigned int, u64 *); |
59 | extern __le64 *squashfs_read_fragment_index_table(struct super_block *, | 59 | extern __le64 *squashfs_read_fragment_index_table(struct super_block *, |
60 | u64, unsigned int); | 60 | u64, u64, unsigned int); |
61 | 61 | ||
62 | /* id.c */ | 62 | /* id.c */ |
63 | extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); | 63 | extern int squashfs_get_id(struct super_block *, unsigned int, unsigned int *); |
64 | extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, | 64 | extern __le64 *squashfs_read_id_index_table(struct super_block *, u64, u64, |
65 | unsigned short); | 65 | unsigned short); |
66 | 66 | ||
67 | /* inode.c */ | 67 | /* inode.c */ |
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h index 4582c568ef4d..b4a4e539a08c 100644 --- a/fs/squashfs/squashfs_fs.h +++ b/fs/squashfs/squashfs_fs.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Squashfs | 4 | * Squashfs |
5 | * | 5 | * |
6 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 6 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
7 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 7 | * Phillip Lougher <phillip@squashfs.org.uk> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/squashfs/squashfs_fs_i.h b/fs/squashfs/squashfs_fs_i.h index 359baefc01fc..73588e7700ed 100644 --- a/fs/squashfs/squashfs_fs_i.h +++ b/fs/squashfs/squashfs_fs_i.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Squashfs | 4 | * Squashfs |
5 | * | 5 | * |
6 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 6 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
7 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 7 | * Phillip Lougher <phillip@squashfs.org.uk> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/squashfs/squashfs_fs_sb.h b/fs/squashfs/squashfs_fs_sb.h index d9037a5215f0..651f0b31d296 100644 --- a/fs/squashfs/squashfs_fs_sb.h +++ b/fs/squashfs/squashfs_fs_sb.h | |||
@@ -4,7 +4,7 @@ | |||
4 | * Squashfs | 4 | * Squashfs |
5 | * | 5 | * |
6 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 6 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
7 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 7 | * Phillip Lougher <phillip@squashfs.org.uk> |
8 | * | 8 | * |
9 | * This program is free software; you can redistribute it and/or | 9 | * This program is free software; you can redistribute it and/or |
10 | * modify it under the terms of the GNU General Public License | 10 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/squashfs/super.c b/fs/squashfs/super.c index 5c8184c061a4..6f26abee3597 100644 --- a/fs/squashfs/super.c +++ b/fs/squashfs/super.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
@@ -83,7 +83,7 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) | |||
83 | long long root_inode; | 83 | long long root_inode; |
84 | unsigned short flags; | 84 | unsigned short flags; |
85 | unsigned int fragments; | 85 | unsigned int fragments; |
86 | u64 lookup_table_start, xattr_id_table_start; | 86 | u64 lookup_table_start, xattr_id_table_start, next_table; |
87 | int err; | 87 | int err; |
88 | 88 | ||
89 | TRACE("Entered squashfs_fill_superblock\n"); | 89 | TRACE("Entered squashfs_fill_superblock\n"); |
@@ -95,12 +95,6 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) | |||
95 | } | 95 | } |
96 | msblk = sb->s_fs_info; | 96 | msblk = sb->s_fs_info; |
97 | 97 | ||
98 | sblk = kzalloc(sizeof(*sblk), GFP_KERNEL); | ||
99 | if (sblk == NULL) { | ||
100 | ERROR("Failed to allocate squashfs_super_block\n"); | ||
101 | goto failure; | ||
102 | } | ||
103 | |||
104 | msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE); | 98 | msblk->devblksize = sb_min_blocksize(sb, BLOCK_SIZE); |
105 | msblk->devblksize_log2 = ffz(~msblk->devblksize); | 99 | msblk->devblksize_log2 = ffz(~msblk->devblksize); |
106 | 100 | ||
@@ -114,10 +108,12 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) | |||
114 | * of bytes_used) we need to set it to an initial sensible dummy value | 108 | * of bytes_used) we need to set it to an initial sensible dummy value |
115 | */ | 109 | */ |
116 | msblk->bytes_used = sizeof(*sblk); | 110 | msblk->bytes_used = sizeof(*sblk); |
117 | err = squashfs_read_table(sb, sblk, SQUASHFS_START, sizeof(*sblk)); | 111 | sblk = squashfs_read_table(sb, SQUASHFS_START, sizeof(*sblk)); |
118 | 112 | ||
119 | if (err < 0) { | 113 | if (IS_ERR(sblk)) { |
120 | ERROR("unable to read squashfs_super_block\n"); | 114 | ERROR("unable to read squashfs_super_block\n"); |
115 | err = PTR_ERR(sblk); | ||
116 | sblk = NULL; | ||
121 | goto failed_mount; | 117 | goto failed_mount; |
122 | } | 118 | } |
123 | 119 | ||
@@ -218,18 +214,61 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) | |||
218 | goto failed_mount; | 214 | goto failed_mount; |
219 | } | 215 | } |
220 | 216 | ||
217 | /* Handle xattrs */ | ||
218 | sb->s_xattr = squashfs_xattr_handlers; | ||
219 | xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start); | ||
220 | if (xattr_id_table_start == SQUASHFS_INVALID_BLK) { | ||
221 | next_table = msblk->bytes_used; | ||
222 | goto allocate_id_index_table; | ||
223 | } | ||
224 | |||
225 | /* Allocate and read xattr id lookup table */ | ||
226 | msblk->xattr_id_table = squashfs_read_xattr_id_table(sb, | ||
227 | xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids); | ||
228 | if (IS_ERR(msblk->xattr_id_table)) { | ||
229 | ERROR("unable to read xattr id index table\n"); | ||
230 | err = PTR_ERR(msblk->xattr_id_table); | ||
231 | msblk->xattr_id_table = NULL; | ||
232 | if (err != -ENOTSUPP) | ||
233 | goto failed_mount; | ||
234 | } | ||
235 | next_table = msblk->xattr_table; | ||
236 | |||
237 | allocate_id_index_table: | ||
221 | /* Allocate and read id index table */ | 238 | /* Allocate and read id index table */ |
222 | msblk->id_table = squashfs_read_id_index_table(sb, | 239 | msblk->id_table = squashfs_read_id_index_table(sb, |
223 | le64_to_cpu(sblk->id_table_start), le16_to_cpu(sblk->no_ids)); | 240 | le64_to_cpu(sblk->id_table_start), next_table, |
241 | le16_to_cpu(sblk->no_ids)); | ||
224 | if (IS_ERR(msblk->id_table)) { | 242 | if (IS_ERR(msblk->id_table)) { |
243 | ERROR("unable to read id index table\n"); | ||
225 | err = PTR_ERR(msblk->id_table); | 244 | err = PTR_ERR(msblk->id_table); |
226 | msblk->id_table = NULL; | 245 | msblk->id_table = NULL; |
227 | goto failed_mount; | 246 | goto failed_mount; |
228 | } | 247 | } |
248 | next_table = msblk->id_table[0]; | ||
249 | |||
250 | /* Handle inode lookup table */ | ||
251 | lookup_table_start = le64_to_cpu(sblk->lookup_table_start); | ||
252 | if (lookup_table_start == SQUASHFS_INVALID_BLK) | ||
253 | goto handle_fragments; | ||
254 | |||
255 | /* Allocate and read inode lookup table */ | ||
256 | msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb, | ||
257 | lookup_table_start, next_table, msblk->inodes); | ||
258 | if (IS_ERR(msblk->inode_lookup_table)) { | ||
259 | ERROR("unable to read inode lookup table\n"); | ||
260 | err = PTR_ERR(msblk->inode_lookup_table); | ||
261 | msblk->inode_lookup_table = NULL; | ||
262 | goto failed_mount; | ||
263 | } | ||
264 | next_table = msblk->inode_lookup_table[0]; | ||
229 | 265 | ||
266 | sb->s_export_op = &squashfs_export_ops; | ||
267 | |||
268 | handle_fragments: | ||
230 | fragments = le32_to_cpu(sblk->fragments); | 269 | fragments = le32_to_cpu(sblk->fragments); |
231 | if (fragments == 0) | 270 | if (fragments == 0) |
232 | goto allocate_lookup_table; | 271 | goto check_directory_table; |
233 | 272 | ||
234 | msblk->fragment_cache = squashfs_cache_init("fragment", | 273 | msblk->fragment_cache = squashfs_cache_init("fragment", |
235 | SQUASHFS_CACHED_FRAGMENTS, msblk->block_size); | 274 | SQUASHFS_CACHED_FRAGMENTS, msblk->block_size); |
@@ -240,45 +279,29 @@ static int squashfs_fill_super(struct super_block *sb, void *data, int silent) | |||
240 | 279 | ||
241 | /* Allocate and read fragment index table */ | 280 | /* Allocate and read fragment index table */ |
242 | msblk->fragment_index = squashfs_read_fragment_index_table(sb, | 281 | msblk->fragment_index = squashfs_read_fragment_index_table(sb, |
243 | le64_to_cpu(sblk->fragment_table_start), fragments); | 282 | le64_to_cpu(sblk->fragment_table_start), next_table, fragments); |
244 | if (IS_ERR(msblk->fragment_index)) { | 283 | if (IS_ERR(msblk->fragment_index)) { |
284 | ERROR("unable to read fragment index table\n"); | ||
245 | err = PTR_ERR(msblk->fragment_index); | 285 | err = PTR_ERR(msblk->fragment_index); |
246 | msblk->fragment_index = NULL; | 286 | msblk->fragment_index = NULL; |
247 | goto failed_mount; | 287 | goto failed_mount; |
248 | } | 288 | } |
289 | next_table = msblk->fragment_index[0]; | ||
249 | 290 | ||
250 | allocate_lookup_table: | 291 | check_directory_table: |
251 | lookup_table_start = le64_to_cpu(sblk->lookup_table_start); | 292 | /* Sanity check directory_table */ |
252 | if (lookup_table_start == SQUASHFS_INVALID_BLK) | 293 | if (msblk->directory_table >= next_table) { |
253 | goto allocate_xattr_table; | 294 | err = -EINVAL; |
254 | |||
255 | /* Allocate and read inode lookup table */ | ||
256 | msblk->inode_lookup_table = squashfs_read_inode_lookup_table(sb, | ||
257 | lookup_table_start, msblk->inodes); | ||
258 | if (IS_ERR(msblk->inode_lookup_table)) { | ||
259 | err = PTR_ERR(msblk->inode_lookup_table); | ||
260 | msblk->inode_lookup_table = NULL; | ||
261 | goto failed_mount; | 295 | goto failed_mount; |
262 | } | 296 | } |
263 | 297 | ||
264 | sb->s_export_op = &squashfs_export_ops; | 298 | /* Sanity check inode_table */ |
265 | 299 | if (msblk->inode_table >= msblk->directory_table) { | |
266 | allocate_xattr_table: | 300 | err = -EINVAL; |
267 | sb->s_xattr = squashfs_xattr_handlers; | 301 | goto failed_mount; |
268 | xattr_id_table_start = le64_to_cpu(sblk->xattr_id_table_start); | ||
269 | if (xattr_id_table_start == SQUASHFS_INVALID_BLK) | ||
270 | goto allocate_root; | ||
271 | |||
272 | /* Allocate and read xattr id lookup table */ | ||
273 | msblk->xattr_id_table = squashfs_read_xattr_id_table(sb, | ||
274 | xattr_id_table_start, &msblk->xattr_table, &msblk->xattr_ids); | ||
275 | if (IS_ERR(msblk->xattr_id_table)) { | ||
276 | err = PTR_ERR(msblk->xattr_id_table); | ||
277 | msblk->xattr_id_table = NULL; | ||
278 | if (err != -ENOTSUPP) | ||
279 | goto failed_mount; | ||
280 | } | 302 | } |
281 | allocate_root: | 303 | |
304 | /* allocate root */ | ||
282 | root = new_inode(sb); | 305 | root = new_inode(sb); |
283 | if (!root) { | 306 | if (!root) { |
284 | err = -ENOMEM; | 307 | err = -ENOMEM; |
@@ -318,11 +341,6 @@ failed_mount: | |||
318 | sb->s_fs_info = NULL; | 341 | sb->s_fs_info = NULL; |
319 | kfree(sblk); | 342 | kfree(sblk); |
320 | return err; | 343 | return err; |
321 | |||
322 | failure: | ||
323 | kfree(sb->s_fs_info); | ||
324 | sb->s_fs_info = NULL; | ||
325 | return -ENOMEM; | ||
326 | } | 344 | } |
327 | 345 | ||
328 | 346 | ||
@@ -475,5 +493,5 @@ static const struct super_operations squashfs_super_ops = { | |||
475 | module_init(init_squashfs_fs); | 493 | module_init(init_squashfs_fs); |
476 | module_exit(exit_squashfs_fs); | 494 | module_exit(exit_squashfs_fs); |
477 | MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem"); | 495 | MODULE_DESCRIPTION("squashfs 4.0, a compressed read-only filesystem"); |
478 | MODULE_AUTHOR("Phillip Lougher <phillip@lougher.demon.co.uk>"); | 496 | MODULE_AUTHOR("Phillip Lougher <phillip@squashfs.org.uk>"); |
479 | MODULE_LICENSE("GPL"); | 497 | MODULE_LICENSE("GPL"); |
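squashfs_fill_super() now reads the index tables in reverse on-disk order so that each read can be bounded by the start of the table that follows it, and it finishes with ordering checks on directory_table and inode_table. The layout those checks assume, inferred from this hunk and written out as a comment:

    /*
     * Assumed on-disk ordering (later tables at higher offsets):
     *
     *   inode table < directory table < fragment index
     *               < inode lookup table < id table
     *               < xattr id table < bytes_used
     *
     * Each squashfs_read_*_table() call receives the start of the following
     * table as "next_table", so a corrupted start offset cannot cause a read
     * past the region that belongs to the table being fetched.
     */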
diff --git a/fs/squashfs/symlink.c b/fs/squashfs/symlink.c index ec86434921e1..1191817264cc 100644 --- a/fs/squashfs/symlink.c +++ b/fs/squashfs/symlink.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/squashfs/xattr.c b/fs/squashfs/xattr.c index 3876c36699a1..92fcde7b4d61 100644 --- a/fs/squashfs/xattr.c +++ b/fs/squashfs/xattr.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2010 | 4 | * Copyright (c) 2010 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/squashfs/xattr.h b/fs/squashfs/xattr.h index b634efce4bde..c83f5d9ec125 100644 --- a/fs/squashfs/xattr.h +++ b/fs/squashfs/xattr.h | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2010 | 4 | * Copyright (c) 2010 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
@@ -31,6 +31,7 @@ static inline __le64 *squashfs_read_xattr_id_table(struct super_block *sb, | |||
31 | u64 start, u64 *xattr_table_start, int *xattr_ids) | 31 | u64 start, u64 *xattr_table_start, int *xattr_ids) |
32 | { | 32 | { |
33 | ERROR("Xattrs in filesystem, these will be ignored\n"); | 33 | ERROR("Xattrs in filesystem, these will be ignored\n"); |
34 | *xattr_table_start = start; | ||
34 | return ERR_PTR(-ENOTSUPP); | 35 | return ERR_PTR(-ENOTSUPP); |
35 | } | 36 | } |
36 | 37 | ||
diff --git a/fs/squashfs/xattr_id.c b/fs/squashfs/xattr_id.c index 05385dbe1465..c89607d690c4 100644 --- a/fs/squashfs/xattr_id.c +++ b/fs/squashfs/xattr_id.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2010 | 4 | * Copyright (c) 2010 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
@@ -67,34 +67,29 @@ __le64 *squashfs_read_xattr_id_table(struct super_block *sb, u64 start, | |||
67 | u64 *xattr_table_start, int *xattr_ids) | 67 | u64 *xattr_table_start, int *xattr_ids) |
68 | { | 68 | { |
69 | unsigned int len; | 69 | unsigned int len; |
70 | __le64 *xid_table; | 70 | struct squashfs_xattr_id_table *id_table; |
71 | struct squashfs_xattr_id_table id_table; | 71 | |
72 | int err; | 72 | id_table = squashfs_read_table(sb, start, sizeof(*id_table)); |
73 | if (IS_ERR(id_table)) | ||
74 | return (__le64 *) id_table; | ||
75 | |||
76 | *xattr_table_start = le64_to_cpu(id_table->xattr_table_start); | ||
77 | *xattr_ids = le32_to_cpu(id_table->xattr_ids); | ||
78 | kfree(id_table); | ||
79 | |||
80 | /* Sanity check values */ | ||
81 | |||
82 | /* there is always at least one xattr id */ | ||
83 | if (*xattr_ids == 0) | ||
84 | return ERR_PTR(-EINVAL); | ||
85 | |||
86 | /* xattr_table should be less than start */ | ||
87 | if (*xattr_table_start >= start) | ||
88 | return ERR_PTR(-EINVAL); | ||
73 | 89 | ||
74 | err = squashfs_read_table(sb, &id_table, start, sizeof(id_table)); | ||
75 | if (err < 0) { | ||
76 | ERROR("unable to read xattr id table\n"); | ||
77 | return ERR_PTR(err); | ||
78 | } | ||
79 | *xattr_table_start = le64_to_cpu(id_table.xattr_table_start); | ||
80 | *xattr_ids = le32_to_cpu(id_table.xattr_ids); | ||
81 | len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); | 90 | len = SQUASHFS_XATTR_BLOCK_BYTES(*xattr_ids); |
82 | 91 | ||
83 | TRACE("In read_xattr_index_table, length %d\n", len); | 92 | TRACE("In read_xattr_index_table, length %d\n", len); |
84 | 93 | ||
85 | /* Allocate xattr id lookup table indexes */ | 94 | return squashfs_read_table(sb, start + sizeof(*id_table), len); |
86 | xid_table = kmalloc(len, GFP_KERNEL); | ||
87 | if (xid_table == NULL) { | ||
88 | ERROR("Failed to allocate xattr id index table\n"); | ||
89 | return ERR_PTR(-ENOMEM); | ||
90 | } | ||
91 | |||
92 | err = squashfs_read_table(sb, xid_table, start + sizeof(id_table), len); | ||
93 | if (err < 0) { | ||
94 | ERROR("unable to read xattr id index table\n"); | ||
95 | kfree(xid_table); | ||
96 | return ERR_PTR(err); | ||
97 | } | ||
98 | |||
99 | return xid_table; | ||
100 | } | 95 | } |
diff --git a/fs/squashfs/xz_wrapper.c b/fs/squashfs/xz_wrapper.c index aa47a286d1f8..1760b7d108f6 100644 --- a/fs/squashfs/xz_wrapper.c +++ b/fs/squashfs/xz_wrapper.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/squashfs/zlib_wrapper.c b/fs/squashfs/zlib_wrapper.c index 517688b32ffa..55d918fd2d86 100644 --- a/fs/squashfs/zlib_wrapper.c +++ b/fs/squashfs/zlib_wrapper.c | |||
@@ -2,7 +2,7 @@ | |||
2 | * Squashfs - a compressed read only filesystem for Linux | 2 | * Squashfs - a compressed read only filesystem for Linux |
3 | * | 3 | * |
4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 | 4 | * Copyright (c) 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009 |
5 | * Phillip Lougher <phillip@lougher.demon.co.uk> | 5 | * Phillip Lougher <phillip@squashfs.org.uk> |
6 | * | 6 | * |
7 | * This program is free software; you can redistribute it and/or | 7 | * This program is free software; you can redistribute it and/or |
8 | * modify it under the terms of the GNU General Public License | 8 | * modify it under the terms of the GNU General Public License |
diff --git a/fs/super.c b/fs/super.c index c04f7e0b7ed2..c75593953c52 100644 --- a/fs/super.c +++ b/fs/super.c | |||
@@ -31,6 +31,7 @@ | |||
31 | #include <linux/mutex.h> | 31 | #include <linux/mutex.h> |
32 | #include <linux/backing-dev.h> | 32 | #include <linux/backing-dev.h> |
33 | #include <linux/rculist_bl.h> | 33 | #include <linux/rculist_bl.h> |
34 | #include <linux/cleancache.h> | ||
34 | #include "internal.h" | 35 | #include "internal.h" |
35 | 36 | ||
36 | 37 | ||
@@ -112,6 +113,7 @@ static struct super_block *alloc_super(struct file_system_type *type) | |||
112 | s->s_maxbytes = MAX_NON_LFS; | 113 | s->s_maxbytes = MAX_NON_LFS; |
113 | s->s_op = &default_op; | 114 | s->s_op = &default_op; |
114 | s->s_time_gran = 1000000000; | 115 | s->s_time_gran = 1000000000; |
116 | s->cleancache_poolid = -1; | ||
115 | } | 117 | } |
116 | out: | 118 | out: |
117 | return s; | 119 | return s; |
@@ -177,6 +179,7 @@ void deactivate_locked_super(struct super_block *s) | |||
177 | { | 179 | { |
178 | struct file_system_type *fs = s->s_type; | 180 | struct file_system_type *fs = s->s_type; |
179 | if (atomic_dec_and_test(&s->s_active)) { | 181 | if (atomic_dec_and_test(&s->s_active)) { |
182 | cleancache_flush_fs(s); | ||
180 | fs->kill_sb(s); | 183 | fs->kill_sb(s); |
181 | /* | 184 | /* |
182 | * We need to call rcu_barrier so all the delayed rcu free | 185 | * We need to call rcu_barrier so all the delayed rcu free |
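The superblock now carries a cleancache pool id, initialised to -1 ("no pool yet") in alloc_super(), and the pool is invalidated via cleancache_flush_fs() when the last active reference is dropped, before kill_sb() runs. A sketch of the lifecycle as a filesystem would see it, assuming the cleancache entry points added elsewhere in this series (the filesystem name is hypothetical):

    /* Illustrative only: cleancache lifecycle around a superblock. */
    static int myfs_fill_super(struct super_block *sb, void *data, int silent)
    {
        /* alloc_super() has already set sb->cleancache_poolid = -1 */
        cleancache_init_fs(sb);         /* opt this superblock into cleancache */
        /* ... normal mount work ... */
        return 0;
    }

    /*
     * Teardown path (from this hunk): on the final deactivate_locked_super(),
     * cleancache_flush_fs(sb) invalidates the pool, then fs->kill_sb(sb) runs.
     */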
diff --git a/fs/sysv/namei.c b/fs/sysv/namei.c index e474fbcf8bde..e2cc6756f3b1 100644 --- a/fs/sysv/namei.c +++ b/fs/sysv/namei.c | |||
@@ -196,6 +196,8 @@ static int sysv_rmdir(struct inode * dir, struct dentry * dentry) | |||
196 | struct inode *inode = dentry->d_inode; | 196 | struct inode *inode = dentry->d_inode; |
197 | int err = -ENOTEMPTY; | 197 | int err = -ENOTEMPTY; |
198 | 198 | ||
199 | dentry_unhash(dentry); | ||
200 | |||
199 | if (sysv_empty_dir(inode)) { | 201 | if (sysv_empty_dir(inode)) { |
200 | err = sysv_unlink(dir, dentry); | 202 | err = sysv_unlink(dir, dentry); |
201 | if (!err) { | 203 | if (!err) { |
@@ -222,6 +224,9 @@ static int sysv_rename(struct inode * old_dir, struct dentry * old_dentry, | |||
222 | struct sysv_dir_entry * old_de; | 224 | struct sysv_dir_entry * old_de; |
223 | int err = -ENOENT; | 225 | int err = -ENOENT; |
224 | 226 | ||
227 | if (new_inode && S_ISDIR(new_inode->i_mode)) | ||
228 | dentry_unhash(new_dentry); | ||
229 | |||
225 | old_de = sysv_find_entry(old_dentry, &old_page); | 230 | old_de = sysv_find_entry(old_dentry, &old_page); |
226 | if (!old_de) | 231 | if (!old_de) |
227 | goto out; | 232 | goto out; |
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index ef5abd38f0bf..c2b80943560d 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c | |||
@@ -656,6 +656,8 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry) | |||
656 | struct ubifs_inode *dir_ui = ubifs_inode(dir); | 656 | struct ubifs_inode *dir_ui = ubifs_inode(dir); |
657 | struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 }; | 657 | struct ubifs_budget_req req = { .mod_dent = 1, .dirtied_ino = 2 }; |
658 | 658 | ||
659 | dentry_unhash(dentry); | ||
660 | |||
659 | /* | 661 | /* |
660 | * Budget request settings: deletion direntry, deletion inode and | 662 | * Budget request settings: deletion direntry, deletion inode and |
661 | * changing the parent inode. If budgeting fails, go ahead anyway | 663 | * changing the parent inode. If budgeting fails, go ahead anyway |
@@ -976,6 +978,9 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
976 | .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) }; | 978 | .dirtied_ino_d = ALIGN(old_inode_ui->data_len, 8) }; |
977 | struct timespec time; | 979 | struct timespec time; |
978 | 980 | ||
981 | if (new_inode && S_ISDIR(new_inode->i_mode)) | ||
982 | dentry_unhash(new_dentry); | ||
983 | |||
979 | /* | 984 | /* |
980 | * Budget request settings: deletion direntry, new direntry, removing | 985 | * Budget request settings: deletion direntry, new direntry, removing |
981 | * the old inode, and changing old and new parent directory inodes. | 986 | * the old inode, and changing old and new parent directory inodes. |
diff --git a/fs/udf/namei.c b/fs/udf/namei.c index f1dce848ef96..4d76594c2a8f 100644 --- a/fs/udf/namei.c +++ b/fs/udf/namei.c | |||
@@ -783,6 +783,8 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry) | |||
783 | struct fileIdentDesc *fi, cfi; | 783 | struct fileIdentDesc *fi, cfi; |
784 | struct kernel_lb_addr tloc; | 784 | struct kernel_lb_addr tloc; |
785 | 785 | ||
786 | dentry_unhash(dentry); | ||
787 | |||
786 | retval = -ENOENT; | 788 | retval = -ENOENT; |
787 | fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); | 789 | fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); |
788 | if (!fi) | 790 | if (!fi) |
@@ -1081,6 +1083,9 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
1081 | struct kernel_lb_addr tloc; | 1083 | struct kernel_lb_addr tloc; |
1082 | struct udf_inode_info *old_iinfo = UDF_I(old_inode); | 1084 | struct udf_inode_info *old_iinfo = UDF_I(old_inode); |
1083 | 1085 | ||
1086 | if (new_inode && S_ISDIR(new_inode->i_mode)) | ||
1087 | dentry_unhash(new_dentry); | ||
1088 | |||
1084 | ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); | 1089 | ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); |
1085 | if (ofi) { | 1090 | if (ofi) { |
1086 | if (ofibh.sbh != ofibh.ebh) | 1091 | if (ofibh.sbh != ofibh.ebh) |
diff --git a/fs/ufs/balloc.c b/fs/ufs/balloc.c index 46f7a807bbc1..42694e11c23d 100644 --- a/fs/ufs/balloc.c +++ b/fs/ufs/balloc.c | |||
@@ -424,8 +424,7 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
424 | ufs_cpu_to_data_ptr(sb, p, result); | 424 | ufs_cpu_to_data_ptr(sb, p, result); |
425 | *err = 0; | 425 | *err = 0; |
426 | UFS_I(inode)->i_lastfrag = | 426 | UFS_I(inode)->i_lastfrag = |
427 | max_t(u32, UFS_I(inode)->i_lastfrag, | 427 | max(UFS_I(inode)->i_lastfrag, fragment + count); |
428 | fragment + count); | ||
429 | ufs_clear_frags(inode, result + oldcount, | 428 | ufs_clear_frags(inode, result + oldcount, |
430 | newcount - oldcount, locked_page != NULL); | 429 | newcount - oldcount, locked_page != NULL); |
431 | } | 430 | } |
@@ -440,7 +439,8 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
440 | result = ufs_add_fragments (inode, tmp, oldcount, newcount, err); | 439 | result = ufs_add_fragments (inode, tmp, oldcount, newcount, err); |
441 | if (result) { | 440 | if (result) { |
442 | *err = 0; | 441 | *err = 0; |
443 | UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); | 442 | UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, |
443 | fragment + count); | ||
444 | ufs_clear_frags(inode, result + oldcount, newcount - oldcount, | 444 | ufs_clear_frags(inode, result + oldcount, newcount - oldcount, |
445 | locked_page != NULL); | 445 | locked_page != NULL); |
446 | unlock_super(sb); | 446 | unlock_super(sb); |
@@ -479,7 +479,8 @@ u64 ufs_new_fragments(struct inode *inode, void *p, u64 fragment, | |||
479 | uspi->s_sbbase + result, locked_page); | 479 | uspi->s_sbbase + result, locked_page); |
480 | ufs_cpu_to_data_ptr(sb, p, result); | 480 | ufs_cpu_to_data_ptr(sb, p, result); |
481 | *err = 0; | 481 | *err = 0; |
482 | UFS_I(inode)->i_lastfrag = max_t(u32, UFS_I(inode)->i_lastfrag, fragment + count); | 482 | UFS_I(inode)->i_lastfrag = max(UFS_I(inode)->i_lastfrag, |
483 | fragment + count); | ||
483 | unlock_super(sb); | 484 | unlock_super(sb); |
484 | if (newcount < request) | 485 | if (newcount < request) |
485 | ufs_free_fragments (inode, result + newcount, request - newcount); | 486 | ufs_free_fragments (inode, result + newcount, request - newcount); |
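The i_lastfrag updates drop the max_t(u32, ...) form so the comparison is done in the full width of i_lastfrag instead of being truncated to 32 bits; the truncate.c hunk below widens the matching min_t() to u64 for the same reason. A tiny illustration of the truncation, assuming a 64-bit fragment number:

    /* Illustrative only: why forcing the comparison through u32 is wrong. */
    u64 lastfrag = 0x100000001ULL;              /* needs more than 32 bits */
    u64 bad = max_t(u32, lastfrag, 5);          /* truncates to 1, result is 5 */
    u64 ok  = max(lastfrag, (u64)5);            /* keeps the full 64-bit value */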
diff --git a/fs/ufs/namei.c b/fs/ufs/namei.c index 29309e25417f..953ebdfc5bf7 100644 --- a/fs/ufs/namei.c +++ b/fs/ufs/namei.c | |||
@@ -258,6 +258,8 @@ static int ufs_rmdir (struct inode * dir, struct dentry *dentry) | |||
258 | struct inode * inode = dentry->d_inode; | 258 | struct inode * inode = dentry->d_inode; |
259 | int err= -ENOTEMPTY; | 259 | int err= -ENOTEMPTY; |
260 | 260 | ||
261 | dentry_unhash(dentry); | ||
262 | |||
261 | lock_ufs(dir->i_sb); | 263 | lock_ufs(dir->i_sb); |
262 | if (ufs_empty_dir (inode)) { | 264 | if (ufs_empty_dir (inode)) { |
263 | err = ufs_unlink(dir, dentry); | 265 | err = ufs_unlink(dir, dentry); |
@@ -282,6 +284,9 @@ static int ufs_rename(struct inode *old_dir, struct dentry *old_dentry, | |||
282 | struct ufs_dir_entry *old_de; | 284 | struct ufs_dir_entry *old_de; |
283 | int err = -ENOENT; | 285 | int err = -ENOENT; |
284 | 286 | ||
287 | if (new_inode && S_ISDIR(new_inode->i_mode)) | ||
288 | dentry_unhash(new_dentry); | ||
289 | |||
285 | old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page); | 290 | old_de = ufs_find_entry(old_dir, &old_dentry->d_name, &old_page); |
286 | if (!old_de) | 291 | if (!old_de) |
287 | goto out; | 292 | goto out; |
diff --git a/fs/ufs/truncate.c b/fs/ufs/truncate.c index 5f821dbc0579..f04f89fbd4d9 100644 --- a/fs/ufs/truncate.c +++ b/fs/ufs/truncate.c | |||
@@ -84,7 +84,7 @@ static int ufs_trunc_direct(struct inode *inode) | |||
84 | retry = 0; | 84 | retry = 0; |
85 | 85 | ||
86 | frag1 = DIRECT_FRAGMENT; | 86 | frag1 = DIRECT_FRAGMENT; |
87 | frag4 = min_t(u32, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag); | 87 | frag4 = min_t(u64, UFS_NDIR_FRAGMENT, ufsi->i_lastfrag); |
88 | frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1); | 88 | frag2 = ((frag1 & uspi->s_fpbmask) ? ((frag1 | uspi->s_fpbmask) + 1) : frag1); |
89 | frag3 = frag4 & ~uspi->s_fpbmask; | 89 | frag3 = frag4 & ~uspi->s_fpbmask; |
90 | block1 = block2 = 0; | 90 | block1 = block2 = 0; |
diff --git a/fs/xfs/linux-2.6/xfs_discard.c b/fs/xfs/linux-2.6/xfs_discard.c index d61611c88012..244e797dae32 100644 --- a/fs/xfs/linux-2.6/xfs_discard.c +++ b/fs/xfs/linux-2.6/xfs_discard.c | |||
@@ -191,3 +191,32 @@ xfs_ioc_trim( | |||
191 | return -XFS_ERROR(EFAULT); | 191 | return -XFS_ERROR(EFAULT); |
192 | return 0; | 192 | return 0; |
193 | } | 193 | } |
194 | |||
195 | int | ||
196 | xfs_discard_extents( | ||
197 | struct xfs_mount *mp, | ||
198 | struct list_head *list) | ||
199 | { | ||
200 | struct xfs_busy_extent *busyp; | ||
201 | int error = 0; | ||
202 | |||
203 | list_for_each_entry(busyp, list, list) { | ||
204 | trace_xfs_discard_extent(mp, busyp->agno, busyp->bno, | ||
205 | busyp->length); | ||
206 | |||
207 | error = -blkdev_issue_discard(mp->m_ddev_targp->bt_bdev, | ||
208 | XFS_AGB_TO_DADDR(mp, busyp->agno, busyp->bno), | ||
209 | XFS_FSB_TO_BB(mp, busyp->length), | ||
210 | GFP_NOFS, 0); | ||
211 | if (error && error != EOPNOTSUPP) { | ||
212 | xfs_info(mp, | ||
213 | "discard failed for extent [0x%llu,%u], error %d", | ||
214 | (unsigned long long)busyp->bno, | ||
215 | busyp->length, | ||
216 | error); | ||
217 | return error; | ||
218 | } | ||
219 | } | ||
220 | |||
221 | return 0; | ||
222 | } | ||
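Note: xfs_discard_extents(), added above, walks a list of busy extents, issues a block-device discard for each, treats EOPNOTSUPP (a device without discard support) as harmless, and aborts on any other error. The userspace sketch below reproduces only that control flow; the list type and issue_discard() stub stand in for the kernel's list_for_each_entry() and blkdev_issue_discard(), and the sample extents are made up.

/*
 * Control-flow sketch of xfs_discard_extents(): iterate busy extents,
 * issue a discard for each, ignore "not supported", stop on real errors.
 * The list and discard helpers are userspace stand-ins, not kernel APIs.
 */
#include <stdio.h>
#include <errno.h>

struct busy_extent {
    unsigned long long bno;      /* start block within the AG */
    unsigned int       length;   /* length in filesystem blocks */
    struct busy_extent *next;
};

/* Stand-in for blkdev_issue_discard(): pretend the device lacks discard. */
static int issue_discard(unsigned long long bno, unsigned int len)
{
    (void)bno;
    (void)len;
    return EOPNOTSUPP;
}

static int discard_extents(struct busy_extent *list)
{
    struct busy_extent *busyp;
    int error;

    for (busyp = list; busyp; busyp = busyp->next) {
        error = issue_discard(busyp->bno, busyp->length);
        if (error && error != EOPNOTSUPP) {
            fprintf(stderr,
                    "discard failed for extent [%llu,%u], error %d\n",
                    busyp->bno, busyp->length, error);
            return error;
        }
    }
    return 0;
}

int main(void)
{
    struct busy_extent b = { 128, 16, NULL };
    struct busy_extent a = { 0, 8, &b };

    return discard_extents(&a);
}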
diff --git a/fs/xfs/linux-2.6/xfs_discard.h b/fs/xfs/linux-2.6/xfs_discard.h index e82b6dd3e127..344879aea646 100644 --- a/fs/xfs/linux-2.6/xfs_discard.h +++ b/fs/xfs/linux-2.6/xfs_discard.h | |||
@@ -2,7 +2,9 @@ | |||
2 | #define XFS_DISCARD_H 1 | 2 | #define XFS_DISCARD_H 1 |
3 | 3 | ||
4 | struct fstrim_range; | 4 | struct fstrim_range; |
5 | struct list_head; | ||
5 | 6 | ||
6 | extern int xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *); | 7 | extern int xfs_ioc_trim(struct xfs_mount *, struct fstrim_range __user *); |
8 | extern int xfs_discard_extents(struct xfs_mount *, struct list_head *); | ||
7 | 9 | ||
8 | #endif /* XFS_DISCARD_H */ | 10 | #endif /* XFS_DISCARD_H */ |
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c index b0aa59e51fd0..98b9c91fcdf1 100644 --- a/fs/xfs/linux-2.6/xfs_super.c +++ b/fs/xfs/linux-2.6/xfs_super.c | |||
@@ -110,8 +110,10 @@ mempool_t *xfs_ioend_pool; | |||
110 | #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ | 110 | #define MNTOPT_GQUOTANOENF "gqnoenforce"/* group quota limit enforcement */ |
111 | #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ | 111 | #define MNTOPT_PQUOTANOENF "pqnoenforce"/* project quota limit enforcement */ |
112 | #define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ | 112 | #define MNTOPT_QUOTANOENF "qnoenforce" /* same as uqnoenforce */ |
113 | #define MNTOPT_DELAYLOG "delaylog" /* Delayed loging enabled */ | 113 | #define MNTOPT_DELAYLOG "delaylog" /* Delayed logging enabled */ |
114 | #define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed loging disabled */ | 114 | #define MNTOPT_NODELAYLOG "nodelaylog" /* Delayed logging disabled */ |
115 | #define MNTOPT_DISCARD "discard" /* Discard unused blocks */ | ||
116 | #define MNTOPT_NODISCARD "nodiscard" /* Do not discard unused blocks */ | ||
115 | 117 | ||
116 | /* | 118 | /* |
117 | * Table driven mount option parser. | 119 | * Table driven mount option parser. |
@@ -355,6 +357,10 @@ xfs_parseargs( | |||
355 | mp->m_flags |= XFS_MOUNT_DELAYLOG; | 357 | mp->m_flags |= XFS_MOUNT_DELAYLOG; |
356 | } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { | 358 | } else if (!strcmp(this_char, MNTOPT_NODELAYLOG)) { |
357 | mp->m_flags &= ~XFS_MOUNT_DELAYLOG; | 359 | mp->m_flags &= ~XFS_MOUNT_DELAYLOG; |
360 | } else if (!strcmp(this_char, MNTOPT_DISCARD)) { | ||
361 | mp->m_flags |= XFS_MOUNT_DISCARD; | ||
362 | } else if (!strcmp(this_char, MNTOPT_NODISCARD)) { | ||
363 | mp->m_flags &= ~XFS_MOUNT_DISCARD; | ||
358 | } else if (!strcmp(this_char, "ihashsize")) { | 364 | } else if (!strcmp(this_char, "ihashsize")) { |
359 | xfs_warn(mp, | 365 | xfs_warn(mp, |
360 | "ihashsize no longer used, option is deprecated."); | 366 | "ihashsize no longer used, option is deprecated."); |
@@ -388,6 +394,13 @@ xfs_parseargs( | |||
388 | return EINVAL; | 394 | return EINVAL; |
389 | } | 395 | } |
390 | 396 | ||
397 | if ((mp->m_flags & XFS_MOUNT_DISCARD) && | ||
398 | !(mp->m_flags & XFS_MOUNT_DELAYLOG)) { | ||
399 | xfs_warn(mp, | ||
400 | "the discard option is incompatible with the nodelaylog option"); | ||
401 | return EINVAL; | ||
402 | } | ||
403 | |||
391 | #ifndef CONFIG_XFS_QUOTA | 404 | #ifndef CONFIG_XFS_QUOTA |
392 | if (XFS_IS_QUOTA_RUNNING(mp)) { | 405 | if (XFS_IS_QUOTA_RUNNING(mp)) { |
393 | xfs_warn(mp, "quota support not available in this kernel."); | 406 | xfs_warn(mp, "quota support not available in this kernel."); |
@@ -488,6 +501,7 @@ xfs_showargs( | |||
488 | { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM }, | 501 | { XFS_MOUNT_FILESTREAMS, "," MNTOPT_FILESTREAM }, |
489 | { XFS_MOUNT_GRPID, "," MNTOPT_GRPID }, | 502 | { XFS_MOUNT_GRPID, "," MNTOPT_GRPID }, |
490 | { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG }, | 503 | { XFS_MOUNT_DELAYLOG, "," MNTOPT_DELAYLOG }, |
504 | { XFS_MOUNT_DISCARD, "," MNTOPT_DISCARD }, | ||
491 | { 0, NULL } | 505 | { 0, NULL } |
492 | }; | 506 | }; |
493 | static struct proc_xfs_info xfs_info_unset[] = { | 507 | static struct proc_xfs_info xfs_info_unset[] = { |
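Note: the fs/xfs/linux-2.6/xfs_super.c hunks add "discard"/"nodiscard" mount options that set or clear XFS_MOUNT_DISCARD during parsing, refuse the combination of discard with nodelaylog, and report the flag through xfs_showargs(). A compact sketch of that parse-then-validate pattern follows; the option strings and the incompatibility rule come from the hunks, while the struct, bit values and helpers are simplified stand-ins.

/*
 * Sketch of the discard/nodiscard mount-option handling added above:
 * parse each option string into a flag, then validate flag combinations.
 * Only the option names and the "discard needs delaylog" rule come from
 * the hunks; the surrounding structure is a simplified stand-in.
 */
#include <stdio.h>
#include <string.h>

#define MOUNT_DELAYLOG (1u << 0)     /* illustrative bit values */
#define MOUNT_DISCARD  (1u << 1)

struct mount_opts { unsigned int flags; };

static int parse_one(struct mount_opts *mp, const char *opt)
{
    if (!strcmp(opt, "delaylog"))
        mp->flags |= MOUNT_DELAYLOG;
    else if (!strcmp(opt, "nodelaylog"))
        mp->flags &= ~MOUNT_DELAYLOG;
    else if (!strcmp(opt, "discard"))
        mp->flags |= MOUNT_DISCARD;
    else if (!strcmp(opt, "nodiscard"))
        mp->flags &= ~MOUNT_DISCARD;
    else
        return -1;                   /* unknown option */
    return 0;
}

static int validate(const struct mount_opts *mp)
{
    if ((mp->flags & MOUNT_DISCARD) && !(mp->flags & MOUNT_DELAYLOG)) {
        fprintf(stderr,
                "the discard option is incompatible with the nodelaylog option\n");
        return -1;                   /* EINVAL in the real code */
    }
    return 0;
}

int main(void)
{
    struct mount_opts mp = { .flags = MOUNT_DELAYLOG }; /* start with delaylog on for this example */

    parse_one(&mp, "discard");
    parse_one(&mp, "nodelaylog");
    return validate(&mp) ? 1 : 0;    /* rejected: discard without delaylog */
}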
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h index da0a561ffba2..6530769a999b 100644 --- a/fs/xfs/xfs_ag.h +++ b/fs/xfs/xfs_ag.h | |||
@@ -187,6 +187,9 @@ struct xfs_busy_extent { | |||
187 | xfs_agnumber_t agno; | 187 | xfs_agnumber_t agno; |
188 | xfs_agblock_t bno; | 188 | xfs_agblock_t bno; |
189 | xfs_extlen_t length; | 189 | xfs_extlen_t length; |
190 | unsigned int flags; | ||
191 | #define XFS_ALLOC_BUSY_DISCARDED 0x01 /* undergoing a discard op. */ | ||
192 | #define XFS_ALLOC_BUSY_SKIP_DISCARD 0x02 /* do not discard */ | ||
190 | }; | 193 | }; |
191 | 194 | ||
192 | /* | 195 | /* |
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c index acdced86413c..95862bbff56b 100644 --- a/fs/xfs/xfs_alloc.c +++ b/fs/xfs/xfs_alloc.c | |||
@@ -2469,7 +2469,7 @@ xfs_free_extent( | |||
2469 | 2469 | ||
2470 | error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0); | 2470 | error = xfs_free_ag_extent(tp, args.agbp, args.agno, args.agbno, len, 0); |
2471 | if (!error) | 2471 | if (!error) |
2472 | xfs_alloc_busy_insert(tp, args.agno, args.agbno, len); | 2472 | xfs_alloc_busy_insert(tp, args.agno, args.agbno, len, 0); |
2473 | error0: | 2473 | error0: |
2474 | xfs_perag_put(args.pag); | 2474 | xfs_perag_put(args.pag); |
2475 | return error; | 2475 | return error; |
@@ -2480,7 +2480,8 @@ xfs_alloc_busy_insert( | |||
2480 | struct xfs_trans *tp, | 2480 | struct xfs_trans *tp, |
2481 | xfs_agnumber_t agno, | 2481 | xfs_agnumber_t agno, |
2482 | xfs_agblock_t bno, | 2482 | xfs_agblock_t bno, |
2483 | xfs_extlen_t len) | 2483 | xfs_extlen_t len, |
2484 | unsigned int flags) | ||
2484 | { | 2485 | { |
2485 | struct xfs_busy_extent *new; | 2486 | struct xfs_busy_extent *new; |
2486 | struct xfs_busy_extent *busyp; | 2487 | struct xfs_busy_extent *busyp; |
@@ -2504,6 +2505,7 @@ xfs_alloc_busy_insert( | |||
2504 | new->bno = bno; | 2505 | new->bno = bno; |
2505 | new->length = len; | 2506 | new->length = len; |
2506 | INIT_LIST_HEAD(&new->list); | 2507 | INIT_LIST_HEAD(&new->list); |
2508 | new->flags = flags; | ||
2507 | 2509 | ||
2508 | /* trace before insert to be able to see failed inserts */ | 2510 | /* trace before insert to be able to see failed inserts */ |
2509 | trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len); | 2511 | trace_xfs_alloc_busy(tp->t_mountp, agno, bno, len); |
@@ -2609,6 +2611,18 @@ xfs_alloc_busy_update_extent( | |||
2609 | xfs_agblock_t bend = bbno + busyp->length; | 2611 | xfs_agblock_t bend = bbno + busyp->length; |
2610 | 2612 | ||
2611 | /* | 2613 | /* |
2614 | * This extent is currently being discarded. Give the thread | ||
2615 | * performing the discard a chance to mark the extent unbusy | ||
2616 | * and retry. | ||
2617 | */ | ||
2618 | if (busyp->flags & XFS_ALLOC_BUSY_DISCARDED) { | ||
2619 | spin_unlock(&pag->pagb_lock); | ||
2620 | delay(1); | ||
2621 | spin_lock(&pag->pagb_lock); | ||
2622 | return false; | ||
2623 | } | ||
2624 | |||
2625 | /* | ||
2612 | * If there is a busy extent overlapping a user allocation, we have | 2626 | * If there is a busy extent overlapping a user allocation, we have |
2613 | * no choice but to force the log and retry the search. | 2627 | * no choice but to force the log and retry the search. |
2614 | * | 2628 | * |
@@ -2813,7 +2827,8 @@ restart: | |||
2813 | * If this is a metadata allocation, try to reuse the busy | 2827 | * If this is a metadata allocation, try to reuse the busy |
2814 | * extent instead of trimming the allocation. | 2828 | * extent instead of trimming the allocation. |
2815 | */ | 2829 | */ |
2816 | if (!args->userdata) { | 2830 | if (!args->userdata && |
2831 | !(busyp->flags & XFS_ALLOC_BUSY_DISCARDED)) { | ||
2817 | if (!xfs_alloc_busy_update_extent(args->mp, args->pag, | 2832 | if (!xfs_alloc_busy_update_extent(args->mp, args->pag, |
2818 | busyp, fbno, flen, | 2833 | busyp, fbno, flen, |
2819 | false)) | 2834 | false)) |
@@ -2979,10 +2994,16 @@ xfs_alloc_busy_clear_one( | |||
2979 | kmem_free(busyp); | 2994 | kmem_free(busyp); |
2980 | } | 2995 | } |
2981 | 2996 | ||
2997 | /* | ||
2998 | * Remove all extents on the passed in list from the busy extents tree. | ||
2999 | * If do_discard is set skip extents that need to be discarded, and mark | ||
3000 | * these as undergoing a discard operation instead. | ||
3001 | */ | ||
2982 | void | 3002 | void |
2983 | xfs_alloc_busy_clear( | 3003 | xfs_alloc_busy_clear( |
2984 | struct xfs_mount *mp, | 3004 | struct xfs_mount *mp, |
2985 | struct list_head *list) | 3005 | struct list_head *list, |
3006 | bool do_discard) | ||
2986 | { | 3007 | { |
2987 | struct xfs_busy_extent *busyp, *n; | 3008 | struct xfs_busy_extent *busyp, *n; |
2988 | struct xfs_perag *pag = NULL; | 3009 | struct xfs_perag *pag = NULL; |
@@ -2999,7 +3020,11 @@ xfs_alloc_busy_clear( | |||
2999 | agno = busyp->agno; | 3020 | agno = busyp->agno; |
3000 | } | 3021 | } |
3001 | 3022 | ||
3002 | xfs_alloc_busy_clear_one(mp, pag, busyp); | 3023 | if (do_discard && busyp->length && |
3024 | !(busyp->flags & XFS_ALLOC_BUSY_SKIP_DISCARD)) | ||
3025 | busyp->flags = XFS_ALLOC_BUSY_DISCARDED; | ||
3026 | else | ||
3027 | xfs_alloc_busy_clear_one(mp, pag, busyp); | ||
3003 | } | 3028 | } |
3004 | 3029 | ||
3005 | if (pag) { | 3030 | if (pag) { |
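Note: taken together, the fs/xfs/xfs_alloc.c hunks give each busy extent a small discard state: xfs_alloc_busy_clear() either drops an extent from the tree or, when do_discard is set and XFS_ALLOC_BUSY_SKIP_DISCARD is not, marks it XFS_ALLOC_BUSY_DISCARDED so the discard pass can finish with it, while xfs_alloc_busy_update_extent() backs off (unlock, delay, relock, retry) whenever it meets an extent in that state. The sketch below condenses those two decisions into plain userspace C; the flag names match the hunks, but the locking, rbtree and per-AG details are omitted.

/*
 * Condensed model of the busy-extent discard states added above.
 * Flag names match the hunks; everything else (no locking, no rbtree)
 * is simplified for illustration.
 */
#include <stdio.h>
#include <stdbool.h>

#define XFS_ALLOC_BUSY_DISCARDED    0x01   /* undergoing a discard op. */
#define XFS_ALLOC_BUSY_SKIP_DISCARD 0x02   /* do not discard */

struct busy_extent {
    unsigned int length;
    unsigned int flags;
    bool         in_tree;
};

/* xfs_alloc_busy_clear(): hand discardable extents to the discard pass. */
static void busy_clear_one(struct busy_extent *busyp, bool do_discard)
{
    if (do_discard && busyp->length &&
        !(busyp->flags & XFS_ALLOC_BUSY_SKIP_DISCARD))
        busyp->flags = XFS_ALLOC_BUSY_DISCARDED;
    else
        busyp->in_tree = false;            /* really removed */
}

/* xfs_alloc_busy_update_extent(): cannot trim/reuse while being discarded. */
static bool busy_update_extent(const struct busy_extent *busyp)
{
    if (busyp->flags & XFS_ALLOC_BUSY_DISCARDED) {
        /* real code: drop pagb_lock, delay(1), retake the lock, retry */
        return false;
    }
    return true;                           /* safe to adjust the extent */
}

int main(void)
{
    struct busy_extent data  = { .length = 32, .flags = 0, .in_tree = true };
    struct busy_extent btree = { .length = 1,
                                 .flags  = XFS_ALLOC_BUSY_SKIP_DISCARD,
                                 .in_tree = true };

    busy_clear_one(&data, true);    /* flagged DISCARDED, stays busy */
    busy_clear_one(&btree, true);   /* SKIP_DISCARD: removed immediately */

    printf("data: discarded=%d in_tree=%d\n",
           !!(data.flags & XFS_ALLOC_BUSY_DISCARDED), data.in_tree);
    printf("btree: in_tree=%d\n", btree.in_tree);
    printf("update allowed on data? %d\n", busy_update_extent(&data));
    return 0;
}

The fs/xfs/xfs_alloc_btree.c hunk that follows frees btree blocks with XFS_ALLOC_BUSY_SKIP_DISCARD, which is the case the second extent in the sketch models.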
diff --git a/fs/xfs/xfs_alloc.h b/fs/xfs/xfs_alloc.h index 240ad288f2f9..2f52b924be79 100644 --- a/fs/xfs/xfs_alloc.h +++ b/fs/xfs/xfs_alloc.h | |||
@@ -137,10 +137,11 @@ xfs_alloc_longest_free_extent(struct xfs_mount *mp, | |||
137 | #ifdef __KERNEL__ | 137 | #ifdef __KERNEL__ |
138 | void | 138 | void |
139 | xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno, | 139 | xfs_alloc_busy_insert(struct xfs_trans *tp, xfs_agnumber_t agno, |
140 | xfs_agblock_t bno, xfs_extlen_t len); | 140 | xfs_agblock_t bno, xfs_extlen_t len, unsigned int flags); |
141 | 141 | ||
142 | void | 142 | void |
143 | xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list); | 143 | xfs_alloc_busy_clear(struct xfs_mount *mp, struct list_head *list, |
144 | bool do_discard); | ||
144 | 145 | ||
145 | int | 146 | int |
146 | xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno, | 147 | xfs_alloc_busy_search(struct xfs_mount *mp, xfs_agnumber_t agno, |
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c index 8b469d53599f..2b3518826a69 100644 --- a/fs/xfs/xfs_alloc_btree.c +++ b/fs/xfs/xfs_alloc_btree.c | |||
@@ -120,7 +120,8 @@ xfs_allocbt_free_block( | |||
120 | if (error) | 120 | if (error) |
121 | return error; | 121 | return error; |
122 | 122 | ||
123 | xfs_alloc_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1); | 123 | xfs_alloc_busy_insert(cur->bc_tp, be32_to_cpu(agf->agf_seqno), bno, 1, |
124 | XFS_ALLOC_BUSY_SKIP_DISCARD); | ||
124 | xfs_trans_agbtree_delta(cur->bc_tp, -1); | 125 | xfs_trans_agbtree_delta(cur->bc_tp, -1); |
125 | return 0; | 126 | return 0; |
126 | } | 127 | } |
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c index fa00788de2f5..e546a33214c9 100644 --- a/fs/xfs/xfs_bmap.c +++ b/fs/xfs/xfs_bmap.c | |||
@@ -89,36 +89,19 @@ xfs_bmap_add_attrfork_local( | |||
89 | int *flags); /* inode logging flags */ | 89 | int *flags); /* inode logging flags */ |
90 | 90 | ||
91 | /* | 91 | /* |
92 | * Called by xfs_bmapi to update file extent records and the btree | ||
93 | * after allocating space (or doing a delayed allocation). | ||
94 | */ | ||
95 | STATIC int /* error */ | ||
96 | xfs_bmap_add_extent( | ||
97 | xfs_inode_t *ip, /* incore inode pointer */ | ||
98 | xfs_extnum_t idx, /* extent number to update/insert */ | ||
99 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ | ||
100 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | ||
101 | xfs_fsblock_t *first, /* pointer to firstblock variable */ | ||
102 | xfs_bmap_free_t *flist, /* list of extents to be freed */ | ||
103 | int *logflagsp, /* inode logging flags */ | ||
104 | int whichfork, /* data or attr fork */ | ||
105 | int rsvd); /* OK to allocate reserved blocks */ | ||
106 | |||
107 | /* | ||
108 | * Called by xfs_bmap_add_extent to handle cases converting a delayed | 92 | * Called by xfs_bmap_add_extent to handle cases converting a delayed |
109 | * allocation to a real allocation. | 93 | * allocation to a real allocation. |
110 | */ | 94 | */ |
111 | STATIC int /* error */ | 95 | STATIC int /* error */ |
112 | xfs_bmap_add_extent_delay_real( | 96 | xfs_bmap_add_extent_delay_real( |
113 | xfs_inode_t *ip, /* incore inode pointer */ | 97 | xfs_inode_t *ip, /* incore inode pointer */ |
114 | xfs_extnum_t idx, /* extent number to update/insert */ | 98 | xfs_extnum_t *idx, /* extent number to update/insert */ |
115 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ | 99 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ |
116 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 100 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
117 | xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ | 101 | xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ |
118 | xfs_fsblock_t *first, /* pointer to firstblock variable */ | 102 | xfs_fsblock_t *first, /* pointer to firstblock variable */ |
119 | xfs_bmap_free_t *flist, /* list of extents to be freed */ | 103 | xfs_bmap_free_t *flist, /* list of extents to be freed */ |
120 | int *logflagsp, /* inode logging flags */ | 104 | int *logflagsp); /* inode logging flags */ |
121 | int rsvd); /* OK to allocate reserved blocks */ | ||
122 | 105 | ||
123 | /* | 106 | /* |
124 | * Called by xfs_bmap_add_extent to handle cases converting a hole | 107 | * Called by xfs_bmap_add_extent to handle cases converting a hole |
@@ -127,10 +110,9 @@ xfs_bmap_add_extent_delay_real( | |||
127 | STATIC int /* error */ | 110 | STATIC int /* error */ |
128 | xfs_bmap_add_extent_hole_delay( | 111 | xfs_bmap_add_extent_hole_delay( |
129 | xfs_inode_t *ip, /* incore inode pointer */ | 112 | xfs_inode_t *ip, /* incore inode pointer */ |
130 | xfs_extnum_t idx, /* extent number to update/insert */ | 113 | xfs_extnum_t *idx, /* extent number to update/insert */ |
131 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 114 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
132 | int *logflagsp,/* inode logging flags */ | 115 | int *logflagsp); /* inode logging flags */ |
133 | int rsvd); /* OK to allocate reserved blocks */ | ||
134 | 116 | ||
135 | /* | 117 | /* |
136 | * Called by xfs_bmap_add_extent to handle cases converting a hole | 118 | * Called by xfs_bmap_add_extent to handle cases converting a hole |
@@ -139,7 +121,7 @@ xfs_bmap_add_extent_hole_delay( | |||
139 | STATIC int /* error */ | 121 | STATIC int /* error */ |
140 | xfs_bmap_add_extent_hole_real( | 122 | xfs_bmap_add_extent_hole_real( |
141 | xfs_inode_t *ip, /* incore inode pointer */ | 123 | xfs_inode_t *ip, /* incore inode pointer */ |
142 | xfs_extnum_t idx, /* extent number to update/insert */ | 124 | xfs_extnum_t *idx, /* extent number to update/insert */ |
143 | xfs_btree_cur_t *cur, /* if null, not a btree */ | 125 | xfs_btree_cur_t *cur, /* if null, not a btree */ |
144 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 126 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
145 | int *logflagsp, /* inode logging flags */ | 127 | int *logflagsp, /* inode logging flags */ |
@@ -152,7 +134,7 @@ xfs_bmap_add_extent_hole_real( | |||
152 | STATIC int /* error */ | 134 | STATIC int /* error */ |
153 | xfs_bmap_add_extent_unwritten_real( | 135 | xfs_bmap_add_extent_unwritten_real( |
154 | xfs_inode_t *ip, /* incore inode pointer */ | 136 | xfs_inode_t *ip, /* incore inode pointer */ |
155 | xfs_extnum_t idx, /* extent number to update/insert */ | 137 | xfs_extnum_t *idx, /* extent number to update/insert */ |
156 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ | 138 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ |
157 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 139 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
158 | int *logflagsp); /* inode logging flags */ | 140 | int *logflagsp); /* inode logging flags */ |
@@ -180,22 +162,6 @@ xfs_bmap_btree_to_extents( | |||
180 | int whichfork); /* data or attr fork */ | 162 | int whichfork); /* data or attr fork */ |
181 | 163 | ||
182 | /* | 164 | /* |
183 | * Called by xfs_bmapi to update file extent records and the btree | ||
184 | * after removing space (or undoing a delayed allocation). | ||
185 | */ | ||
186 | STATIC int /* error */ | ||
187 | xfs_bmap_del_extent( | ||
188 | xfs_inode_t *ip, /* incore inode pointer */ | ||
189 | xfs_trans_t *tp, /* current trans pointer */ | ||
190 | xfs_extnum_t idx, /* extent number to update/insert */ | ||
191 | xfs_bmap_free_t *flist, /* list of extents to be freed */ | ||
192 | xfs_btree_cur_t *cur, /* if null, not a btree */ | ||
193 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | ||
194 | int *logflagsp,/* inode logging flags */ | ||
195 | int whichfork, /* data or attr fork */ | ||
196 | int rsvd); /* OK to allocate reserved blocks */ | ||
197 | |||
198 | /* | ||
199 | * Remove the entry "free" from the free item list. Prev points to the | 165 | * Remove the entry "free" from the free item list. Prev points to the |
200 | * previous entry, unless "free" is the head of the list. | 166 | * previous entry, unless "free" is the head of the list. |
201 | */ | 167 | */ |
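Note: the prototype changes above (and the hunks that follow) turn the extent index into an in/out parameter: xfs_extnum_t *idx lets each helper report where the record ended up, replacing the old ip->i_df.if_lastex bookkeeping, and the rsvd argument is dropped, with the converted calls passing 0 to xfs_icsb_modify_counters(). The tiny sketch below shows only the by-value versus by-pointer difference; the function names and the index arithmetic are illustrative, not the real helpers.

/*
 * Minimal illustration of the idx change in the hunks around this point:
 * the extent index becomes an in/out parameter so the helper can tell the
 * caller where the record ended up, instead of the caller consulting a
 * cached "last extent" field afterwards. Names are illustrative only.
 */
#include <stdio.h>

typedef int xfs_extnum_t;

/* Old shape: idx by value; the helper cannot report a merge to the left. */
static void add_extent_byval(xfs_extnum_t idx)
{
    (void)idx;      /* caller still believes the record lives at 'idx' */
}

/* New shape: idx by pointer; a left merge is reported as --*idx. */
static void add_extent_byref(xfs_extnum_t *idx)
{
    --*idx;         /* e.g. the "left and right both contiguous" case */
}

int main(void)
{
    xfs_extnum_t idx = 5;

    add_extent_byval(idx);
    printf("by value  : caller's idx = %d\n", idx);  /* still 5 */

    add_extent_byref(&idx);
    printf("by pointer: caller's idx = %d\n", idx);  /* now 4 */
    return 0;
}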
@@ -474,14 +440,13 @@ xfs_bmap_add_attrfork_local( | |||
474 | STATIC int /* error */ | 440 | STATIC int /* error */ |
475 | xfs_bmap_add_extent( | 441 | xfs_bmap_add_extent( |
476 | xfs_inode_t *ip, /* incore inode pointer */ | 442 | xfs_inode_t *ip, /* incore inode pointer */ |
477 | xfs_extnum_t idx, /* extent number to update/insert */ | 443 | xfs_extnum_t *idx, /* extent number to update/insert */ |
478 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ | 444 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ |
479 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 445 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
480 | xfs_fsblock_t *first, /* pointer to firstblock variable */ | 446 | xfs_fsblock_t *first, /* pointer to firstblock variable */ |
481 | xfs_bmap_free_t *flist, /* list of extents to be freed */ | 447 | xfs_bmap_free_t *flist, /* list of extents to be freed */ |
482 | int *logflagsp, /* inode logging flags */ | 448 | int *logflagsp, /* inode logging flags */ |
483 | int whichfork, /* data or attr fork */ | 449 | int whichfork) /* data or attr fork */ |
484 | int rsvd) /* OK to use reserved data blocks */ | ||
485 | { | 450 | { |
486 | xfs_btree_cur_t *cur; /* btree cursor or null */ | 451 | xfs_btree_cur_t *cur; /* btree cursor or null */ |
487 | xfs_filblks_t da_new; /* new count del alloc blocks used */ | 452 | xfs_filblks_t da_new; /* new count del alloc blocks used */ |
@@ -492,23 +457,27 @@ xfs_bmap_add_extent( | |||
492 | xfs_extnum_t nextents; /* number of extents in file now */ | 457 | xfs_extnum_t nextents; /* number of extents in file now */ |
493 | 458 | ||
494 | XFS_STATS_INC(xs_add_exlist); | 459 | XFS_STATS_INC(xs_add_exlist); |
460 | |||
495 | cur = *curp; | 461 | cur = *curp; |
496 | ifp = XFS_IFORK_PTR(ip, whichfork); | 462 | ifp = XFS_IFORK_PTR(ip, whichfork); |
497 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); | 463 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
498 | ASSERT(idx <= nextents); | ||
499 | da_old = da_new = 0; | 464 | da_old = da_new = 0; |
500 | error = 0; | 465 | error = 0; |
466 | |||
467 | ASSERT(*idx >= 0); | ||
468 | ASSERT(*idx <= nextents); | ||
469 | |||
501 | /* | 470 | /* |
502 | * This is the first extent added to a new/empty file. | 471 | * This is the first extent added to a new/empty file. |
503 | * Special case this one, so other routines get to assume there are | 472 | * Special case this one, so other routines get to assume there are |
504 | * already extents in the list. | 473 | * already extents in the list. |
505 | */ | 474 | */ |
506 | if (nextents == 0) { | 475 | if (nextents == 0) { |
507 | xfs_iext_insert(ip, 0, 1, new, | 476 | xfs_iext_insert(ip, *idx, 1, new, |
508 | whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); | 477 | whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); |
509 | 478 | ||
510 | ASSERT(cur == NULL); | 479 | ASSERT(cur == NULL); |
511 | ifp->if_lastex = 0; | 480 | |
512 | if (!isnullstartblock(new->br_startblock)) { | 481 | if (!isnullstartblock(new->br_startblock)) { |
513 | XFS_IFORK_NEXT_SET(ip, whichfork, 1); | 482 | XFS_IFORK_NEXT_SET(ip, whichfork, 1); |
514 | logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); | 483 | logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork); |
@@ -522,27 +491,25 @@ xfs_bmap_add_extent( | |||
522 | if (cur) | 491 | if (cur) |
523 | ASSERT((cur->bc_private.b.flags & | 492 | ASSERT((cur->bc_private.b.flags & |
524 | XFS_BTCUR_BPRV_WASDEL) == 0); | 493 | XFS_BTCUR_BPRV_WASDEL) == 0); |
525 | if ((error = xfs_bmap_add_extent_hole_delay(ip, idx, new, | 494 | error = xfs_bmap_add_extent_hole_delay(ip, idx, new, |
526 | &logflags, rsvd))) | 495 | &logflags); |
527 | goto done; | ||
528 | } | 496 | } |
529 | /* | 497 | /* |
530 | * Real allocation off the end of the file. | 498 | * Real allocation off the end of the file. |
531 | */ | 499 | */ |
532 | else if (idx == nextents) { | 500 | else if (*idx == nextents) { |
533 | if (cur) | 501 | if (cur) |
534 | ASSERT((cur->bc_private.b.flags & | 502 | ASSERT((cur->bc_private.b.flags & |
535 | XFS_BTCUR_BPRV_WASDEL) == 0); | 503 | XFS_BTCUR_BPRV_WASDEL) == 0); |
536 | if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new, | 504 | error = xfs_bmap_add_extent_hole_real(ip, idx, cur, new, |
537 | &logflags, whichfork))) | 505 | &logflags, whichfork); |
538 | goto done; | ||
539 | } else { | 506 | } else { |
540 | xfs_bmbt_irec_t prev; /* old extent at offset idx */ | 507 | xfs_bmbt_irec_t prev; /* old extent at offset idx */ |
541 | 508 | ||
542 | /* | 509 | /* |
543 | * Get the record referred to by idx. | 510 | * Get the record referred to by idx. |
544 | */ | 511 | */ |
545 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx), &prev); | 512 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &prev); |
546 | /* | 513 | /* |
547 | * If it's a real allocation record, and the new allocation ends | 514 | * If it's a real allocation record, and the new allocation ends |
548 | * after the start of the referred to record, then we're filling | 515 | * after the start of the referred to record, then we're filling |
@@ -557,22 +524,18 @@ xfs_bmap_add_extent( | |||
557 | if (cur) | 524 | if (cur) |
558 | ASSERT(cur->bc_private.b.flags & | 525 | ASSERT(cur->bc_private.b.flags & |
559 | XFS_BTCUR_BPRV_WASDEL); | 526 | XFS_BTCUR_BPRV_WASDEL); |
560 | if ((error = xfs_bmap_add_extent_delay_real(ip, | 527 | error = xfs_bmap_add_extent_delay_real(ip, |
561 | idx, &cur, new, &da_new, first, flist, | 528 | idx, &cur, new, &da_new, |
562 | &logflags, rsvd))) | 529 | first, flist, &logflags); |
563 | goto done; | ||
564 | } else if (new->br_state == XFS_EXT_NORM) { | ||
565 | ASSERT(new->br_state == XFS_EXT_NORM); | ||
566 | if ((error = xfs_bmap_add_extent_unwritten_real( | ||
567 | ip, idx, &cur, new, &logflags))) | ||
568 | goto done; | ||
569 | } else { | 530 | } else { |
570 | ASSERT(new->br_state == XFS_EXT_UNWRITTEN); | 531 | ASSERT(new->br_state == XFS_EXT_NORM || |
571 | if ((error = xfs_bmap_add_extent_unwritten_real( | 532 | new->br_state == XFS_EXT_UNWRITTEN); |
572 | ip, idx, &cur, new, &logflags))) | 533 | |
534 | error = xfs_bmap_add_extent_unwritten_real(ip, | ||
535 | idx, &cur, new, &logflags); | ||
536 | if (error) | ||
573 | goto done; | 537 | goto done; |
574 | } | 538 | } |
575 | ASSERT(*curp == cur || *curp == NULL); | ||
576 | } | 539 | } |
577 | /* | 540 | /* |
578 | * Otherwise we're filling in a hole with an allocation. | 541 | * Otherwise we're filling in a hole with an allocation. |
@@ -581,13 +544,15 @@ xfs_bmap_add_extent( | |||
581 | if (cur) | 544 | if (cur) |
582 | ASSERT((cur->bc_private.b.flags & | 545 | ASSERT((cur->bc_private.b.flags & |
583 | XFS_BTCUR_BPRV_WASDEL) == 0); | 546 | XFS_BTCUR_BPRV_WASDEL) == 0); |
584 | if ((error = xfs_bmap_add_extent_hole_real(ip, idx, cur, | 547 | error = xfs_bmap_add_extent_hole_real(ip, idx, cur, |
585 | new, &logflags, whichfork))) | 548 | new, &logflags, whichfork); |
586 | goto done; | ||
587 | } | 549 | } |
588 | } | 550 | } |
589 | 551 | ||
552 | if (error) | ||
553 | goto done; | ||
590 | ASSERT(*curp == cur || *curp == NULL); | 554 | ASSERT(*curp == cur || *curp == NULL); |
555 | |||
591 | /* | 556 | /* |
592 | * Convert to a btree if necessary. | 557 | * Convert to a btree if necessary. |
593 | */ | 558 | */ |
@@ -615,7 +580,7 @@ xfs_bmap_add_extent( | |||
615 | ASSERT(nblks <= da_old); | 580 | ASSERT(nblks <= da_old); |
616 | if (nblks < da_old) | 581 | if (nblks < da_old) |
617 | xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS, | 582 | xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS, |
618 | (int64_t)(da_old - nblks), rsvd); | 583 | (int64_t)(da_old - nblks), 0); |
619 | } | 584 | } |
620 | /* | 585 | /* |
621 | * Clear out the allocated field, done with it now in any case. | 586 | * Clear out the allocated field, done with it now in any case. |
@@ -640,14 +605,13 @@ done: | |||
640 | STATIC int /* error */ | 605 | STATIC int /* error */ |
641 | xfs_bmap_add_extent_delay_real( | 606 | xfs_bmap_add_extent_delay_real( |
642 | xfs_inode_t *ip, /* incore inode pointer */ | 607 | xfs_inode_t *ip, /* incore inode pointer */ |
643 | xfs_extnum_t idx, /* extent number to update/insert */ | 608 | xfs_extnum_t *idx, /* extent number to update/insert */ |
644 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ | 609 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ |
645 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 610 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
646 | xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ | 611 | xfs_filblks_t *dnew, /* new delayed-alloc indirect blocks */ |
647 | xfs_fsblock_t *first, /* pointer to firstblock variable */ | 612 | xfs_fsblock_t *first, /* pointer to firstblock variable */ |
648 | xfs_bmap_free_t *flist, /* list of extents to be freed */ | 613 | xfs_bmap_free_t *flist, /* list of extents to be freed */ |
649 | int *logflagsp, /* inode logging flags */ | 614 | int *logflagsp) /* inode logging flags */ |
650 | int rsvd) /* OK to use reserved data block allocation */ | ||
651 | { | 615 | { |
652 | xfs_btree_cur_t *cur; /* btree cursor */ | 616 | xfs_btree_cur_t *cur; /* btree cursor */ |
653 | int diff; /* temp value */ | 617 | int diff; /* temp value */ |
@@ -673,7 +637,7 @@ xfs_bmap_add_extent_delay_real( | |||
673 | */ | 637 | */ |
674 | cur = *curp; | 638 | cur = *curp; |
675 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); | 639 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); |
676 | ep = xfs_iext_get_ext(ifp, idx); | 640 | ep = xfs_iext_get_ext(ifp, *idx); |
677 | xfs_bmbt_get_all(ep, &PREV); | 641 | xfs_bmbt_get_all(ep, &PREV); |
678 | new_endoff = new->br_startoff + new->br_blockcount; | 642 | new_endoff = new->br_startoff + new->br_blockcount; |
679 | ASSERT(PREV.br_startoff <= new->br_startoff); | 643 | ASSERT(PREV.br_startoff <= new->br_startoff); |
@@ -692,9 +656,9 @@ xfs_bmap_add_extent_delay_real( | |||
692 | * Check and set flags if this segment has a left neighbor. | 656 | * Check and set flags if this segment has a left neighbor. |
693 | * Don't set contiguous if the combined extent would be too large. | 657 | * Don't set contiguous if the combined extent would be too large. |
694 | */ | 658 | */ |
695 | if (idx > 0) { | 659 | if (*idx > 0) { |
696 | state |= BMAP_LEFT_VALID; | 660 | state |= BMAP_LEFT_VALID; |
697 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); | 661 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT); |
698 | 662 | ||
699 | if (isnullstartblock(LEFT.br_startblock)) | 663 | if (isnullstartblock(LEFT.br_startblock)) |
700 | state |= BMAP_LEFT_DELAY; | 664 | state |= BMAP_LEFT_DELAY; |
@@ -712,9 +676,9 @@ xfs_bmap_add_extent_delay_real( | |||
712 | * Don't set contiguous if the combined extent would be too large. | 676 | * Don't set contiguous if the combined extent would be too large. |
713 | * Also check for all-three-contiguous being too large. | 677 | * Also check for all-three-contiguous being too large. |
714 | */ | 678 | */ |
715 | if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { | 679 | if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { |
716 | state |= BMAP_RIGHT_VALID; | 680 | state |= BMAP_RIGHT_VALID; |
717 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); | 681 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT); |
718 | 682 | ||
719 | if (isnullstartblock(RIGHT.br_startblock)) | 683 | if (isnullstartblock(RIGHT.br_startblock)) |
720 | state |= BMAP_RIGHT_DELAY; | 684 | state |= BMAP_RIGHT_DELAY; |
@@ -745,14 +709,14 @@ xfs_bmap_add_extent_delay_real( | |||
745 | * Filling in all of a previously delayed allocation extent. | 709 | * Filling in all of a previously delayed allocation extent. |
746 | * The left and right neighbors are both contiguous with new. | 710 | * The left and right neighbors are both contiguous with new. |
747 | */ | 711 | */ |
748 | trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); | 712 | --*idx; |
749 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), | 713 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
714 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), | ||
750 | LEFT.br_blockcount + PREV.br_blockcount + | 715 | LEFT.br_blockcount + PREV.br_blockcount + |
751 | RIGHT.br_blockcount); | 716 | RIGHT.br_blockcount); |
752 | trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); | 717 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
753 | 718 | ||
754 | xfs_iext_remove(ip, idx, 2, state); | 719 | xfs_iext_remove(ip, *idx + 1, 2, state); |
755 | ip->i_df.if_lastex = idx - 1; | ||
756 | ip->i_d.di_nextents--; | 720 | ip->i_d.di_nextents--; |
757 | if (cur == NULL) | 721 | if (cur == NULL) |
758 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | 722 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; |
@@ -784,13 +748,14 @@ xfs_bmap_add_extent_delay_real( | |||
784 | * Filling in all of a previously delayed allocation extent. | 748 | * Filling in all of a previously delayed allocation extent. |
785 | * The left neighbor is contiguous, the right is not. | 749 | * The left neighbor is contiguous, the right is not. |
786 | */ | 750 | */ |
787 | trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); | 751 | --*idx; |
788 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), | 752 | |
753 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | ||
754 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), | ||
789 | LEFT.br_blockcount + PREV.br_blockcount); | 755 | LEFT.br_blockcount + PREV.br_blockcount); |
790 | trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); | 756 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
791 | 757 | ||
792 | ip->i_df.if_lastex = idx - 1; | 758 | xfs_iext_remove(ip, *idx + 1, 1, state); |
793 | xfs_iext_remove(ip, idx, 1, state); | ||
794 | if (cur == NULL) | 759 | if (cur == NULL) |
795 | rval = XFS_ILOG_DEXT; | 760 | rval = XFS_ILOG_DEXT; |
796 | else { | 761 | else { |
@@ -814,14 +779,13 @@ xfs_bmap_add_extent_delay_real( | |||
814 | * Filling in all of a previously delayed allocation extent. | 779 | * Filling in all of a previously delayed allocation extent. |
815 | * The right neighbor is contiguous, the left is not. | 780 | * The right neighbor is contiguous, the left is not. |
816 | */ | 781 | */ |
817 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 782 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
818 | xfs_bmbt_set_startblock(ep, new->br_startblock); | 783 | xfs_bmbt_set_startblock(ep, new->br_startblock); |
819 | xfs_bmbt_set_blockcount(ep, | 784 | xfs_bmbt_set_blockcount(ep, |
820 | PREV.br_blockcount + RIGHT.br_blockcount); | 785 | PREV.br_blockcount + RIGHT.br_blockcount); |
821 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 786 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
822 | 787 | ||
823 | ip->i_df.if_lastex = idx; | 788 | xfs_iext_remove(ip, *idx + 1, 1, state); |
824 | xfs_iext_remove(ip, idx + 1, 1, state); | ||
825 | if (cur == NULL) | 789 | if (cur == NULL) |
826 | rval = XFS_ILOG_DEXT; | 790 | rval = XFS_ILOG_DEXT; |
827 | else { | 791 | else { |
@@ -837,6 +801,7 @@ xfs_bmap_add_extent_delay_real( | |||
837 | RIGHT.br_blockcount, PREV.br_state))) | 801 | RIGHT.br_blockcount, PREV.br_state))) |
838 | goto done; | 802 | goto done; |
839 | } | 803 | } |
804 | |||
840 | *dnew = 0; | 805 | *dnew = 0; |
841 | break; | 806 | break; |
842 | 807 | ||
@@ -846,11 +811,10 @@ xfs_bmap_add_extent_delay_real( | |||
846 | * Neither the left nor right neighbors are contiguous with | 811 | * Neither the left nor right neighbors are contiguous with |
847 | * the new one. | 812 | * the new one. |
848 | */ | 813 | */ |
849 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 814 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
850 | xfs_bmbt_set_startblock(ep, new->br_startblock); | 815 | xfs_bmbt_set_startblock(ep, new->br_startblock); |
851 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 816 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
852 | 817 | ||
853 | ip->i_df.if_lastex = idx; | ||
854 | ip->i_d.di_nextents++; | 818 | ip->i_d.di_nextents++; |
855 | if (cur == NULL) | 819 | if (cur == NULL) |
856 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | 820 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; |
@@ -866,6 +830,7 @@ xfs_bmap_add_extent_delay_real( | |||
866 | goto done; | 830 | goto done; |
867 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); | 831 | XFS_WANT_CORRUPTED_GOTO(i == 1, done); |
868 | } | 832 | } |
833 | |||
869 | *dnew = 0; | 834 | *dnew = 0; |
870 | break; | 835 | break; |
871 | 836 | ||
@@ -874,17 +839,16 @@ xfs_bmap_add_extent_delay_real( | |||
874 | * Filling in the first part of a previous delayed allocation. | 839 | * Filling in the first part of a previous delayed allocation. |
875 | * The left neighbor is contiguous. | 840 | * The left neighbor is contiguous. |
876 | */ | 841 | */ |
877 | trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); | 842 | trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_); |
878 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), | 843 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1), |
879 | LEFT.br_blockcount + new->br_blockcount); | 844 | LEFT.br_blockcount + new->br_blockcount); |
880 | xfs_bmbt_set_startoff(ep, | 845 | xfs_bmbt_set_startoff(ep, |
881 | PREV.br_startoff + new->br_blockcount); | 846 | PREV.br_startoff + new->br_blockcount); |
882 | trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); | 847 | trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_); |
883 | 848 | ||
884 | temp = PREV.br_blockcount - new->br_blockcount; | 849 | temp = PREV.br_blockcount - new->br_blockcount; |
885 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 850 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
886 | xfs_bmbt_set_blockcount(ep, temp); | 851 | xfs_bmbt_set_blockcount(ep, temp); |
887 | ip->i_df.if_lastex = idx - 1; | ||
888 | if (cur == NULL) | 852 | if (cur == NULL) |
889 | rval = XFS_ILOG_DEXT; | 853 | rval = XFS_ILOG_DEXT; |
890 | else { | 854 | else { |
@@ -904,7 +868,9 @@ xfs_bmap_add_extent_delay_real( | |||
904 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), | 868 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), |
905 | startblockval(PREV.br_startblock)); | 869 | startblockval(PREV.br_startblock)); |
906 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); | 870 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
907 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 871 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
872 | |||
873 | --*idx; | ||
908 | *dnew = temp; | 874 | *dnew = temp; |
909 | break; | 875 | break; |
910 | 876 | ||
@@ -913,12 +879,11 @@ xfs_bmap_add_extent_delay_real( | |||
913 | * Filling in the first part of a previous delayed allocation. | 879 | * Filling in the first part of a previous delayed allocation. |
914 | * The left neighbor is not contiguous. | 880 | * The left neighbor is not contiguous. |
915 | */ | 881 | */ |
916 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 882 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
917 | xfs_bmbt_set_startoff(ep, new_endoff); | 883 | xfs_bmbt_set_startoff(ep, new_endoff); |
918 | temp = PREV.br_blockcount - new->br_blockcount; | 884 | temp = PREV.br_blockcount - new->br_blockcount; |
919 | xfs_bmbt_set_blockcount(ep, temp); | 885 | xfs_bmbt_set_blockcount(ep, temp); |
920 | xfs_iext_insert(ip, idx, 1, new, state); | 886 | xfs_iext_insert(ip, *idx, 1, new, state); |
921 | ip->i_df.if_lastex = idx; | ||
922 | ip->i_d.di_nextents++; | 887 | ip->i_d.di_nextents++; |
923 | if (cur == NULL) | 888 | if (cur == NULL) |
924 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | 889 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; |
@@ -946,9 +911,10 @@ xfs_bmap_add_extent_delay_real( | |||
946 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), | 911 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), |
947 | startblockval(PREV.br_startblock) - | 912 | startblockval(PREV.br_startblock) - |
948 | (cur ? cur->bc_private.b.allocated : 0)); | 913 | (cur ? cur->bc_private.b.allocated : 0)); |
949 | ep = xfs_iext_get_ext(ifp, idx + 1); | 914 | ep = xfs_iext_get_ext(ifp, *idx + 1); |
950 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); | 915 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
951 | trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_); | 916 | trace_xfs_bmap_post_update(ip, *idx + 1, state, _THIS_IP_); |
917 | |||
952 | *dnew = temp; | 918 | *dnew = temp; |
953 | break; | 919 | break; |
954 | 920 | ||
@@ -958,15 +924,13 @@ xfs_bmap_add_extent_delay_real( | |||
958 | * The right neighbor is contiguous with the new allocation. | 924 | * The right neighbor is contiguous with the new allocation. |
959 | */ | 925 | */ |
960 | temp = PREV.br_blockcount - new->br_blockcount; | 926 | temp = PREV.br_blockcount - new->br_blockcount; |
961 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 927 | trace_xfs_bmap_pre_update(ip, *idx + 1, state, _THIS_IP_); |
962 | trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_); | ||
963 | xfs_bmbt_set_blockcount(ep, temp); | 928 | xfs_bmbt_set_blockcount(ep, temp); |
964 | xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), | 929 | xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx + 1), |
965 | new->br_startoff, new->br_startblock, | 930 | new->br_startoff, new->br_startblock, |
966 | new->br_blockcount + RIGHT.br_blockcount, | 931 | new->br_blockcount + RIGHT.br_blockcount, |
967 | RIGHT.br_state); | 932 | RIGHT.br_state); |
968 | trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_); | 933 | trace_xfs_bmap_post_update(ip, *idx + 1, state, _THIS_IP_); |
969 | ip->i_df.if_lastex = idx + 1; | ||
970 | if (cur == NULL) | 934 | if (cur == NULL) |
971 | rval = XFS_ILOG_DEXT; | 935 | rval = XFS_ILOG_DEXT; |
972 | else { | 936 | else { |
@@ -983,10 +947,14 @@ xfs_bmap_add_extent_delay_real( | |||
983 | RIGHT.br_state))) | 947 | RIGHT.br_state))) |
984 | goto done; | 948 | goto done; |
985 | } | 949 | } |
950 | |||
986 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), | 951 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), |
987 | startblockval(PREV.br_startblock)); | 952 | startblockval(PREV.br_startblock)); |
953 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | ||
988 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); | 954 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
989 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 955 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
956 | |||
957 | ++*idx; | ||
990 | *dnew = temp; | 958 | *dnew = temp; |
991 | break; | 959 | break; |
992 | 960 | ||
@@ -996,10 +964,9 @@ xfs_bmap_add_extent_delay_real( | |||
996 | * The right neighbor is not contiguous. | 964 | * The right neighbor is not contiguous. |
997 | */ | 965 | */ |
998 | temp = PREV.br_blockcount - new->br_blockcount; | 966 | temp = PREV.br_blockcount - new->br_blockcount; |
999 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 967 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1000 | xfs_bmbt_set_blockcount(ep, temp); | 968 | xfs_bmbt_set_blockcount(ep, temp); |
1001 | xfs_iext_insert(ip, idx + 1, 1, new, state); | 969 | xfs_iext_insert(ip, *idx + 1, 1, new, state); |
1002 | ip->i_df.if_lastex = idx + 1; | ||
1003 | ip->i_d.di_nextents++; | 970 | ip->i_d.di_nextents++; |
1004 | if (cur == NULL) | 971 | if (cur == NULL) |
1005 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | 972 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; |
@@ -1027,9 +994,11 @@ xfs_bmap_add_extent_delay_real( | |||
1027 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), | 994 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), |
1028 | startblockval(PREV.br_startblock) - | 995 | startblockval(PREV.br_startblock) - |
1029 | (cur ? cur->bc_private.b.allocated : 0)); | 996 | (cur ? cur->bc_private.b.allocated : 0)); |
1030 | ep = xfs_iext_get_ext(ifp, idx); | 997 | ep = xfs_iext_get_ext(ifp, *idx); |
1031 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); | 998 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
1032 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 999 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1000 | |||
1001 | ++*idx; | ||
1033 | *dnew = temp; | 1002 | *dnew = temp; |
1034 | break; | 1003 | break; |
1035 | 1004 | ||
@@ -1056,7 +1025,7 @@ xfs_bmap_add_extent_delay_real( | |||
1056 | */ | 1025 | */ |
1057 | temp = new->br_startoff - PREV.br_startoff; | 1026 | temp = new->br_startoff - PREV.br_startoff; |
1058 | temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; | 1027 | temp2 = PREV.br_startoff + PREV.br_blockcount - new_endoff; |
1059 | trace_xfs_bmap_pre_update(ip, idx, 0, _THIS_IP_); | 1028 | trace_xfs_bmap_pre_update(ip, *idx, 0, _THIS_IP_); |
1060 | xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */ | 1029 | xfs_bmbt_set_blockcount(ep, temp); /* truncate PREV */ |
1061 | LEFT = *new; | 1030 | LEFT = *new; |
1062 | RIGHT.br_state = PREV.br_state; | 1031 | RIGHT.br_state = PREV.br_state; |
@@ -1065,8 +1034,7 @@ xfs_bmap_add_extent_delay_real( | |||
1065 | RIGHT.br_startoff = new_endoff; | 1034 | RIGHT.br_startoff = new_endoff; |
1066 | RIGHT.br_blockcount = temp2; | 1035 | RIGHT.br_blockcount = temp2; |
1067 | /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */ | 1036 | /* insert LEFT (r[0]) and RIGHT (r[1]) at the same time */ |
1068 | xfs_iext_insert(ip, idx + 1, 2, &LEFT, state); | 1037 | xfs_iext_insert(ip, *idx + 1, 2, &LEFT, state); |
1069 | ip->i_df.if_lastex = idx + 1; | ||
1070 | ip->i_d.di_nextents++; | 1038 | ip->i_d.di_nextents++; |
1071 | if (cur == NULL) | 1039 | if (cur == NULL) |
1072 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | 1040 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; |
@@ -1097,7 +1065,7 @@ xfs_bmap_add_extent_delay_real( | |||
1097 | (cur ? cur->bc_private.b.allocated : 0)); | 1065 | (cur ? cur->bc_private.b.allocated : 0)); |
1098 | if (diff > 0 && | 1066 | if (diff > 0 && |
1099 | xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS, | 1067 | xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS, |
1100 | -((int64_t)diff), rsvd)) { | 1068 | -((int64_t)diff), 0)) { |
1101 | /* | 1069 | /* |
1102 | * Ick gross gag me with a spoon. | 1070 | * Ick gross gag me with a spoon. |
1103 | */ | 1071 | */ |
@@ -1109,7 +1077,7 @@ xfs_bmap_add_extent_delay_real( | |||
1109 | if (!diff || | 1077 | if (!diff || |
1110 | !xfs_icsb_modify_counters(ip->i_mount, | 1078 | !xfs_icsb_modify_counters(ip->i_mount, |
1111 | XFS_SBS_FDBLOCKS, | 1079 | XFS_SBS_FDBLOCKS, |
1112 | -((int64_t)diff), rsvd)) | 1080 | -((int64_t)diff), 0)) |
1113 | break; | 1081 | break; |
1114 | } | 1082 | } |
1115 | if (temp2) { | 1083 | if (temp2) { |
@@ -1118,18 +1086,20 @@ xfs_bmap_add_extent_delay_real( | |||
1118 | if (!diff || | 1086 | if (!diff || |
1119 | !xfs_icsb_modify_counters(ip->i_mount, | 1087 | !xfs_icsb_modify_counters(ip->i_mount, |
1120 | XFS_SBS_FDBLOCKS, | 1088 | XFS_SBS_FDBLOCKS, |
1121 | -((int64_t)diff), rsvd)) | 1089 | -((int64_t)diff), 0)) |
1122 | break; | 1090 | break; |
1123 | } | 1091 | } |
1124 | } | 1092 | } |
1125 | } | 1093 | } |
1126 | ep = xfs_iext_get_ext(ifp, idx); | 1094 | ep = xfs_iext_get_ext(ifp, *idx); |
1127 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); | 1095 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
1128 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 1096 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1129 | trace_xfs_bmap_pre_update(ip, idx + 2, state, _THIS_IP_); | 1097 | trace_xfs_bmap_pre_update(ip, *idx + 2, state, _THIS_IP_); |
1130 | xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2), | 1098 | xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx + 2), |
1131 | nullstartblock((int)temp2)); | 1099 | nullstartblock((int)temp2)); |
1132 | trace_xfs_bmap_post_update(ip, idx + 2, state, _THIS_IP_); | 1100 | trace_xfs_bmap_post_update(ip, *idx + 2, state, _THIS_IP_); |
1101 | |||
1102 | ++*idx; | ||
1133 | *dnew = temp + temp2; | 1103 | *dnew = temp + temp2; |
1134 | break; | 1104 | break; |
1135 | 1105 | ||
@@ -1161,7 +1131,7 @@ done: | |||
1161 | STATIC int /* error */ | 1131 | STATIC int /* error */ |
1162 | xfs_bmap_add_extent_unwritten_real( | 1132 | xfs_bmap_add_extent_unwritten_real( |
1163 | xfs_inode_t *ip, /* incore inode pointer */ | 1133 | xfs_inode_t *ip, /* incore inode pointer */ |
1164 | xfs_extnum_t idx, /* extent number to update/insert */ | 1134 | xfs_extnum_t *idx, /* extent number to update/insert */ |
1165 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ | 1135 | xfs_btree_cur_t **curp, /* if *curp is null, not a btree */ |
1166 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 1136 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
1167 | int *logflagsp) /* inode logging flags */ | 1137 | int *logflagsp) /* inode logging flags */ |
@@ -1188,7 +1158,7 @@ xfs_bmap_add_extent_unwritten_real( | |||
1188 | error = 0; | 1158 | error = 0; |
1189 | cur = *curp; | 1159 | cur = *curp; |
1190 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); | 1160 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); |
1191 | ep = xfs_iext_get_ext(ifp, idx); | 1161 | ep = xfs_iext_get_ext(ifp, *idx); |
1192 | xfs_bmbt_get_all(ep, &PREV); | 1162 | xfs_bmbt_get_all(ep, &PREV); |
1193 | newext = new->br_state; | 1163 | newext = new->br_state; |
1194 | oldext = (newext == XFS_EXT_UNWRITTEN) ? | 1164 | oldext = (newext == XFS_EXT_UNWRITTEN) ? |
@@ -1211,9 +1181,9 @@ xfs_bmap_add_extent_unwritten_real( | |||
1211 | * Check and set flags if this segment has a left neighbor. | 1181 | * Check and set flags if this segment has a left neighbor. |
1212 | * Don't set contiguous if the combined extent would be too large. | 1182 | * Don't set contiguous if the combined extent would be too large. |
1213 | */ | 1183 | */ |
1214 | if (idx > 0) { | 1184 | if (*idx > 0) { |
1215 | state |= BMAP_LEFT_VALID; | 1185 | state |= BMAP_LEFT_VALID; |
1216 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT); | 1186 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &LEFT); |
1217 | 1187 | ||
1218 | if (isnullstartblock(LEFT.br_startblock)) | 1188 | if (isnullstartblock(LEFT.br_startblock)) |
1219 | state |= BMAP_LEFT_DELAY; | 1189 | state |= BMAP_LEFT_DELAY; |
@@ -1231,9 +1201,9 @@ xfs_bmap_add_extent_unwritten_real( | |||
1231 | * Don't set contiguous if the combined extent would be too large. | 1201 | * Don't set contiguous if the combined extent would be too large. |
1232 | * Also check for all-three-contiguous being too large. | 1202 | * Also check for all-three-contiguous being too large. |
1233 | */ | 1203 | */ |
1234 | if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { | 1204 | if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1) { |
1235 | state |= BMAP_RIGHT_VALID; | 1205 | state |= BMAP_RIGHT_VALID; |
1236 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT); | 1206 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx + 1), &RIGHT); |
1237 | if (isnullstartblock(RIGHT.br_startblock)) | 1207 | if (isnullstartblock(RIGHT.br_startblock)) |
1238 | state |= BMAP_RIGHT_DELAY; | 1208 | state |= BMAP_RIGHT_DELAY; |
1239 | } | 1209 | } |
@@ -1262,14 +1232,15 @@ xfs_bmap_add_extent_unwritten_real( | |||
1262 | * Setting all of a previous oldext extent to newext. | 1232 | * Setting all of a previous oldext extent to newext. |
1263 | * The left and right neighbors are both contiguous with new. | 1233 | * The left and right neighbors are both contiguous with new. |
1264 | */ | 1234 | */ |
1265 | trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); | 1235 | --*idx; |
1266 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), | 1236 | |
1237 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | ||
1238 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), | ||
1267 | LEFT.br_blockcount + PREV.br_blockcount + | 1239 | LEFT.br_blockcount + PREV.br_blockcount + |
1268 | RIGHT.br_blockcount); | 1240 | RIGHT.br_blockcount); |
1269 | trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); | 1241 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1270 | 1242 | ||
1271 | xfs_iext_remove(ip, idx, 2, state); | 1243 | xfs_iext_remove(ip, *idx + 1, 2, state); |
1272 | ip->i_df.if_lastex = idx - 1; | ||
1273 | ip->i_d.di_nextents -= 2; | 1244 | ip->i_d.di_nextents -= 2; |
1274 | if (cur == NULL) | 1245 | if (cur == NULL) |
1275 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | 1246 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; |
@@ -1305,13 +1276,14 @@ xfs_bmap_add_extent_unwritten_real( | |||
1305 | * Setting all of a previous oldext extent to newext. | 1276 | * Setting all of a previous oldext extent to newext. |
1306 | * The left neighbor is contiguous, the right is not. | 1277 | * The left neighbor is contiguous, the right is not. |
1307 | */ | 1278 | */ |
1308 | trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); | 1279 | --*idx; |
1309 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), | 1280 | |
1281 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | ||
1282 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), | ||
1310 | LEFT.br_blockcount + PREV.br_blockcount); | 1283 | LEFT.br_blockcount + PREV.br_blockcount); |
1311 | trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); | 1284 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1312 | 1285 | ||
1313 | ip->i_df.if_lastex = idx - 1; | 1286 | xfs_iext_remove(ip, *idx + 1, 1, state); |
1314 | xfs_iext_remove(ip, idx, 1, state); | ||
1315 | ip->i_d.di_nextents--; | 1287 | ip->i_d.di_nextents--; |
1316 | if (cur == NULL) | 1288 | if (cur == NULL) |
1317 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | 1289 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; |
@@ -1341,13 +1313,12 @@ xfs_bmap_add_extent_unwritten_real( | |||
1341 | * Setting all of a previous oldext extent to newext. | 1313 | * Setting all of a previous oldext extent to newext. |
1342 | * The right neighbor is contiguous, the left is not. | 1314 | * The right neighbor is contiguous, the left is not. |
1343 | */ | 1315 | */ |
1344 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 1316 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1345 | xfs_bmbt_set_blockcount(ep, | 1317 | xfs_bmbt_set_blockcount(ep, |
1346 | PREV.br_blockcount + RIGHT.br_blockcount); | 1318 | PREV.br_blockcount + RIGHT.br_blockcount); |
1347 | xfs_bmbt_set_state(ep, newext); | 1319 | xfs_bmbt_set_state(ep, newext); |
1348 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 1320 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1349 | ip->i_df.if_lastex = idx; | 1321 | xfs_iext_remove(ip, *idx + 1, 1, state); |
1350 | xfs_iext_remove(ip, idx + 1, 1, state); | ||
1351 | ip->i_d.di_nextents--; | 1322 | ip->i_d.di_nextents--; |
1352 | if (cur == NULL) | 1323 | if (cur == NULL) |
1353 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | 1324 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; |
@@ -1378,11 +1349,10 @@ xfs_bmap_add_extent_unwritten_real( | |||
1378 | * Neither the left nor right neighbors are contiguous with | 1349 | * Neither the left nor right neighbors are contiguous with |
1379 | * the new one. | 1350 | * the new one. |
1380 | */ | 1351 | */ |
1381 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 1352 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1382 | xfs_bmbt_set_state(ep, newext); | 1353 | xfs_bmbt_set_state(ep, newext); |
1383 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 1354 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1384 | 1355 | ||
1385 | ip->i_df.if_lastex = idx; | ||
1386 | if (cur == NULL) | 1356 | if (cur == NULL) |
1387 | rval = XFS_ILOG_DEXT; | 1357 | rval = XFS_ILOG_DEXT; |
1388 | else { | 1358 | else { |
@@ -1404,21 +1374,22 @@ xfs_bmap_add_extent_unwritten_real( | |||
1404 | * Setting the first part of a previous oldext extent to newext. | 1374 | * Setting the first part of a previous oldext extent to newext. |
1405 | * The left neighbor is contiguous. | 1375 | * The left neighbor is contiguous. |
1406 | */ | 1376 | */ |
1407 | trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); | 1377 | trace_xfs_bmap_pre_update(ip, *idx - 1, state, _THIS_IP_); |
1408 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), | 1378 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx - 1), |
1409 | LEFT.br_blockcount + new->br_blockcount); | 1379 | LEFT.br_blockcount + new->br_blockcount); |
1410 | xfs_bmbt_set_startoff(ep, | 1380 | xfs_bmbt_set_startoff(ep, |
1411 | PREV.br_startoff + new->br_blockcount); | 1381 | PREV.br_startoff + new->br_blockcount); |
1412 | trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); | 1382 | trace_xfs_bmap_post_update(ip, *idx - 1, state, _THIS_IP_); |
1413 | 1383 | ||
1414 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 1384 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1415 | xfs_bmbt_set_startblock(ep, | 1385 | xfs_bmbt_set_startblock(ep, |
1416 | new->br_startblock + new->br_blockcount); | 1386 | new->br_startblock + new->br_blockcount); |
1417 | xfs_bmbt_set_blockcount(ep, | 1387 | xfs_bmbt_set_blockcount(ep, |
1418 | PREV.br_blockcount - new->br_blockcount); | 1388 | PREV.br_blockcount - new->br_blockcount); |
1419 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 1389 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1390 | |||
1391 | --*idx; | ||
1420 | 1392 | ||
1421 | ip->i_df.if_lastex = idx - 1; | ||
1422 | if (cur == NULL) | 1393 | if (cur == NULL) |
1423 | rval = XFS_ILOG_DEXT; | 1394 | rval = XFS_ILOG_DEXT; |
1424 | else { | 1395 | else { |
@@ -1449,17 +1420,16 @@ xfs_bmap_add_extent_unwritten_real( | |||
1449 | * Setting the first part of a previous oldext extent to newext. | 1420 | * Setting the first part of a previous oldext extent to newext. |
1450 | * The left neighbor is not contiguous. | 1421 | * The left neighbor is not contiguous. |
1451 | */ | 1422 | */ |
1452 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 1423 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1453 | ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); | 1424 | ASSERT(ep && xfs_bmbt_get_state(ep) == oldext); |
1454 | xfs_bmbt_set_startoff(ep, new_endoff); | 1425 | xfs_bmbt_set_startoff(ep, new_endoff); |
1455 | xfs_bmbt_set_blockcount(ep, | 1426 | xfs_bmbt_set_blockcount(ep, |
1456 | PREV.br_blockcount - new->br_blockcount); | 1427 | PREV.br_blockcount - new->br_blockcount); |
1457 | xfs_bmbt_set_startblock(ep, | 1428 | xfs_bmbt_set_startblock(ep, |
1458 | new->br_startblock + new->br_blockcount); | 1429 | new->br_startblock + new->br_blockcount); |
1459 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 1430 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1460 | 1431 | ||
1461 | xfs_iext_insert(ip, idx, 1, new, state); | 1432 | xfs_iext_insert(ip, *idx, 1, new, state); |
1462 | ip->i_df.if_lastex = idx; | ||
1463 | ip->i_d.di_nextents++; | 1433 | ip->i_d.di_nextents++; |
1464 | if (cur == NULL) | 1434 | if (cur == NULL) |
1465 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | 1435 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; |
@@ -1488,17 +1458,19 @@ xfs_bmap_add_extent_unwritten_real( | |||
1488 | * Setting the last part of a previous oldext extent to newext. | 1458 | * Setting the last part of a previous oldext extent to newext. |
1489 | * The right neighbor is contiguous with the new allocation. | 1459 | * The right neighbor is contiguous with the new allocation. |
1490 | */ | 1460 | */ |
1491 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 1461 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1492 | trace_xfs_bmap_pre_update(ip, idx + 1, state, _THIS_IP_); | ||
1493 | xfs_bmbt_set_blockcount(ep, | 1462 | xfs_bmbt_set_blockcount(ep, |
1494 | PREV.br_blockcount - new->br_blockcount); | 1463 | PREV.br_blockcount - new->br_blockcount); |
1495 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 1464 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1496 | xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, idx + 1), | 1465 | |
1466 | ++*idx; | ||
1467 | |||
1468 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); | ||
1469 | xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), | ||
1497 | new->br_startoff, new->br_startblock, | 1470 | new->br_startoff, new->br_startblock, |
1498 | new->br_blockcount + RIGHT.br_blockcount, newext); | 1471 | new->br_blockcount + RIGHT.br_blockcount, newext); |
1499 | trace_xfs_bmap_post_update(ip, idx + 1, state, _THIS_IP_); | 1472 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1500 | 1473 | ||
1501 | ip->i_df.if_lastex = idx + 1; | ||
1502 | if (cur == NULL) | 1474 | if (cur == NULL) |
1503 | rval = XFS_ILOG_DEXT; | 1475 | rval = XFS_ILOG_DEXT; |
1504 | else { | 1476 | else { |
@@ -1528,13 +1500,14 @@ xfs_bmap_add_extent_unwritten_real( | |||
1528 | * Setting the last part of a previous oldext extent to newext. | 1500 | * Setting the last part of a previous oldext extent to newext. |
1529 | * The right neighbor is not contiguous. | 1501 | * The right neighbor is not contiguous. |
1530 | */ | 1502 | */ |
1531 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 1503 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1532 | xfs_bmbt_set_blockcount(ep, | 1504 | xfs_bmbt_set_blockcount(ep, |
1533 | PREV.br_blockcount - new->br_blockcount); | 1505 | PREV.br_blockcount - new->br_blockcount); |
1534 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 1506 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1507 | |||
1508 | ++*idx; | ||
1509 | xfs_iext_insert(ip, *idx, 1, new, state); | ||
1535 | 1510 | ||
1536 | xfs_iext_insert(ip, idx + 1, 1, new, state); | ||
1537 | ip->i_df.if_lastex = idx + 1; | ||
1538 | ip->i_d.di_nextents++; | 1511 | ip->i_d.di_nextents++; |
1539 | if (cur == NULL) | 1512 | if (cur == NULL) |
1540 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | 1513 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; |
@@ -1568,10 +1541,10 @@ xfs_bmap_add_extent_unwritten_real( | |||
1568 | * newext. Contiguity is impossible here. | 1541 | * newext. Contiguity is impossible here. |
1569 | * One extent becomes three extents. | 1542 | * One extent becomes three extents. |
1570 | */ | 1543 | */ |
1571 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 1544 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1572 | xfs_bmbt_set_blockcount(ep, | 1545 | xfs_bmbt_set_blockcount(ep, |
1573 | new->br_startoff - PREV.br_startoff); | 1546 | new->br_startoff - PREV.br_startoff); |
1574 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 1547 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1575 | 1548 | ||
1576 | r[0] = *new; | 1549 | r[0] = *new; |
1577 | r[1].br_startoff = new_endoff; | 1550 | r[1].br_startoff = new_endoff; |
@@ -1579,8 +1552,10 @@ xfs_bmap_add_extent_unwritten_real( | |||
1579 | PREV.br_startoff + PREV.br_blockcount - new_endoff; | 1552 | PREV.br_startoff + PREV.br_blockcount - new_endoff; |
1580 | r[1].br_startblock = new->br_startblock + new->br_blockcount; | 1553 | r[1].br_startblock = new->br_startblock + new->br_blockcount; |
1581 | r[1].br_state = oldext; | 1554 | r[1].br_state = oldext; |
1582 | xfs_iext_insert(ip, idx + 1, 2, &r[0], state); | 1555 | |
1583 | ip->i_df.if_lastex = idx + 1; | 1556 | ++*idx; |
1557 | xfs_iext_insert(ip, *idx, 2, &r[0], state); | ||
1558 | |||
1584 | ip->i_d.di_nextents += 2; | 1559 | ip->i_d.di_nextents += 2; |
1585 | if (cur == NULL) | 1560 | if (cur == NULL) |
1586 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; | 1561 | rval = XFS_ILOG_CORE | XFS_ILOG_DEXT; |
@@ -1650,12 +1625,10 @@ done: | |||
1650 | STATIC int /* error */ | 1625 | STATIC int /* error */ |
1651 | xfs_bmap_add_extent_hole_delay( | 1626 | xfs_bmap_add_extent_hole_delay( |
1652 | xfs_inode_t *ip, /* incore inode pointer */ | 1627 | xfs_inode_t *ip, /* incore inode pointer */ |
1653 | xfs_extnum_t idx, /* extent number to update/insert */ | 1628 | xfs_extnum_t *idx, /* extent number to update/insert */ |
1654 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 1629 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
1655 | int *logflagsp, /* inode logging flags */ | 1630 | int *logflagsp) /* inode logging flags */ |
1656 | int rsvd) /* OK to allocate reserved blocks */ | ||
1657 | { | 1631 | { |
1658 | xfs_bmbt_rec_host_t *ep; /* extent record for idx */ | ||
1659 | xfs_ifork_t *ifp; /* inode fork pointer */ | 1632 | xfs_ifork_t *ifp; /* inode fork pointer */ |
1660 | xfs_bmbt_irec_t left; /* left neighbor extent entry */ | 1633 | xfs_bmbt_irec_t left; /* left neighbor extent entry */ |
1661 | xfs_filblks_t newlen=0; /* new indirect size */ | 1634 | xfs_filblks_t newlen=0; /* new indirect size */ |
@@ -1665,16 +1638,15 @@ xfs_bmap_add_extent_hole_delay( | |||
1665 | xfs_filblks_t temp=0; /* temp for indirect calculations */ | 1638 | xfs_filblks_t temp=0; /* temp for indirect calculations */ |
1666 | 1639 | ||
1667 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); | 1640 | ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK); |
1668 | ep = xfs_iext_get_ext(ifp, idx); | ||
1669 | state = 0; | 1641 | state = 0; |
1670 | ASSERT(isnullstartblock(new->br_startblock)); | 1642 | ASSERT(isnullstartblock(new->br_startblock)); |
1671 | 1643 | ||
1672 | /* | 1644 | /* |
1673 | * Check and set flags if this segment has a left neighbor | 1645 | * Check and set flags if this segment has a left neighbor |
1674 | */ | 1646 | */ |
1675 | if (idx > 0) { | 1647 | if (*idx > 0) { |
1676 | state |= BMAP_LEFT_VALID; | 1648 | state |= BMAP_LEFT_VALID; |
1677 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); | 1649 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left); |
1678 | 1650 | ||
1679 | if (isnullstartblock(left.br_startblock)) | 1651 | if (isnullstartblock(left.br_startblock)) |
1680 | state |= BMAP_LEFT_DELAY; | 1652 | state |= BMAP_LEFT_DELAY; |
@@ -1684,9 +1656,9 @@ xfs_bmap_add_extent_hole_delay( | |||
1684 | * Check and set flags if the current (right) segment exists. | 1656 | * Check and set flags if the current (right) segment exists. |
1685 | * If it doesn't exist, we're converting the hole at end-of-file. | 1657 | * If it doesn't exist, we're converting the hole at end-of-file. |
1686 | */ | 1658 | */ |
1687 | if (idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) { | 1659 | if (*idx < ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) { |
1688 | state |= BMAP_RIGHT_VALID; | 1660 | state |= BMAP_RIGHT_VALID; |
1689 | xfs_bmbt_get_all(ep, &right); | 1661 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right); |
1690 | 1662 | ||
1691 | if (isnullstartblock(right.br_startblock)) | 1663 | if (isnullstartblock(right.br_startblock)) |
1692 | state |= BMAP_RIGHT_DELAY; | 1664 | state |= BMAP_RIGHT_DELAY; |
@@ -1719,21 +1691,21 @@ xfs_bmap_add_extent_hole_delay( | |||
1719 | * on the left and on the right. | 1691 | * on the left and on the right. |
1720 | * Merge all three into a single extent record. | 1692 | * Merge all three into a single extent record. |
1721 | */ | 1693 | */ |
1694 | --*idx; | ||
1722 | temp = left.br_blockcount + new->br_blockcount + | 1695 | temp = left.br_blockcount + new->br_blockcount + |
1723 | right.br_blockcount; | 1696 | right.br_blockcount; |
1724 | 1697 | ||
1725 | trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); | 1698 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1726 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); | 1699 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp); |
1727 | oldlen = startblockval(left.br_startblock) + | 1700 | oldlen = startblockval(left.br_startblock) + |
1728 | startblockval(new->br_startblock) + | 1701 | startblockval(new->br_startblock) + |
1729 | startblockval(right.br_startblock); | 1702 | startblockval(right.br_startblock); |
1730 | newlen = xfs_bmap_worst_indlen(ip, temp); | 1703 | newlen = xfs_bmap_worst_indlen(ip, temp); |
1731 | xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), | 1704 | xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx), |
1732 | nullstartblock((int)newlen)); | 1705 | nullstartblock((int)newlen)); |
1733 | trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); | 1706 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1734 | 1707 | ||
1735 | xfs_iext_remove(ip, idx, 1, state); | 1708 | xfs_iext_remove(ip, *idx + 1, 1, state); |
1736 | ip->i_df.if_lastex = idx - 1; | ||
1737 | break; | 1709 | break; |
1738 | 1710 | ||
1739 | case BMAP_LEFT_CONTIG: | 1711 | case BMAP_LEFT_CONTIG: |
@@ -1742,17 +1714,17 @@ xfs_bmap_add_extent_hole_delay( | |||
1742 | * on the left. | 1714 | * on the left. |
1743 | * Merge the new allocation with the left neighbor. | 1715 | * Merge the new allocation with the left neighbor. |
1744 | */ | 1716 | */ |
1717 | --*idx; | ||
1745 | temp = left.br_blockcount + new->br_blockcount; | 1718 | temp = left.br_blockcount + new->br_blockcount; |
1746 | trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); | 1719 | |
1747 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp); | 1720 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1721 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), temp); | ||
1748 | oldlen = startblockval(left.br_startblock) + | 1722 | oldlen = startblockval(left.br_startblock) + |
1749 | startblockval(new->br_startblock); | 1723 | startblockval(new->br_startblock); |
1750 | newlen = xfs_bmap_worst_indlen(ip, temp); | 1724 | newlen = xfs_bmap_worst_indlen(ip, temp); |
1751 | xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1), | 1725 | xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, *idx), |
1752 | nullstartblock((int)newlen)); | 1726 | nullstartblock((int)newlen)); |
1753 | trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); | 1727 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1754 | |||
1755 | ip->i_df.if_lastex = idx - 1; | ||
1756 | break; | 1728 | break; |
1757 | 1729 | ||
1758 | case BMAP_RIGHT_CONTIG: | 1730 | case BMAP_RIGHT_CONTIG: |
@@ -1761,16 +1733,15 @@ xfs_bmap_add_extent_hole_delay( | |||
1761 | * on the right. | 1733 | * on the right. |
1762 | * Merge the new allocation with the right neighbor. | 1734 | * Merge the new allocation with the right neighbor. |
1763 | */ | 1735 | */ |
1764 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 1736 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1765 | temp = new->br_blockcount + right.br_blockcount; | 1737 | temp = new->br_blockcount + right.br_blockcount; |
1766 | oldlen = startblockval(new->br_startblock) + | 1738 | oldlen = startblockval(new->br_startblock) + |
1767 | startblockval(right.br_startblock); | 1739 | startblockval(right.br_startblock); |
1768 | newlen = xfs_bmap_worst_indlen(ip, temp); | 1740 | newlen = xfs_bmap_worst_indlen(ip, temp); |
1769 | xfs_bmbt_set_allf(ep, new->br_startoff, | 1741 | xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), |
1742 | new->br_startoff, | ||
1770 | nullstartblock((int)newlen), temp, right.br_state); | 1743 | nullstartblock((int)newlen), temp, right.br_state); |
1771 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 1744 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1772 | |||
1773 | ip->i_df.if_lastex = idx; | ||
1774 | break; | 1745 | break; |
1775 | 1746 | ||
1776 | case 0: | 1747 | case 0: |
@@ -1780,14 +1751,13 @@ xfs_bmap_add_extent_hole_delay( | |||
1780 | * Insert a new entry. | 1751 | * Insert a new entry. |
1781 | */ | 1752 | */ |
1782 | oldlen = newlen = 0; | 1753 | oldlen = newlen = 0; |
1783 | xfs_iext_insert(ip, idx, 1, new, state); | 1754 | xfs_iext_insert(ip, *idx, 1, new, state); |
1784 | ip->i_df.if_lastex = idx; | ||
1785 | break; | 1755 | break; |
1786 | } | 1756 | } |
1787 | if (oldlen != newlen) { | 1757 | if (oldlen != newlen) { |
1788 | ASSERT(oldlen > newlen); | 1758 | ASSERT(oldlen > newlen); |
1789 | xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS, | 1759 | xfs_icsb_modify_counters(ip->i_mount, XFS_SBS_FDBLOCKS, |
1790 | (int64_t)(oldlen - newlen), rsvd); | 1760 | (int64_t)(oldlen - newlen), 0); |
1791 | /* | 1761 | /* |
1792 | * Nothing to do for disk quota accounting here. | 1762 | * Nothing to do for disk quota accounting here. |
1793 | */ | 1763 | */ |
@@ -1803,13 +1773,12 @@ xfs_bmap_add_extent_hole_delay( | |||
1803 | STATIC int /* error */ | 1773 | STATIC int /* error */ |
1804 | xfs_bmap_add_extent_hole_real( | 1774 | xfs_bmap_add_extent_hole_real( |
1805 | xfs_inode_t *ip, /* incore inode pointer */ | 1775 | xfs_inode_t *ip, /* incore inode pointer */ |
1806 | xfs_extnum_t idx, /* extent number to update/insert */ | 1776 | xfs_extnum_t *idx, /* extent number to update/insert */ |
1807 | xfs_btree_cur_t *cur, /* if null, not a btree */ | 1777 | xfs_btree_cur_t *cur, /* if null, not a btree */ |
1808 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ | 1778 | xfs_bmbt_irec_t *new, /* new data to add to file extents */ |
1809 | int *logflagsp, /* inode logging flags */ | 1779 | int *logflagsp, /* inode logging flags */ |
1810 | int whichfork) /* data or attr fork */ | 1780 | int whichfork) /* data or attr fork */ |
1811 | { | 1781 | { |
1812 | xfs_bmbt_rec_host_t *ep; /* pointer to extent entry ins. point */ | ||
1813 | int error; /* error return value */ | 1782 | int error; /* error return value */ |
1814 | int i; /* temp state */ | 1783 | int i; /* temp state */ |
1815 | xfs_ifork_t *ifp; /* inode fork pointer */ | 1784 | xfs_ifork_t *ifp; /* inode fork pointer */ |
@@ -1819,8 +1788,7 @@ xfs_bmap_add_extent_hole_real( | |||
1819 | int state; /* state bits, accessed thru macros */ | 1788 | int state; /* state bits, accessed thru macros */ |
1820 | 1789 | ||
1821 | ifp = XFS_IFORK_PTR(ip, whichfork); | 1790 | ifp = XFS_IFORK_PTR(ip, whichfork); |
1822 | ASSERT(idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); | 1791 | ASSERT(*idx <= ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); |
1823 | ep = xfs_iext_get_ext(ifp, idx); | ||
1824 | state = 0; | 1792 | state = 0; |
1825 | 1793 | ||
1826 | if (whichfork == XFS_ATTR_FORK) | 1794 | if (whichfork == XFS_ATTR_FORK) |
@@ -1829,9 +1797,9 @@ xfs_bmap_add_extent_hole_real( | |||
1829 | /* | 1797 | /* |
1830 | * Check and set flags if this segment has a left neighbor. | 1798 | * Check and set flags if this segment has a left neighbor. |
1831 | */ | 1799 | */ |
1832 | if (idx > 0) { | 1800 | if (*idx > 0) { |
1833 | state |= BMAP_LEFT_VALID; | 1801 | state |= BMAP_LEFT_VALID; |
1834 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left); | 1802 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx - 1), &left); |
1835 | if (isnullstartblock(left.br_startblock)) | 1803 | if (isnullstartblock(left.br_startblock)) |
1836 | state |= BMAP_LEFT_DELAY; | 1804 | state |= BMAP_LEFT_DELAY; |
1837 | } | 1805 | } |
@@ -1840,9 +1808,9 @@ xfs_bmap_add_extent_hole_real( | |||
1840 | * Check and set flags if this segment has a current value. | 1808 | * Check and set flags if this segment has a current value. |
1841 | * Not true if we're inserting into the "hole" at eof. | 1809 | * Not true if we're inserting into the "hole" at eof. |
1842 | */ | 1810 | */ |
1843 | if (idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) { | 1811 | if (*idx < ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)) { |
1844 | state |= BMAP_RIGHT_VALID; | 1812 | state |= BMAP_RIGHT_VALID; |
1845 | xfs_bmbt_get_all(ep, &right); | 1813 | xfs_bmbt_get_all(xfs_iext_get_ext(ifp, *idx), &right); |
1846 | if (isnullstartblock(right.br_startblock)) | 1814 | if (isnullstartblock(right.br_startblock)) |
1847 | state |= BMAP_RIGHT_DELAY; | 1815 | state |= BMAP_RIGHT_DELAY; |
1848 | } | 1816 | } |
@@ -1879,14 +1847,15 @@ xfs_bmap_add_extent_hole_real( | |||
1879 | * left and on the right. | 1847 | * left and on the right. |
1880 | * Merge all three into a single extent record. | 1848 | * Merge all three into a single extent record. |
1881 | */ | 1849 | */ |
1882 | trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); | 1850 | --*idx; |
1883 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), | 1851 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1852 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), | ||
1884 | left.br_blockcount + new->br_blockcount + | 1853 | left.br_blockcount + new->br_blockcount + |
1885 | right.br_blockcount); | 1854 | right.br_blockcount); |
1886 | trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); | 1855 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1856 | |||
1857 | xfs_iext_remove(ip, *idx + 1, 1, state); | ||
1887 | 1858 | ||
1888 | xfs_iext_remove(ip, idx, 1, state); | ||
1889 | ifp->if_lastex = idx - 1; | ||
1890 | XFS_IFORK_NEXT_SET(ip, whichfork, | 1859 | XFS_IFORK_NEXT_SET(ip, whichfork, |
1891 | XFS_IFORK_NEXTENTS(ip, whichfork) - 1); | 1860 | XFS_IFORK_NEXTENTS(ip, whichfork) - 1); |
1892 | if (cur == NULL) { | 1861 | if (cur == NULL) { |
@@ -1921,12 +1890,12 @@ xfs_bmap_add_extent_hole_real( | |||
1921 | * on the left. | 1890 | * on the left. |
1922 | * Merge the new allocation with the left neighbor. | 1891 | * Merge the new allocation with the left neighbor. |
1923 | */ | 1892 | */ |
1924 | trace_xfs_bmap_pre_update(ip, idx - 1, state, _THIS_IP_); | 1893 | --*idx; |
1925 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), | 1894 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1895 | xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, *idx), | ||
1926 | left.br_blockcount + new->br_blockcount); | 1896 | left.br_blockcount + new->br_blockcount); |
1927 | trace_xfs_bmap_post_update(ip, idx - 1, state, _THIS_IP_); | 1897 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1928 | 1898 | ||
1929 | ifp->if_lastex = idx - 1; | ||
1930 | if (cur == NULL) { | 1899 | if (cur == NULL) { |
1931 | rval = xfs_ilog_fext(whichfork); | 1900 | rval = xfs_ilog_fext(whichfork); |
1932 | } else { | 1901 | } else { |
@@ -1952,13 +1921,13 @@ xfs_bmap_add_extent_hole_real( | |||
1952 | * on the right. | 1921 | * on the right. |
1953 | * Merge the new allocation with the right neighbor. | 1922 | * Merge the new allocation with the right neighbor. |
1954 | */ | 1923 | */ |
1955 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 1924 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
1956 | xfs_bmbt_set_allf(ep, new->br_startoff, new->br_startblock, | 1925 | xfs_bmbt_set_allf(xfs_iext_get_ext(ifp, *idx), |
1926 | new->br_startoff, new->br_startblock, | ||
1957 | new->br_blockcount + right.br_blockcount, | 1927 | new->br_blockcount + right.br_blockcount, |
1958 | right.br_state); | 1928 | right.br_state); |
1959 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 1929 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
1960 | 1930 | ||
1961 | ifp->if_lastex = idx; | ||
1962 | if (cur == NULL) { | 1931 | if (cur == NULL) { |
1963 | rval = xfs_ilog_fext(whichfork); | 1932 | rval = xfs_ilog_fext(whichfork); |
1964 | } else { | 1933 | } else { |
@@ -1984,8 +1953,7 @@ xfs_bmap_add_extent_hole_real( | |||
1984 | * real allocation. | 1953 | * real allocation. |
1985 | * Insert a new entry. | 1954 | * Insert a new entry. |
1986 | */ | 1955 | */ |
1987 | xfs_iext_insert(ip, idx, 1, new, state); | 1956 | xfs_iext_insert(ip, *idx, 1, new, state); |
1988 | ifp->if_lastex = idx; | ||
1989 | XFS_IFORK_NEXT_SET(ip, whichfork, | 1957 | XFS_IFORK_NEXT_SET(ip, whichfork, |
1990 | XFS_IFORK_NEXTENTS(ip, whichfork) + 1); | 1958 | XFS_IFORK_NEXTENTS(ip, whichfork) + 1); |
1991 | if (cur == NULL) { | 1959 | if (cur == NULL) { |
@@ -2833,13 +2801,12 @@ STATIC int /* error */ | |||
2833 | xfs_bmap_del_extent( | 2801 | xfs_bmap_del_extent( |
2834 | xfs_inode_t *ip, /* incore inode pointer */ | 2802 | xfs_inode_t *ip, /* incore inode pointer */ |
2835 | xfs_trans_t *tp, /* current transaction pointer */ | 2803 | xfs_trans_t *tp, /* current transaction pointer */ |
2836 | xfs_extnum_t idx, /* extent number to update/delete */ | 2804 | xfs_extnum_t *idx, /* extent number to update/delete */ |
2837 | xfs_bmap_free_t *flist, /* list of extents to be freed */ | 2805 | xfs_bmap_free_t *flist, /* list of extents to be freed */ |
2838 | xfs_btree_cur_t *cur, /* if null, not a btree */ | 2806 | xfs_btree_cur_t *cur, /* if null, not a btree */ |
2839 | xfs_bmbt_irec_t *del, /* data to remove from extents */ | 2807 | xfs_bmbt_irec_t *del, /* data to remove from extents */ |
2840 | int *logflagsp, /* inode logging flags */ | 2808 | int *logflagsp, /* inode logging flags */ |
2841 | int whichfork, /* data or attr fork */ | 2809 | int whichfork) /* data or attr fork */ |
2842 | int rsvd) /* OK to allocate reserved blocks */ | ||
2843 | { | 2810 | { |
2844 | xfs_filblks_t da_new; /* new delay-alloc indirect blocks */ | 2811 | xfs_filblks_t da_new; /* new delay-alloc indirect blocks */ |
2845 | xfs_filblks_t da_old; /* old delay-alloc indirect blocks */ | 2812 | xfs_filblks_t da_old; /* old delay-alloc indirect blocks */ |
@@ -2870,10 +2837,10 @@ xfs_bmap_del_extent( | |||
2870 | 2837 | ||
2871 | mp = ip->i_mount; | 2838 | mp = ip->i_mount; |
2872 | ifp = XFS_IFORK_PTR(ip, whichfork); | 2839 | ifp = XFS_IFORK_PTR(ip, whichfork); |
2873 | ASSERT((idx >= 0) && (idx < ifp->if_bytes / | 2840 | ASSERT((*idx >= 0) && (*idx < ifp->if_bytes / |
2874 | (uint)sizeof(xfs_bmbt_rec_t))); | 2841 | (uint)sizeof(xfs_bmbt_rec_t))); |
2875 | ASSERT(del->br_blockcount > 0); | 2842 | ASSERT(del->br_blockcount > 0); |
2876 | ep = xfs_iext_get_ext(ifp, idx); | 2843 | ep = xfs_iext_get_ext(ifp, *idx); |
2877 | xfs_bmbt_get_all(ep, &got); | 2844 | xfs_bmbt_get_all(ep, &got); |
2878 | ASSERT(got.br_startoff <= del->br_startoff); | 2845 | ASSERT(got.br_startoff <= del->br_startoff); |
2879 | del_endoff = del->br_startoff + del->br_blockcount; | 2846 | del_endoff = del->br_startoff + del->br_blockcount; |
@@ -2947,11 +2914,12 @@ xfs_bmap_del_extent( | |||
2947 | /* | 2914 | /* |
2948 | * Matches the whole extent. Delete the entry. | 2915 | * Matches the whole extent. Delete the entry. |
2949 | */ | 2916 | */ |
2950 | xfs_iext_remove(ip, idx, 1, | 2917 | xfs_iext_remove(ip, *idx, 1, |
2951 | whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); | 2918 | whichfork == XFS_ATTR_FORK ? BMAP_ATTRFORK : 0); |
2952 | ifp->if_lastex = idx; | 2919 | --*idx; |
2953 | if (delay) | 2920 | if (delay) |
2954 | break; | 2921 | break; |
2922 | |||
2955 | XFS_IFORK_NEXT_SET(ip, whichfork, | 2923 | XFS_IFORK_NEXT_SET(ip, whichfork, |
2956 | XFS_IFORK_NEXTENTS(ip, whichfork) - 1); | 2924 | XFS_IFORK_NEXTENTS(ip, whichfork) - 1); |
2957 | flags |= XFS_ILOG_CORE; | 2925 | flags |= XFS_ILOG_CORE; |
@@ -2968,21 +2936,20 @@ xfs_bmap_del_extent( | |||
2968 | /* | 2936 | /* |
2969 | * Deleting the first part of the extent. | 2937 | * Deleting the first part of the extent. |
2970 | */ | 2938 | */ |
2971 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 2939 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
2972 | xfs_bmbt_set_startoff(ep, del_endoff); | 2940 | xfs_bmbt_set_startoff(ep, del_endoff); |
2973 | temp = got.br_blockcount - del->br_blockcount; | 2941 | temp = got.br_blockcount - del->br_blockcount; |
2974 | xfs_bmbt_set_blockcount(ep, temp); | 2942 | xfs_bmbt_set_blockcount(ep, temp); |
2975 | ifp->if_lastex = idx; | ||
2976 | if (delay) { | 2943 | if (delay) { |
2977 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), | 2944 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), |
2978 | da_old); | 2945 | da_old); |
2979 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); | 2946 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
2980 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 2947 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
2981 | da_new = temp; | 2948 | da_new = temp; |
2982 | break; | 2949 | break; |
2983 | } | 2950 | } |
2984 | xfs_bmbt_set_startblock(ep, del_endblock); | 2951 | xfs_bmbt_set_startblock(ep, del_endblock); |
2985 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 2952 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
2986 | if (!cur) { | 2953 | if (!cur) { |
2987 | flags |= xfs_ilog_fext(whichfork); | 2954 | flags |= xfs_ilog_fext(whichfork); |
2988 | break; | 2955 | break; |
@@ -2998,18 +2965,17 @@ xfs_bmap_del_extent( | |||
2998 | * Deleting the last part of the extent. | 2965 | * Deleting the last part of the extent. |
2999 | */ | 2966 | */ |
3000 | temp = got.br_blockcount - del->br_blockcount; | 2967 | temp = got.br_blockcount - del->br_blockcount; |
3001 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 2968 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
3002 | xfs_bmbt_set_blockcount(ep, temp); | 2969 | xfs_bmbt_set_blockcount(ep, temp); |
3003 | ifp->if_lastex = idx; | ||
3004 | if (delay) { | 2970 | if (delay) { |
3005 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), | 2971 | temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp), |
3006 | da_old); | 2972 | da_old); |
3007 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); | 2973 | xfs_bmbt_set_startblock(ep, nullstartblock((int)temp)); |
3008 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 2974 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
3009 | da_new = temp; | 2975 | da_new = temp; |
3010 | break; | 2976 | break; |
3011 | } | 2977 | } |
3012 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 2978 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
3013 | if (!cur) { | 2979 | if (!cur) { |
3014 | flags |= xfs_ilog_fext(whichfork); | 2980 | flags |= xfs_ilog_fext(whichfork); |
3015 | break; | 2981 | break; |
@@ -3026,7 +2992,7 @@ xfs_bmap_del_extent( | |||
3026 | * Deleting the middle of the extent. | 2992 | * Deleting the middle of the extent. |
3027 | */ | 2993 | */ |
3028 | temp = del->br_startoff - got.br_startoff; | 2994 | temp = del->br_startoff - got.br_startoff; |
3029 | trace_xfs_bmap_pre_update(ip, idx, state, _THIS_IP_); | 2995 | trace_xfs_bmap_pre_update(ip, *idx, state, _THIS_IP_); |
3030 | xfs_bmbt_set_blockcount(ep, temp); | 2996 | xfs_bmbt_set_blockcount(ep, temp); |
3031 | new.br_startoff = del_endoff; | 2997 | new.br_startoff = del_endoff; |
3032 | temp2 = got_endoff - del_endoff; | 2998 | temp2 = got_endoff - del_endoff; |
@@ -3113,9 +3079,9 @@ xfs_bmap_del_extent( | |||
3113 | } | 3079 | } |
3114 | } | 3080 | } |
3115 | } | 3081 | } |
3116 | trace_xfs_bmap_post_update(ip, idx, state, _THIS_IP_); | 3082 | trace_xfs_bmap_post_update(ip, *idx, state, _THIS_IP_); |
3117 | xfs_iext_insert(ip, idx + 1, 1, &new, state); | 3083 | xfs_iext_insert(ip, *idx + 1, 1, &new, state); |
3118 | ifp->if_lastex = idx + 1; | 3084 | ++*idx; |
3119 | break; | 3085 | break; |
3120 | } | 3086 | } |
3121 | /* | 3087 | /* |
@@ -3142,7 +3108,7 @@ xfs_bmap_del_extent( | |||
3142 | ASSERT(da_old >= da_new); | 3108 | ASSERT(da_old >= da_new); |
3143 | if (da_old > da_new) { | 3109 | if (da_old > da_new) { |
3144 | xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, | 3110 | xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, |
3145 | (int64_t)(da_old - da_new), rsvd); | 3111 | (int64_t)(da_old - da_new), 0); |
3146 | } | 3112 | } |
3147 | done: | 3113 | done: |
3148 | *logflagsp = flags; | 3114 | *logflagsp = flags; |
@@ -4562,29 +4528,24 @@ xfs_bmapi( | |||
4562 | if (rt) { | 4528 | if (rt) { |
4563 | error = xfs_mod_incore_sb(mp, | 4529 | error = xfs_mod_incore_sb(mp, |
4564 | XFS_SBS_FREXTENTS, | 4530 | XFS_SBS_FREXTENTS, |
4565 | -((int64_t)extsz), (flags & | 4531 | -((int64_t)extsz), 0); |
4566 | XFS_BMAPI_RSVBLOCKS)); | ||
4567 | } else { | 4532 | } else { |
4568 | error = xfs_icsb_modify_counters(mp, | 4533 | error = xfs_icsb_modify_counters(mp, |
4569 | XFS_SBS_FDBLOCKS, | 4534 | XFS_SBS_FDBLOCKS, |
4570 | -((int64_t)alen), (flags & | 4535 | -((int64_t)alen), 0); |
4571 | XFS_BMAPI_RSVBLOCKS)); | ||
4572 | } | 4536 | } |
4573 | if (!error) { | 4537 | if (!error) { |
4574 | error = xfs_icsb_modify_counters(mp, | 4538 | error = xfs_icsb_modify_counters(mp, |
4575 | XFS_SBS_FDBLOCKS, | 4539 | XFS_SBS_FDBLOCKS, |
4576 | -((int64_t)indlen), (flags & | 4540 | -((int64_t)indlen), 0); |
4577 | XFS_BMAPI_RSVBLOCKS)); | ||
4578 | if (error && rt) | 4541 | if (error && rt) |
4579 | xfs_mod_incore_sb(mp, | 4542 | xfs_mod_incore_sb(mp, |
4580 | XFS_SBS_FREXTENTS, | 4543 | XFS_SBS_FREXTENTS, |
4581 | (int64_t)extsz, (flags & | 4544 | (int64_t)extsz, 0); |
4582 | XFS_BMAPI_RSVBLOCKS)); | ||
4583 | else if (error) | 4545 | else if (error) |
4584 | xfs_icsb_modify_counters(mp, | 4546 | xfs_icsb_modify_counters(mp, |
4585 | XFS_SBS_FDBLOCKS, | 4547 | XFS_SBS_FDBLOCKS, |
4586 | (int64_t)alen, (flags & | 4548 | (int64_t)alen, 0); |
4587 | XFS_BMAPI_RSVBLOCKS)); | ||
4588 | } | 4549 | } |
4589 | 4550 | ||
4590 | if (error) { | 4551 | if (error) { |
@@ -4701,13 +4662,12 @@ xfs_bmapi( | |||
4701 | if (!wasdelay && (flags & XFS_BMAPI_PREALLOC)) | 4662 | if (!wasdelay && (flags & XFS_BMAPI_PREALLOC)) |
4702 | got.br_state = XFS_EXT_UNWRITTEN; | 4663 | got.br_state = XFS_EXT_UNWRITTEN; |
4703 | } | 4664 | } |
4704 | error = xfs_bmap_add_extent(ip, lastx, &cur, &got, | 4665 | error = xfs_bmap_add_extent(ip, &lastx, &cur, &got, |
4705 | firstblock, flist, &tmp_logflags, | 4666 | firstblock, flist, &tmp_logflags, |
4706 | whichfork, (flags & XFS_BMAPI_RSVBLOCKS)); | 4667 | whichfork); |
4707 | logflags |= tmp_logflags; | 4668 | logflags |= tmp_logflags; |
4708 | if (error) | 4669 | if (error) |
4709 | goto error0; | 4670 | goto error0; |
4710 | lastx = ifp->if_lastex; | ||
4711 | ep = xfs_iext_get_ext(ifp, lastx); | 4671 | ep = xfs_iext_get_ext(ifp, lastx); |
4712 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); | 4672 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
4713 | xfs_bmbt_get_all(ep, &got); | 4673 | xfs_bmbt_get_all(ep, &got); |
@@ -4803,13 +4763,12 @@ xfs_bmapi( | |||
4803 | mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) | 4763 | mval->br_state = (mval->br_state == XFS_EXT_UNWRITTEN) |
4804 | ? XFS_EXT_NORM | 4764 | ? XFS_EXT_NORM |
4805 | : XFS_EXT_UNWRITTEN; | 4765 | : XFS_EXT_UNWRITTEN; |
4806 | error = xfs_bmap_add_extent(ip, lastx, &cur, mval, | 4766 | error = xfs_bmap_add_extent(ip, &lastx, &cur, mval, |
4807 | firstblock, flist, &tmp_logflags, | 4767 | firstblock, flist, &tmp_logflags, |
4808 | whichfork, (flags & XFS_BMAPI_RSVBLOCKS)); | 4768 | whichfork); |
4809 | logflags |= tmp_logflags; | 4769 | logflags |= tmp_logflags; |
4810 | if (error) | 4770 | if (error) |
4811 | goto error0; | 4771 | goto error0; |
4812 | lastx = ifp->if_lastex; | ||
4813 | ep = xfs_iext_get_ext(ifp, lastx); | 4772 | ep = xfs_iext_get_ext(ifp, lastx); |
4814 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); | 4773 | nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t); |
4815 | xfs_bmbt_get_all(ep, &got); | 4774 | xfs_bmbt_get_all(ep, &got); |
@@ -4868,14 +4827,14 @@ xfs_bmapi( | |||
4868 | /* | 4827 | /* |
4869 | * Else go on to the next record. | 4828 | * Else go on to the next record. |
4870 | */ | 4829 | */ |
4871 | ep = xfs_iext_get_ext(ifp, ++lastx); | ||
4872 | prev = got; | 4830 | prev = got; |
4873 | if (lastx >= nextents) | 4831 | if (++lastx < nextents) { |
4874 | eof = 1; | 4832 | ep = xfs_iext_get_ext(ifp, lastx); |
4875 | else | ||
4876 | xfs_bmbt_get_all(ep, &got); | 4833 | xfs_bmbt_get_all(ep, &got); |
4834 | } else { | ||
4835 | eof = 1; | ||
4836 | } | ||
4877 | } | 4837 | } |
4878 | ifp->if_lastex = lastx; | ||
4879 | *nmap = n; | 4838 | *nmap = n; |
4880 | /* | 4839 | /* |
4881 | * Transform from btree to extents, give it cur. | 4840 | * Transform from btree to extents, give it cur. |
@@ -4984,7 +4943,6 @@ xfs_bmapi_single( | |||
4984 | ASSERT(!isnullstartblock(got.br_startblock)); | 4943 | ASSERT(!isnullstartblock(got.br_startblock)); |
4985 | ASSERT(bno < got.br_startoff + got.br_blockcount); | 4944 | ASSERT(bno < got.br_startoff + got.br_blockcount); |
4986 | *fsb = got.br_startblock + (bno - got.br_startoff); | 4945 | *fsb = got.br_startblock + (bno - got.br_startoff); |
4987 | ifp->if_lastex = lastx; | ||
4988 | return 0; | 4946 | return 0; |
4989 | } | 4947 | } |
4990 | 4948 | ||
@@ -5026,7 +4984,6 @@ xfs_bunmapi( | |||
5026 | int tmp_logflags; /* partial logging flags */ | 4984 | int tmp_logflags; /* partial logging flags */ |
5027 | int wasdel; /* was a delayed alloc extent */ | 4985 | int wasdel; /* was a delayed alloc extent */ |
5028 | int whichfork; /* data or attribute fork */ | 4986 | int whichfork; /* data or attribute fork */ |
5029 | int rsvd; /* OK to allocate reserved blocks */ | ||
5030 | xfs_fsblock_t sum; | 4987 | xfs_fsblock_t sum; |
5031 | 4988 | ||
5032 | trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_); | 4989 | trace_xfs_bunmap(ip, bno, len, flags, _RET_IP_); |
@@ -5044,7 +5001,7 @@ xfs_bunmapi( | |||
5044 | mp = ip->i_mount; | 5001 | mp = ip->i_mount; |
5045 | if (XFS_FORCED_SHUTDOWN(mp)) | 5002 | if (XFS_FORCED_SHUTDOWN(mp)) |
5046 | return XFS_ERROR(EIO); | 5003 | return XFS_ERROR(EIO); |
5047 | rsvd = (flags & XFS_BMAPI_RSVBLOCKS) != 0; | 5004 | |
5048 | ASSERT(len > 0); | 5005 | ASSERT(len > 0); |
5049 | ASSERT(nexts >= 0); | 5006 | ASSERT(nexts >= 0); |
5050 | ASSERT(ifp->if_ext_max == | 5007 | ASSERT(ifp->if_ext_max == |
@@ -5160,9 +5117,9 @@ xfs_bunmapi( | |||
5160 | del.br_blockcount = mod; | 5117 | del.br_blockcount = mod; |
5161 | } | 5118 | } |
5162 | del.br_state = XFS_EXT_UNWRITTEN; | 5119 | del.br_state = XFS_EXT_UNWRITTEN; |
5163 | error = xfs_bmap_add_extent(ip, lastx, &cur, &del, | 5120 | error = xfs_bmap_add_extent(ip, &lastx, &cur, &del, |
5164 | firstblock, flist, &logflags, | 5121 | firstblock, flist, &logflags, |
5165 | XFS_DATA_FORK, 0); | 5122 | XFS_DATA_FORK); |
5166 | if (error) | 5123 | if (error) |
5167 | goto error0; | 5124 | goto error0; |
5168 | goto nodelete; | 5125 | goto nodelete; |
@@ -5188,9 +5145,12 @@ xfs_bunmapi( | |||
5188 | */ | 5145 | */ |
5189 | ASSERT(bno >= del.br_blockcount); | 5146 | ASSERT(bno >= del.br_blockcount); |
5190 | bno -= del.br_blockcount; | 5147 | bno -= del.br_blockcount; |
5191 | if (bno < got.br_startoff) { | 5148 | if (got.br_startoff > bno) { |
5192 | if (--lastx >= 0) | 5149 | if (--lastx >= 0) { |
5193 | xfs_bmbt_get_all(--ep, &got); | 5150 | ep = xfs_iext_get_ext(ifp, |
5151 | lastx); | ||
5152 | xfs_bmbt_get_all(ep, &got); | ||
5153 | } | ||
5194 | } | 5154 | } |
5195 | continue; | 5155 | continue; |
5196 | } else if (del.br_state == XFS_EXT_UNWRITTEN) { | 5156 | } else if (del.br_state == XFS_EXT_UNWRITTEN) { |
@@ -5214,18 +5174,19 @@ xfs_bunmapi( | |||
5214 | prev.br_startoff = start; | 5174 | prev.br_startoff = start; |
5215 | } | 5175 | } |
5216 | prev.br_state = XFS_EXT_UNWRITTEN; | 5176 | prev.br_state = XFS_EXT_UNWRITTEN; |
5217 | error = xfs_bmap_add_extent(ip, lastx - 1, &cur, | 5177 | lastx--; |
5178 | error = xfs_bmap_add_extent(ip, &lastx, &cur, | ||
5218 | &prev, firstblock, flist, &logflags, | 5179 | &prev, firstblock, flist, &logflags, |
5219 | XFS_DATA_FORK, 0); | 5180 | XFS_DATA_FORK); |
5220 | if (error) | 5181 | if (error) |
5221 | goto error0; | 5182 | goto error0; |
5222 | goto nodelete; | 5183 | goto nodelete; |
5223 | } else { | 5184 | } else { |
5224 | ASSERT(del.br_state == XFS_EXT_NORM); | 5185 | ASSERT(del.br_state == XFS_EXT_NORM); |
5225 | del.br_state = XFS_EXT_UNWRITTEN; | 5186 | del.br_state = XFS_EXT_UNWRITTEN; |
5226 | error = xfs_bmap_add_extent(ip, lastx, &cur, | 5187 | error = xfs_bmap_add_extent(ip, &lastx, &cur, |
5227 | &del, firstblock, flist, &logflags, | 5188 | &del, firstblock, flist, &logflags, |
5228 | XFS_DATA_FORK, 0); | 5189 | XFS_DATA_FORK); |
5229 | if (error) | 5190 | if (error) |
5230 | goto error0; | 5191 | goto error0; |
5231 | goto nodelete; | 5192 | goto nodelete; |
@@ -5240,13 +5201,13 @@ xfs_bunmapi( | |||
5240 | rtexts = XFS_FSB_TO_B(mp, del.br_blockcount); | 5201 | rtexts = XFS_FSB_TO_B(mp, del.br_blockcount); |
5241 | do_div(rtexts, mp->m_sb.sb_rextsize); | 5202 | do_div(rtexts, mp->m_sb.sb_rextsize); |
5242 | xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, | 5203 | xfs_mod_incore_sb(mp, XFS_SBS_FREXTENTS, |
5243 | (int64_t)rtexts, rsvd); | 5204 | (int64_t)rtexts, 0); |
5244 | (void)xfs_trans_reserve_quota_nblks(NULL, | 5205 | (void)xfs_trans_reserve_quota_nblks(NULL, |
5245 | ip, -((long)del.br_blockcount), 0, | 5206 | ip, -((long)del.br_blockcount), 0, |
5246 | XFS_QMOPT_RES_RTBLKS); | 5207 | XFS_QMOPT_RES_RTBLKS); |
5247 | } else { | 5208 | } else { |
5248 | xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, | 5209 | xfs_icsb_modify_counters(mp, XFS_SBS_FDBLOCKS, |
5249 | (int64_t)del.br_blockcount, rsvd); | 5210 | (int64_t)del.br_blockcount, 0); |
5250 | (void)xfs_trans_reserve_quota_nblks(NULL, | 5211 | (void)xfs_trans_reserve_quota_nblks(NULL, |
5251 | ip, -((long)del.br_blockcount), 0, | 5212 | ip, -((long)del.br_blockcount), 0, |
5252 | XFS_QMOPT_RES_REGBLKS); | 5213 | XFS_QMOPT_RES_REGBLKS); |
@@ -5277,31 +5238,29 @@ xfs_bunmapi( | |||
5277 | error = XFS_ERROR(ENOSPC); | 5238 | error = XFS_ERROR(ENOSPC); |
5278 | goto error0; | 5239 | goto error0; |
5279 | } | 5240 | } |
5280 | error = xfs_bmap_del_extent(ip, tp, lastx, flist, cur, &del, | 5241 | error = xfs_bmap_del_extent(ip, tp, &lastx, flist, cur, &del, |
5281 | &tmp_logflags, whichfork, rsvd); | 5242 | &tmp_logflags, whichfork); |
5282 | logflags |= tmp_logflags; | 5243 | logflags |= tmp_logflags; |
5283 | if (error) | 5244 | if (error) |
5284 | goto error0; | 5245 | goto error0; |
5285 | bno = del.br_startoff - 1; | 5246 | bno = del.br_startoff - 1; |
5286 | nodelete: | 5247 | nodelete: |
5287 | lastx = ifp->if_lastex; | ||
5288 | /* | 5248 | /* |
5289 | * If not done go on to the next (previous) record. | 5249 | * If not done go on to the next (previous) record. |
5290 | * Reset ep in case the extents array was re-alloced. | ||
5291 | */ | 5250 | */ |
5292 | ep = xfs_iext_get_ext(ifp, lastx); | ||
5293 | if (bno != (xfs_fileoff_t)-1 && bno >= start) { | 5251 | if (bno != (xfs_fileoff_t)-1 && bno >= start) { |
5294 | if (lastx >= XFS_IFORK_NEXTENTS(ip, whichfork) || | 5252 | if (lastx >= 0) { |
5295 | xfs_bmbt_get_startoff(ep) > bno) { | 5253 | ep = xfs_iext_get_ext(ifp, lastx); |
5296 | if (--lastx >= 0) | 5254 | if (xfs_bmbt_get_startoff(ep) > bno) { |
5297 | ep = xfs_iext_get_ext(ifp, lastx); | 5255 | if (--lastx >= 0) |
5298 | } | 5256 | ep = xfs_iext_get_ext(ifp, |
5299 | if (lastx >= 0) | 5257 | lastx); |
5258 | } | ||
5300 | xfs_bmbt_get_all(ep, &got); | 5259 | xfs_bmbt_get_all(ep, &got); |
5260 | } | ||
5301 | extno++; | 5261 | extno++; |
5302 | } | 5262 | } |
5303 | } | 5263 | } |
5304 | ifp->if_lastex = lastx; | ||
5305 | *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0; | 5264 | *done = bno == (xfs_fileoff_t)-1 || bno < start || lastx < 0; |
5306 | ASSERT(ifp->if_ext_max == | 5265 | ASSERT(ifp->if_ext_max == |
5307 | XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); | 5266 | XFS_IFORK_SIZE(ip, whichfork) / (uint)sizeof(xfs_bmbt_rec_t)); |
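Taken together, the fs/xfs/xfs_bmap.c hunks above drop the cached ifp->if_lastex field and instead pass the current extent index into the helpers by reference (xfs_extnum_t *idx), so that a helper which merges, splits or shifts extent records can move the caller's cursor directly. A minimal sketch of that calling convention follows; merge_with_left() and the flat record array are hypothetical stand-ins for xfs_iext_get_ext()/xfs_iext_remove(), and only the cursor handling mirrors the real change.

struct rec { unsigned long start, len; };

static void merge_with_left(struct rec *recs, int *idx, int *nrecs)
{
	int i;

	/* Fold record *idx into its left neighbour. */
	recs[*idx - 1].len += recs[*idx].len;

	/* Remove the now-redundant record by shifting the tail down. */
	for (i = *idx; i < *nrecs - 1; i++)
		recs[i] = recs[i + 1];
	(*nrecs)--;

	/*
	 * Step the caller's cursor back so it now names the merged record.
	 * Before this change the new position was stashed in the fork's
	 * if_lastex field and re-read by the caller instead.
	 */
	(*idx)--;
}

The caller just keeps using its index variable afterwards, which is what xfs_bmapi() and xfs_bunmapi() now do with lastx once the "lastx = ifp->if_lastex" re-reads are gone.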
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h index 3651191daea1..c62234bde053 100644 --- a/fs/xfs/xfs_bmap.h +++ b/fs/xfs/xfs_bmap.h | |||
@@ -69,7 +69,6 @@ typedef struct xfs_bmap_free | |||
69 | #define XFS_BMAPI_ENTIRE 0x004 /* return entire extent, not trimmed */ | 69 | #define XFS_BMAPI_ENTIRE 0x004 /* return entire extent, not trimmed */ |
70 | #define XFS_BMAPI_METADATA 0x008 /* mapping metadata not user data */ | 70 | #define XFS_BMAPI_METADATA 0x008 /* mapping metadata not user data */ |
71 | #define XFS_BMAPI_ATTRFORK 0x010 /* use attribute fork not data */ | 71 | #define XFS_BMAPI_ATTRFORK 0x010 /* use attribute fork not data */ |
72 | #define XFS_BMAPI_RSVBLOCKS 0x020 /* OK to alloc. reserved data blocks */ | ||
73 | #define XFS_BMAPI_PREALLOC 0x040 /* preallocation op: unwritten space */ | 72 | #define XFS_BMAPI_PREALLOC 0x040 /* preallocation op: unwritten space */ |
74 | #define XFS_BMAPI_IGSTATE 0x080 /* Ignore state - */ | 73 | #define XFS_BMAPI_IGSTATE 0x080 /* Ignore state - */ |
75 | /* combine contig. space */ | 74 | /* combine contig. space */ |
@@ -87,7 +86,6 @@ typedef struct xfs_bmap_free | |||
87 | { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ | 86 | { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ |
88 | { XFS_BMAPI_METADATA, "METADATA" }, \ | 87 | { XFS_BMAPI_METADATA, "METADATA" }, \ |
89 | { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \ | 88 | { XFS_BMAPI_ATTRFORK, "ATTRFORK" }, \ |
90 | { XFS_BMAPI_RSVBLOCKS, "RSVBLOCKS" }, \ | ||
91 | { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ | 89 | { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ |
92 | { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ | 90 | { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ |
93 | { XFS_BMAPI_CONTIG, "CONTIG" }, \ | 91 | { XFS_BMAPI_CONTIG, "CONTIG" }, \ |
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c index c8e3349c287c..a098a20ca63e 100644 --- a/fs/xfs/xfs_inode.c +++ b/fs/xfs/xfs_inode.c | |||
@@ -920,7 +920,6 @@ xfs_iread_extents( | |||
920 | /* | 920 | /* |
921 | * We know that the size is valid (it's checked in iformat_btree) | 921 | * We know that the size is valid (it's checked in iformat_btree) |
922 | */ | 922 | */ |
923 | ifp->if_lastex = NULLEXTNUM; | ||
924 | ifp->if_bytes = ifp->if_real_bytes = 0; | 923 | ifp->if_bytes = ifp->if_real_bytes = 0; |
925 | ifp->if_flags |= XFS_IFEXTENTS; | 924 | ifp->if_flags |= XFS_IFEXTENTS; |
926 | xfs_iext_add(ifp, 0, nextents); | 925 | xfs_iext_add(ifp, 0, nextents); |
@@ -2558,12 +2557,9 @@ xfs_iflush_fork( | |||
2558 | case XFS_DINODE_FMT_EXTENTS: | 2557 | case XFS_DINODE_FMT_EXTENTS: |
2559 | ASSERT((ifp->if_flags & XFS_IFEXTENTS) || | 2558 | ASSERT((ifp->if_flags & XFS_IFEXTENTS) || |
2560 | !(iip->ili_format.ilf_fields & extflag[whichfork])); | 2559 | !(iip->ili_format.ilf_fields & extflag[whichfork])); |
2561 | ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) || | ||
2562 | (ifp->if_bytes == 0)); | ||
2563 | ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) || | ||
2564 | (ifp->if_bytes > 0)); | ||
2565 | if ((iip->ili_format.ilf_fields & extflag[whichfork]) && | 2560 | if ((iip->ili_format.ilf_fields & extflag[whichfork]) && |
2566 | (ifp->if_bytes > 0)) { | 2561 | (ifp->if_bytes > 0)) { |
2562 | ASSERT(xfs_iext_get_ext(ifp, 0)); | ||
2567 | ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); | 2563 | ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0); |
2568 | (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, | 2564 | (void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp, |
2569 | whichfork); | 2565 | whichfork); |
@@ -3112,6 +3108,8 @@ xfs_iext_get_ext( | |||
3112 | xfs_extnum_t idx) /* index of target extent */ | 3108 | xfs_extnum_t idx) /* index of target extent */ |
3113 | { | 3109 | { |
3114 | ASSERT(idx >= 0); | 3110 | ASSERT(idx >= 0); |
3111 | ASSERT(idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t)); | ||
3112 | |||
3115 | if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { | 3113 | if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) { |
3116 | return ifp->if_u1.if_ext_irec->er_extbuf; | 3114 | return ifp->if_u1.if_ext_irec->er_extbuf; |
3117 | } else if (ifp->if_flags & XFS_IFEXTIREC) { | 3115 | } else if (ifp->if_flags & XFS_IFEXTIREC) { |
@@ -3191,7 +3189,6 @@ xfs_iext_add( | |||
3191 | } | 3189 | } |
3192 | ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; | 3190 | ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext; |
3193 | ifp->if_real_bytes = 0; | 3191 | ifp->if_real_bytes = 0; |
3194 | ifp->if_lastex = nextents + ext_diff; | ||
3195 | } | 3192 | } |
3196 | /* | 3193 | /* |
3197 | * Otherwise use a linear (direct) extent list. | 3194 | * Otherwise use a linear (direct) extent list. |
@@ -3886,8 +3883,10 @@ xfs_iext_idx_to_irec( | |||
3886 | xfs_extnum_t page_idx = *idxp; /* extent index in target list */ | 3883 | xfs_extnum_t page_idx = *idxp; /* extent index in target list */ |
3887 | 3884 | ||
3888 | ASSERT(ifp->if_flags & XFS_IFEXTIREC); | 3885 | ASSERT(ifp->if_flags & XFS_IFEXTIREC); |
3889 | ASSERT(page_idx >= 0 && page_idx <= | 3886 | ASSERT(page_idx >= 0); |
3890 | ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t)); | 3887 | ASSERT(page_idx <= ifp->if_bytes / sizeof(xfs_bmbt_rec_t)); |
3888 | ASSERT(page_idx < ifp->if_bytes / sizeof(xfs_bmbt_rec_t) || realloc); | ||
3889 | |||
3891 | nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; | 3890 | nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ; |
3892 | erp_idx = 0; | 3891 | erp_idx = 0; |
3893 | low = 0; | 3892 | low = 0; |
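The xfs_inode.c hunk also tightens xfs_iext_get_ext() so that an index at or beyond the in-core extent count now trips an assertion instead of silently reading past the array. A minimal sketch of the same bounds-checked accessor idea, using a simplified fork structure and the standard C assert() in place of the kernel's DEBUG-only ASSERT():

#include <assert.h>
#include <stddef.h>

struct irec { unsigned long start, len; };

struct fork {
	struct irec	*recs;
	size_t		bytes;	/* recs currently occupies this many bytes */
};

static struct irec *get_ext(struct fork *ifp, long idx)
{
	/* Mirror the two ASSERTs added above: index must lie in [0, nextents). */
	assert(idx >= 0);
	assert((size_t)idx < ifp->bytes / sizeof(struct irec));
	return &ifp->recs[idx];
}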
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index ff4e2a30227d..3ae6d58e5473 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h | |||
@@ -67,7 +67,6 @@ typedef struct xfs_ifork { | |||
67 | short if_broot_bytes; /* bytes allocated for root */ | 67 | short if_broot_bytes; /* bytes allocated for root */ |
68 | unsigned char if_flags; /* per-fork flags */ | 68 | unsigned char if_flags; /* per-fork flags */ |
69 | unsigned char if_ext_max; /* max # of extent records */ | 69 | unsigned char if_ext_max; /* max # of extent records */ |
70 | xfs_extnum_t if_lastex; /* last if_extents used */ | ||
71 | union { | 70 | union { |
72 | xfs_bmbt_rec_host_t *if_extents;/* linear map file exts */ | 71 | xfs_bmbt_rec_host_t *if_extents;/* linear map file exts */ |
73 | xfs_ext_irec_t *if_ext_irec; /* irec map file exts */ | 72 | xfs_ext_irec_t *if_ext_irec; /* irec map file exts */ |
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c index 7d56e88a3f0e..c7755d5a5fbe 100644 --- a/fs/xfs/xfs_log_cil.c +++ b/fs/xfs/xfs_log_cil.c | |||
@@ -29,6 +29,7 @@ | |||
29 | #include "xfs_mount.h" | 29 | #include "xfs_mount.h" |
30 | #include "xfs_error.h" | 30 | #include "xfs_error.h" |
31 | #include "xfs_alloc.h" | 31 | #include "xfs_alloc.h" |
32 | #include "xfs_discard.h" | ||
32 | 33 | ||
33 | /* | 34 | /* |
34 | * Perform initial CIL structure initialisation. If the CIL is not | 35 | * Perform initial CIL structure initialisation. If the CIL is not |
@@ -361,18 +362,28 @@ xlog_cil_committed( | |||
361 | int abort) | 362 | int abort) |
362 | { | 363 | { |
363 | struct xfs_cil_ctx *ctx = args; | 364 | struct xfs_cil_ctx *ctx = args; |
365 | struct xfs_mount *mp = ctx->cil->xc_log->l_mp; | ||
364 | 366 | ||
365 | xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain, | 367 | xfs_trans_committed_bulk(ctx->cil->xc_log->l_ailp, ctx->lv_chain, |
366 | ctx->start_lsn, abort); | 368 | ctx->start_lsn, abort); |
367 | 369 | ||
368 | xfs_alloc_busy_sort(&ctx->busy_extents); | 370 | xfs_alloc_busy_sort(&ctx->busy_extents); |
369 | xfs_alloc_busy_clear(ctx->cil->xc_log->l_mp, &ctx->busy_extents); | 371 | xfs_alloc_busy_clear(mp, &ctx->busy_extents, |
372 | (mp->m_flags & XFS_MOUNT_DISCARD) && !abort); | ||
370 | 373 | ||
371 | spin_lock(&ctx->cil->xc_cil_lock); | 374 | spin_lock(&ctx->cil->xc_cil_lock); |
372 | list_del(&ctx->committing); | 375 | list_del(&ctx->committing); |
373 | spin_unlock(&ctx->cil->xc_cil_lock); | 376 | spin_unlock(&ctx->cil->xc_cil_lock); |
374 | 377 | ||
375 | xlog_cil_free_logvec(ctx->lv_chain); | 378 | xlog_cil_free_logvec(ctx->lv_chain); |
379 | |||
380 | if (!list_empty(&ctx->busy_extents)) { | ||
381 | ASSERT(mp->m_flags & XFS_MOUNT_DISCARD); | ||
382 | |||
383 | xfs_discard_extents(mp, &ctx->busy_extents); | ||
384 | xfs_alloc_busy_clear(mp, &ctx->busy_extents, false); | ||
385 | } | ||
386 | |||
376 | kmem_free(ctx); | 387 | kmem_free(ctx); |
377 | } | 388 | } |
378 | 389 | ||
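The xfs_log_cil.c hunk wires online discard into the checkpoint completion path: busy extents are handed to xfs_discard_extents() only after the checkpoint has committed, and only when the filesystem carries the new XFS_MOUNT_DISCARD flag. A rough sketch of that "clear what needs no discard, then trim the rest" ordering is below; the busy_list type and the busy_clear()/issue_discards() helpers are simplified stand-ins for the real xfs_alloc_busy_clear()/xfs_discard_extents() calls.

#include <stdbool.h>

struct busy_list { int nr_pending; };		/* stand-in for the busy-extent list */

static bool busy_list_empty(struct busy_list *b) { return b->nr_pending == 0; }

static void busy_clear(struct busy_list *b, bool keep_discard_pending)
{
	/* Drop everything, unless discard candidates must stay on the list. */
	if (!keep_discard_pending)
		b->nr_pending = 0;
}

static void issue_discards(struct busy_list *b)
{
	/* Stand-in for xfs_discard_extents(): trim b->nr_pending extents. */
}

static void checkpoint_done(struct busy_list *busy, bool mount_discard,
			    bool aborted)
{
	/*
	 * First pass: release busy extents that need no discard.  When
	 * online discard is enabled and the checkpoint was not aborted,
	 * the extents still awaiting a discard remain on the list.
	 */
	busy_clear(busy, mount_discard && !aborted);

	if (!busy_list_empty(busy)) {
		/* The commit is durable, so the blocks may now be trimmed. */
		issue_discards(busy);
		busy_clear(busy, false);
	}
}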
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h index 19af0ab0d0c6..3d68bb267c5f 100644 --- a/fs/xfs/xfs_mount.h +++ b/fs/xfs/xfs_mount.h | |||
@@ -224,6 +224,7 @@ typedef struct xfs_mount { | |||
224 | #define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem | 224 | #define XFS_MOUNT_FS_SHUTDOWN (1ULL << 4) /* atomic stop of all filesystem |
225 | operations, typically for | 225 | operations, typically for |
226 | disk errors in metadata */ | 226 | disk errors in metadata */ |
227 | #define XFS_MOUNT_DISCARD (1ULL << 5) /* discard unused blocks */ | ||
227 | #define XFS_MOUNT_RETERR (1ULL << 6) /* return alignment errors to | 228 | #define XFS_MOUNT_RETERR (1ULL << 6) /* return alignment errors to |
228 | user */ | 229 | user */ |
229 | #define XFS_MOUNT_NOALIGN (1ULL << 7) /* turn off stripe alignment | 230 | #define XFS_MOUNT_NOALIGN (1ULL << 7) /* turn off stripe alignment |
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c index d1f24858ccc4..7c7bc2b786bd 100644 --- a/fs/xfs/xfs_trans.c +++ b/fs/xfs/xfs_trans.c | |||
@@ -609,7 +609,7 @@ xfs_trans_free( | |||
609 | struct xfs_trans *tp) | 609 | struct xfs_trans *tp) |
610 | { | 610 | { |
611 | xfs_alloc_busy_sort(&tp->t_busy); | 611 | xfs_alloc_busy_sort(&tp->t_busy); |
612 | xfs_alloc_busy_clear(tp->t_mountp, &tp->t_busy); | 612 | xfs_alloc_busy_clear(tp->t_mountp, &tp->t_busy, false); |
613 | 613 | ||
614 | atomic_dec(&tp->t_mountp->m_active_trans); | 614 | atomic_dec(&tp->t_mountp->m_active_trans); |
615 | xfs_trans_free_dqinfo(tp); | 615 | xfs_trans_free_dqinfo(tp); |
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h index 110fa700f853..71c778033f57 100644 --- a/include/asm-generic/bitops/find.h +++ b/include/asm-generic/bitops/find.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef _ASM_GENERIC_BITOPS_FIND_H_ | 1 | #ifndef _ASM_GENERIC_BITOPS_FIND_H_ |
2 | #define _ASM_GENERIC_BITOPS_FIND_H_ | 2 | #define _ASM_GENERIC_BITOPS_FIND_H_ |
3 | 3 | ||
4 | #ifndef find_next_bit | ||
4 | /** | 5 | /** |
5 | * find_next_bit - find the next set bit in a memory region | 6 | * find_next_bit - find the next set bit in a memory region |
6 | * @addr: The address to base the search on | 7 | * @addr: The address to base the search on |
@@ -9,7 +10,9 @@ | |||
9 | */ | 10 | */ |
10 | extern unsigned long find_next_bit(const unsigned long *addr, unsigned long | 11 | extern unsigned long find_next_bit(const unsigned long *addr, unsigned long |
11 | size, unsigned long offset); | 12 | size, unsigned long offset); |
13 | #endif | ||
12 | 14 | ||
15 | #ifndef find_next_zero_bit | ||
13 | /** | 16 | /** |
14 | * find_next_zero_bit - find the next cleared bit in a memory region | 17 | * find_next_zero_bit - find the next cleared bit in a memory region |
15 | * @addr: The address to base the search on | 18 | * @addr: The address to base the search on |
@@ -18,6 +21,7 @@ extern unsigned long find_next_bit(const unsigned long *addr, unsigned long | |||
18 | */ | 21 | */ |
19 | extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned | 22 | extern unsigned long find_next_zero_bit(const unsigned long *addr, unsigned |
20 | long size, unsigned long offset); | 23 | long size, unsigned long offset); |
24 | #endif | ||
21 | 25 | ||
22 | #ifdef CONFIG_GENERIC_FIND_FIRST_BIT | 26 | #ifdef CONFIG_GENERIC_FIND_FIRST_BIT |
23 | 27 | ||
diff --git a/include/asm-generic/bitops/le.h b/include/asm-generic/bitops/le.h index 946a21b1b5dc..f95c663a6a41 100644 --- a/include/asm-generic/bitops/le.h +++ b/include/asm-generic/bitops/le.h | |||
@@ -30,13 +30,20 @@ static inline unsigned long find_first_zero_bit_le(const void *addr, | |||
30 | 30 | ||
31 | #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) | 31 | #define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7) |
32 | 32 | ||
33 | #ifndef find_next_zero_bit_le | ||
33 | extern unsigned long find_next_zero_bit_le(const void *addr, | 34 | extern unsigned long find_next_zero_bit_le(const void *addr, |
34 | unsigned long size, unsigned long offset); | 35 | unsigned long size, unsigned long offset); |
36 | #endif | ||
37 | |||
38 | #ifndef find_next_bit_le | ||
35 | extern unsigned long find_next_bit_le(const void *addr, | 39 | extern unsigned long find_next_bit_le(const void *addr, |
36 | unsigned long size, unsigned long offset); | 40 | unsigned long size, unsigned long offset); |
41 | #endif | ||
37 | 42 | ||
43 | #ifndef find_first_zero_bit_le | ||
38 | #define find_first_zero_bit_le(addr, size) \ | 44 | #define find_first_zero_bit_le(addr, size) \ |
39 | find_next_zero_bit_le((addr), (size), 0) | 45 | find_next_zero_bit_le((addr), (size), 0) |
46 | #endif | ||
40 | 47 | ||
41 | #else | 48 | #else |
42 | #error "Please fix <asm/byteorder.h>" | 49 | #error "Please fix <asm/byteorder.h>" |
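The two asm-generic/bitops hunks wrap the generic find_next_bit()/find_next_zero_bit() declarations, and their little-endian variants, in #ifndef guards so an architecture can pre-define an optimised replacement and still include the generic header for everything else. A self-contained illustration of that override pattern is below; the arch_find_next_bit() name and its naive loop are illustrative, not real kernel symbols.

/* --- hypothetical arch header ----------------------------------------- */
static inline unsigned long arch_find_next_bit(const unsigned long *addr,
					       unsigned long size,
					       unsigned long offset)
{
	/* Imagine a hand-tuned implementation here; this one is naive. */
	for (unsigned long i = offset; i < size; i++)
		if (addr[i / (8 * sizeof(unsigned long))] &
		    (1UL << (i % (8 * sizeof(unsigned long)))))
			return i;
	return size;
}
#define find_next_bit arch_find_next_bit

/* --- generic header, mirroring the guard added above ------------------ */
#ifndef find_next_bit
extern unsigned long find_next_bit(const unsigned long *addr,
				   unsigned long size, unsigned long offset);
#endif

Because the arch header defines find_next_bit first, the generic extern declaration is skipped and callers transparently get the arch version.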
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h index 91784841e407..dfb0ec666c94 100644 --- a/include/asm-generic/bug.h +++ b/include/asm-generic/bug.h | |||
@@ -162,46 +162,6 @@ extern void warn_slowpath_null(const char *file, const int line); | |||
162 | unlikely(__ret_warn_once); \ | 162 | unlikely(__ret_warn_once); \ |
163 | }) | 163 | }) |
164 | 164 | ||
165 | #ifdef CONFIG_PRINTK | ||
166 | |||
167 | #define WARN_ON_RATELIMIT(condition, state) \ | ||
168 | WARN_ON((condition) && __ratelimit(state)) | ||
169 | |||
170 | #define __WARN_RATELIMIT(condition, state, format...) \ | ||
171 | ({ \ | ||
172 | int rtn = 0; \ | ||
173 | if (unlikely(__ratelimit(state))) \ | ||
174 | rtn = WARN(condition, format); \ | ||
175 | rtn; \ | ||
176 | }) | ||
177 | |||
178 | #define WARN_RATELIMIT(condition, format...) \ | ||
179 | ({ \ | ||
180 | static DEFINE_RATELIMIT_STATE(_rs, \ | ||
181 | DEFAULT_RATELIMIT_INTERVAL, \ | ||
182 | DEFAULT_RATELIMIT_BURST); \ | ||
183 | __WARN_RATELIMIT(condition, &_rs, format); \ | ||
184 | }) | ||
185 | |||
186 | #else | ||
187 | |||
188 | #define WARN_ON_RATELIMIT(condition, state) \ | ||
189 | WARN_ON(condition) | ||
190 | |||
191 | #define __WARN_RATELIMIT(condition, state, format...) \ | ||
192 | ({ \ | ||
193 | int rtn = WARN(condition, format); \ | ||
194 | rtn; \ | ||
195 | }) | ||
196 | |||
197 | #define WARN_RATELIMIT(condition, format...) \ | ||
198 | ({ \ | ||
199 | int rtn = WARN(condition, format); \ | ||
200 | rtn; \ | ||
201 | }) | ||
202 | |||
203 | #endif | ||
204 | |||
205 | /* | 165 | /* |
206 | * WARN_ON_SMP() is for cases that the warning is either | 166 | * WARN_ON_SMP() is for cases that the warning is either |
207 | * meaningless for !SMP or may even cause failures. | 167 | * meaningless for !SMP or may even cause failures. |
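The asm-generic/bug.h hunk removes the rate-limited WARN macros from this header. Their effect, warning at most a burst of times per interval rather than on every hit, can still be sketched against the standard ratelimit interface; this is an illustrative reconstruction using DEFINE_RATELIMIT_STATE()/__ratelimit(), not a claim about where the macros now live.

#include <linux/ratelimit.h>
#include <linux/bug.h>

static void report_bad_length(int len)
{
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	/* Same shape as WARN_RATELIMIT(): only warn while under the limit. */
	if (len < 0 && __ratelimit(&rs))
		WARN(1, "negative length %d\n", len);
}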
diff --git a/include/asm-generic/ptrace.h b/include/asm-generic/ptrace.h new file mode 100644 index 000000000000..82e674f6b337 --- /dev/null +++ b/include/asm-generic/ptrace.h | |||
@@ -0,0 +1,74 @@ | |||
1 | /* | ||
2 | * Common low level (register) ptrace helpers | ||
3 | * | ||
4 | * Copyright 2004-2011 Analog Devices Inc. | ||
5 | * | ||
6 | * Licensed under the GPL-2 or later. | ||
7 | */ | ||
8 | |||
9 | #ifndef __ASM_GENERIC_PTRACE_H__ | ||
10 | #define __ASM_GENERIC_PTRACE_H__ | ||
11 | |||
12 | #ifndef __ASSEMBLY__ | ||
13 | |||
14 | /* Helpers for working with the instruction pointer */ | ||
15 | #ifndef GET_IP | ||
16 | #define GET_IP(regs) ((regs)->pc) | ||
17 | #endif | ||
18 | #ifndef SET_IP | ||
19 | #define SET_IP(regs, val) (GET_IP(regs) = (val)) | ||
20 | #endif | ||
21 | |||
22 | static inline unsigned long instruction_pointer(struct pt_regs *regs) | ||
23 | { | ||
24 | return GET_IP(regs); | ||
25 | } | ||
26 | static inline void instruction_pointer_set(struct pt_regs *regs, | ||
27 | unsigned long val) | ||
28 | { | ||
29 | SET_IP(regs, val); | ||
30 | } | ||
31 | |||
32 | #ifndef profile_pc | ||
33 | #define profile_pc(regs) instruction_pointer(regs) | ||
34 | #endif | ||
35 | |||
36 | /* Helpers for working with the user stack pointer */ | ||
37 | #ifndef GET_USP | ||
38 | #define GET_USP(regs) ((regs)->usp) | ||
39 | #endif | ||
40 | #ifndef SET_USP | ||
41 | #define SET_USP(regs, val) (GET_USP(regs) = (val)) | ||
42 | #endif | ||
43 | |||
44 | static inline unsigned long user_stack_pointer(struct pt_regs *regs) | ||
45 | { | ||
46 | return GET_USP(regs); | ||
47 | } | ||
48 | static inline void user_stack_pointer_set(struct pt_regs *regs, | ||
49 | unsigned long val) | ||
50 | { | ||
51 | SET_USP(regs, val); | ||
52 | } | ||
53 | |||
54 | /* Helpers for working with the frame pointer */ | ||
55 | #ifndef GET_FP | ||
56 | #define GET_FP(regs) ((regs)->fp) | ||
57 | #endif | ||
58 | #ifndef SET_FP | ||
59 | #define SET_FP(regs, val) (GET_FP(regs) = (val)) | ||
60 | #endif | ||
61 | |||
62 | static inline unsigned long frame_pointer(struct pt_regs *regs) | ||
63 | { | ||
64 | return GET_FP(regs); | ||
65 | } | ||
66 | static inline void frame_pointer_set(struct pt_regs *regs, | ||
67 | unsigned long val) | ||
68 | { | ||
69 | SET_FP(regs, val); | ||
70 | } | ||
71 | |||
72 | #endif /* __ASSEMBLY__ */ | ||
73 | |||
74 | #endif | ||
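
The new asm-generic/ptrace.h builds instruction_pointer()/user_stack_pointer()/frame_pointer() accessors on overridable GET_IP/GET_USP/GET_FP macros, so an architecture whose pt_regs layout does not use the default ->pc/->usp/->fp names only has to redefine the macros before including it. A hypothetical arch header (register names are made up for the sketch):

/* arch/foo/include/asm/ptrace.h -- hypothetical example */
struct pt_regs {
        unsigned long r[16];
        unsigned long pswaddr;          /* program counter lives here */
};

#define GET_IP(regs)    ((regs)->pswaddr)   /* override default ->pc  */
#define GET_USP(regs)   ((regs)->r[15])     /* override default ->usp */
#define GET_FP(regs)    ((regs)->r[11])     /* override default ->fp  */

#include <asm-generic/ptrace.h>
/* instruction_pointer(), user_stack_pointer() and frame_pointer()
 * now operate on the fields defined above. */
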
diff --git a/include/linux/basic_mmio_gpio.h b/include/linux/basic_mmio_gpio.h index 198087a16fc4..1ae12710d732 100644 --- a/include/linux/basic_mmio_gpio.h +++ b/include/linux/basic_mmio_gpio.h | |||
@@ -13,8 +13,64 @@ | |||
13 | #ifndef __BASIC_MMIO_GPIO_H | 13 | #ifndef __BASIC_MMIO_GPIO_H |
14 | #define __BASIC_MMIO_GPIO_H | 14 | #define __BASIC_MMIO_GPIO_H |
15 | 15 | ||
16 | #include <linux/gpio.h> | ||
17 | #include <linux/types.h> | ||
18 | #include <linux/compiler.h> | ||
19 | |||
16 | struct bgpio_pdata { | 20 | struct bgpio_pdata { |
17 | int base; | 21 | int base; |
22 | int ngpio; | ||
18 | }; | 23 | }; |
19 | 24 | ||
25 | struct device; | ||
26 | |||
27 | struct bgpio_chip { | ||
28 | struct gpio_chip gc; | ||
29 | |||
30 | unsigned long (*read_reg)(void __iomem *reg); | ||
31 | void (*write_reg)(void __iomem *reg, unsigned long data); | ||
32 | |||
33 | void __iomem *reg_dat; | ||
34 | void __iomem *reg_set; | ||
35 | void __iomem *reg_clr; | ||
36 | void __iomem *reg_dir; | ||
37 | |||
38 | /* Number of bits (GPIOs): <register width> * 8. */ | ||
39 | int bits; | ||
40 | |||
41 | /* | ||
42 | * Some GPIO controllers work with the big-endian bits notation, | ||
43 | * e.g. in a 8-bits register, GPIO7 is the least significant bit. | ||
44 | */ | ||
45 | unsigned long (*pin2mask)(struct bgpio_chip *bgc, unsigned int pin); | ||
46 | |||
47 | /* | ||
48 | * Used to lock bgpio_chip->data. Also, this is needed to keep | ||
49 | * shadowed and real data registers writes together. | ||
50 | */ | ||
51 | spinlock_t lock; | ||
52 | |||
53 | /* Shadowed data register to clear/set bits safely. */ | ||
54 | unsigned long data; | ||
55 | |||
56 | /* Shadowed direction registers to clear/set direction safely. */ | ||
57 | unsigned long dir; | ||
58 | }; | ||
59 | |||
60 | static inline struct bgpio_chip *to_bgpio_chip(struct gpio_chip *gc) | ||
61 | { | ||
62 | return container_of(gc, struct bgpio_chip, gc); | ||
63 | } | ||
64 | |||
65 | int __devexit bgpio_remove(struct bgpio_chip *bgc); | ||
66 | int __devinit bgpio_init(struct bgpio_chip *bgc, | ||
67 | struct device *dev, | ||
68 | unsigned long sz, | ||
69 | void __iomem *dat, | ||
70 | void __iomem *set, | ||
71 | void __iomem *clr, | ||
72 | void __iomem *dirout, | ||
73 | void __iomem *dirin, | ||
74 | bool big_endian); | ||
75 | |||
20 | #endif /* __BASIC_MMIO_GPIO_H */ | 76 | #endif /* __BASIC_MMIO_GPIO_H */ |
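
With struct bgpio_chip and bgpio_init()/bgpio_remove() now exported from the header, a platform driver for a simple memory-mapped GPIO block can be little more than a thin wrapper. A minimal sketch, assuming a controller with a 4-byte data register at offset 0x0 and a direction-out register at 0x4 (offsets and names are invented; error unwinding is omitted for brevity):

#include <linux/basic_mmio_gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/io.h>

static int __devinit foo_gpio_probe(struct platform_device *pdev)
{
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct bgpio_chip *bgc;
        void __iomem *base;
        int err;

        if (!res)
                return -ENODEV;

        bgc = kzalloc(sizeof(*bgc), GFP_KERNEL);
        if (!bgc)
                return -ENOMEM;

        base = ioremap(res->start, resource_size(res));
        if (!base)
                return -ENOMEM;

        /* 4-byte registers: data at +0x0, direction-out at +0x4; no
         * separate set/clear or dir-in registers, little-endian bits. */
        err = bgpio_init(bgc, &pdev->dev, 4,
                         base + 0x0, NULL, NULL,
                         base + 0x4, NULL, false);
        if (err)
                return err;

        return gpiochip_add(&bgc->gc);
}
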
diff --git a/include/linux/bitops.h b/include/linux/bitops.h index 2184c6b97aeb..a3ef66a2a083 100644 --- a/include/linux/bitops.h +++ b/include/linux/bitops.h | |||
@@ -148,7 +148,7 @@ static inline unsigned long __ffs64(u64 word) | |||
148 | 148 | ||
149 | #ifdef __KERNEL__ | 149 | #ifdef __KERNEL__ |
150 | 150 | ||
151 | #ifdef CONFIG_GENERIC_FIND_LAST_BIT | 151 | #ifndef find_last_bit |
152 | /** | 152 | /** |
153 | * find_last_bit - find the last set bit in a memory region | 153 | * find_last_bit - find the last set bit in a memory region |
154 | * @addr: The address to start the search at | 154 | * @addr: The address to start the search at |
@@ -158,7 +158,7 @@ static inline unsigned long __ffs64(u64 word) | |||
158 | */ | 158 | */ |
159 | extern unsigned long find_last_bit(const unsigned long *addr, | 159 | extern unsigned long find_last_bit(const unsigned long *addr, |
160 | unsigned long size); | 160 | unsigned long size); |
161 | #endif /* CONFIG_GENERIC_FIND_LAST_BIT */ | 161 | #endif |
162 | 162 | ||
163 | #endif /* __KERNEL__ */ | 163 | #endif /* __KERNEL__ */ |
164 | #endif | 164 | #endif |
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h index f5df23561b96..503c8a6b3079 100644 --- a/include/linux/buffer_head.h +++ b/include/linux/buffer_head.h | |||
@@ -217,8 +217,24 @@ int cont_write_begin(struct file *, struct address_space *, loff_t, | |||
217 | get_block_t *, loff_t *); | 217 | get_block_t *, loff_t *); |
218 | int generic_cont_expand_simple(struct inode *inode, loff_t size); | 218 | int generic_cont_expand_simple(struct inode *inode, loff_t size); |
219 | int block_commit_write(struct page *page, unsigned from, unsigned to); | 219 | int block_commit_write(struct page *page, unsigned from, unsigned to); |
220 | int __block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | ||
221 | get_block_t get_block); | ||
220 | int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, | 222 | int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf, |
221 | get_block_t get_block); | 223 | get_block_t get_block); |
224 | /* Convert errno to return value from ->page_mkwrite() call */ | ||
225 | static inline int block_page_mkwrite_return(int err) | ||
226 | { | ||
227 | if (err == 0) | ||
228 | return VM_FAULT_LOCKED; | ||
229 | if (err == -EFAULT) | ||
230 | return VM_FAULT_NOPAGE; | ||
231 | if (err == -ENOMEM) | ||
232 | return VM_FAULT_OOM; | ||
233 | if (err == -EAGAIN) | ||
234 | return VM_FAULT_RETRY; | ||
235 | /* -ENOSPC, -EDQUOT, -EIO ... */ | ||
236 | return VM_FAULT_SIGBUS; | ||
237 | } | ||
222 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); | 238 | sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *); |
223 | int block_truncate_page(struct address_space *, loff_t, get_block_t *); | 239 | int block_truncate_page(struct address_space *, loff_t, get_block_t *); |
224 | int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned, | 240 | int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned, |
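
The new __block_page_mkwrite()/block_page_mkwrite_return() pair lets a filesystem wrap its own locking around the generic fault-time write-enable path and then translate the errno into a VM_FAULT_* code. A rough sketch of a ->page_mkwrite handler built on them (foo_get_block is a placeholder for the filesystem's usual get_block_t helper):

#include <linux/buffer_head.h>
#include <linux/mm.h>

extern int foo_get_block(struct inode *inode, sector_t block,
                         struct buffer_head *bh, int create);

static int foo_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        int err;

        /* Filesystem-private locking/allocation would go here. */
        err = __block_page_mkwrite(vma, vmf, foo_get_block);

        /* Map 0 / -EFAULT / -ENOMEM / -EAGAIN / others to VM_FAULT_* codes. */
        return block_page_mkwrite_return(err);
}
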
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h index 5ac7ebc36dbb..ab4ac0ccb857 100644 --- a/include/linux/cgroup.h +++ b/include/linux/cgroup.h | |||
@@ -467,12 +467,14 @@ struct cgroup_subsys { | |||
467 | int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); | 467 | int (*pre_destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); |
468 | void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); | 468 | void (*destroy)(struct cgroup_subsys *ss, struct cgroup *cgrp); |
469 | int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, | 469 | int (*can_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, |
470 | struct task_struct *tsk, bool threadgroup); | 470 | struct task_struct *tsk); |
471 | int (*can_attach_task)(struct cgroup *cgrp, struct task_struct *tsk); | ||
471 | void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, | 472 | void (*cancel_attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, |
472 | struct task_struct *tsk, bool threadgroup); | 473 | struct task_struct *tsk); |
474 | void (*pre_attach)(struct cgroup *cgrp); | ||
475 | void (*attach_task)(struct cgroup *cgrp, struct task_struct *tsk); | ||
473 | void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, | 476 | void (*attach)(struct cgroup_subsys *ss, struct cgroup *cgrp, |
474 | struct cgroup *old_cgrp, struct task_struct *tsk, | 477 | struct cgroup *old_cgrp, struct task_struct *tsk); |
475 | bool threadgroup); | ||
476 | void (*fork)(struct cgroup_subsys *ss, struct task_struct *task); | 478 | void (*fork)(struct cgroup_subsys *ss, struct task_struct *task); |
477 | void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp, | 479 | void (*exit)(struct cgroup_subsys *ss, struct cgroup *cgrp, |
478 | struct cgroup *old_cgrp, struct task_struct *task); | 480 | struct cgroup *old_cgrp, struct task_struct *task); |
@@ -553,9 +555,6 @@ static inline struct cgroup* task_cgroup(struct task_struct *task, | |||
553 | return task_subsys_state(task, subsys_id)->cgroup; | 555 | return task_subsys_state(task, subsys_id)->cgroup; |
554 | } | 556 | } |
555 | 557 | ||
556 | int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *ss, | ||
557 | char *nodename); | ||
558 | |||
559 | /* A cgroup_iter should be treated as an opaque object */ | 558 | /* A cgroup_iter should be treated as an opaque object */ |
560 | struct cgroup_iter { | 559 | struct cgroup_iter { |
561 | struct list_head *cg_link; | 560 | struct list_head *cg_link; |
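
The attach path in struct cgroup_subsys is reshaped here: can_attach()/cancel_attach()/attach() lose the threadgroup flag, and per-task hooks can_attach_task(), pre_attach() and attach_task() are added. A controller that only needs per-task work would now look roughly like this (the subsystem name and the kernel-thread check are invented for the sketch):

#include <linux/cgroup.h>
#include <linux/sched.h>

static int foo_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
        /* Example veto: refuse to move kernel threads. */
        return (tsk->flags & PF_KTHREAD) ? -EINVAL : 0;
}

static void foo_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
{
        /* Per-task accounting update after the task has been moved. */
}

struct cgroup_subsys foo_subsys = {
        .name            = "foo",
        .can_attach_task = foo_can_attach_task,
        .attach_task     = foo_attach_task,
};
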
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h index cdbfcb8780ec..ac663c18776c 100644 --- a/include/linux/cgroup_subsys.h +++ b/include/linux/cgroup_subsys.h | |||
@@ -19,12 +19,6 @@ SUBSYS(debug) | |||
19 | 19 | ||
20 | /* */ | 20 | /* */ |
21 | 21 | ||
22 | #ifdef CONFIG_CGROUP_NS | ||
23 | SUBSYS(ns) | ||
24 | #endif | ||
25 | |||
26 | /* */ | ||
27 | |||
28 | #ifdef CONFIG_CGROUP_SCHED | 22 | #ifdef CONFIG_CGROUP_SCHED |
29 | SUBSYS(cpu_cgroup) | 23 | SUBSYS(cpu_cgroup) |
30 | #endif | 24 | #endif |
diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h new file mode 100644 index 000000000000..04ffb2e6c9d0 --- /dev/null +++ b/include/linux/cleancache.h | |||
@@ -0,0 +1,122 @@ | |||
1 | #ifndef _LINUX_CLEANCACHE_H | ||
2 | #define _LINUX_CLEANCACHE_H | ||
3 | |||
4 | #include <linux/fs.h> | ||
5 | #include <linux/exportfs.h> | ||
6 | #include <linux/mm.h> | ||
7 | |||
8 | #define CLEANCACHE_KEY_MAX 6 | ||
9 | |||
10 | /* | ||
11 | * cleancache requires every file with a page in cleancache to have a | ||
12 | * unique key unless/until the file is removed/truncated. For some | ||
13 | * filesystems, the inode number is unique, but for "modern" filesystems | ||
14 | * an exportable filehandle is required (see exportfs.h) | ||
15 | */ | ||
16 | struct cleancache_filekey { | ||
17 | union { | ||
18 | ino_t ino; | ||
19 | __u32 fh[CLEANCACHE_KEY_MAX]; | ||
20 | u32 key[CLEANCACHE_KEY_MAX]; | ||
21 | } u; | ||
22 | }; | ||
23 | |||
24 | struct cleancache_ops { | ||
25 | int (*init_fs)(size_t); | ||
26 | int (*init_shared_fs)(char *uuid, size_t); | ||
27 | int (*get_page)(int, struct cleancache_filekey, | ||
28 | pgoff_t, struct page *); | ||
29 | void (*put_page)(int, struct cleancache_filekey, | ||
30 | pgoff_t, struct page *); | ||
31 | void (*flush_page)(int, struct cleancache_filekey, pgoff_t); | ||
32 | void (*flush_inode)(int, struct cleancache_filekey); | ||
33 | void (*flush_fs)(int); | ||
34 | }; | ||
35 | |||
36 | extern struct cleancache_ops | ||
37 | cleancache_register_ops(struct cleancache_ops *ops); | ||
38 | extern void __cleancache_init_fs(struct super_block *); | ||
39 | extern void __cleancache_init_shared_fs(char *, struct super_block *); | ||
40 | extern int __cleancache_get_page(struct page *); | ||
41 | extern void __cleancache_put_page(struct page *); | ||
42 | extern void __cleancache_flush_page(struct address_space *, struct page *); | ||
43 | extern void __cleancache_flush_inode(struct address_space *); | ||
44 | extern void __cleancache_flush_fs(struct super_block *); | ||
45 | extern int cleancache_enabled; | ||
46 | |||
47 | #ifdef CONFIG_CLEANCACHE | ||
48 | static inline bool cleancache_fs_enabled(struct page *page) | ||
49 | { | ||
50 | return page->mapping->host->i_sb->cleancache_poolid >= 0; | ||
51 | } | ||
52 | static inline bool cleancache_fs_enabled_mapping(struct address_space *mapping) | ||
53 | { | ||
54 | return mapping->host->i_sb->cleancache_poolid >= 0; | ||
55 | } | ||
56 | #else | ||
57 | #define cleancache_enabled (0) | ||
58 | #define cleancache_fs_enabled(_page) (0) | ||
59 | #define cleancache_fs_enabled_mapping(_page) (0) | ||
60 | #endif | ||
61 | |||
62 | /* | ||
63 | * The shim layer provided by these inline functions allows the compiler | ||
64 | * to reduce all cleancache hooks to nothingness if CONFIG_CLEANCACHE | ||
65 | * is disabled, to a single global variable check if CONFIG_CLEANCACHE | ||
66 | * is enabled but no cleancache "backend" has dynamically enabled it, | ||
67 | * and, for the most frequent cleancache ops, to a single global variable | ||
68 | * check plus a superblock element comparison if CONFIG_CLEANCACHE is enabled | ||
69 | * and a cleancache backend has dynamically enabled cleancache, but the | ||
70 | * filesystem referenced by that cleancache op has not enabled cleancache. | ||
71 | * As a result, CONFIG_CLEANCACHE can be enabled by default with essentially | ||
72 | * no measurable performance impact. | ||
73 | */ | ||
74 | |||
75 | static inline void cleancache_init_fs(struct super_block *sb) | ||
76 | { | ||
77 | if (cleancache_enabled) | ||
78 | __cleancache_init_fs(sb); | ||
79 | } | ||
80 | |||
81 | static inline void cleancache_init_shared_fs(char *uuid, struct super_block *sb) | ||
82 | { | ||
83 | if (cleancache_enabled) | ||
84 | __cleancache_init_shared_fs(uuid, sb); | ||
85 | } | ||
86 | |||
87 | static inline int cleancache_get_page(struct page *page) | ||
88 | { | ||
89 | int ret = -1; | ||
90 | |||
91 | if (cleancache_enabled && cleancache_fs_enabled(page)) | ||
92 | ret = __cleancache_get_page(page); | ||
93 | return ret; | ||
94 | } | ||
95 | |||
96 | static inline void cleancache_put_page(struct page *page) | ||
97 | { | ||
98 | if (cleancache_enabled && cleancache_fs_enabled(page)) | ||
99 | __cleancache_put_page(page); | ||
100 | } | ||
101 | |||
102 | static inline void cleancache_flush_page(struct address_space *mapping, | ||
103 | struct page *page) | ||
104 | { | ||
105 | /* careful... page->mapping is NULL sometimes when this is called */ | ||
106 | if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) | ||
107 | __cleancache_flush_page(mapping, page); | ||
108 | } | ||
109 | |||
110 | static inline void cleancache_flush_inode(struct address_space *mapping) | ||
111 | { | ||
112 | if (cleancache_enabled && cleancache_fs_enabled_mapping(mapping)) | ||
113 | __cleancache_flush_inode(mapping); | ||
114 | } | ||
115 | |||
116 | static inline void cleancache_flush_fs(struct super_block *sb) | ||
117 | { | ||
118 | if (cleancache_enabled) | ||
119 | __cleancache_flush_fs(sb); | ||
120 | } | ||
121 | |||
122 | #endif /* _LINUX_CLEANCACHE_H */ | ||
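
The header gives the VFS cheap inline hooks and lets a backend plug in a struct cleancache_ops at run time via cleancache_register_ops(). A skeleton of a backend registration, with stub callbacks (a real backend such as a tmem implementation would actually store and retrieve the page contents; get_page returns 0 on hit and -1 on miss, init_fs returns a pool id):

#include <linux/cleancache.h>

static int foo_init_fs(size_t pagesize) { return 0; /* pool id 0 */ }
static int foo_init_shared_fs(char *uuid, size_t pagesize) { return 0; }
static int foo_get_page(int pool, struct cleancache_filekey key,
                        pgoff_t index, struct page *page) { return -1; }
static void foo_put_page(int pool, struct cleancache_filekey key,
                         pgoff_t index, struct page *page) { }
static void foo_flush_page(int pool, struct cleancache_filekey key,
                           pgoff_t index) { }
static void foo_flush_inode(int pool, struct cleancache_filekey key) { }
static void foo_flush_fs(int pool) { }

static struct cleancache_ops foo_cleancache_ops = {
        .init_fs        = foo_init_fs,
        .init_shared_fs = foo_init_shared_fs,
        .get_page       = foo_get_page,
        .put_page       = foo_put_page,
        .flush_page     = foo_flush_page,
        .flush_inode    = foo_flush_inode,
        .flush_fs       = foo_flush_fs,
};

static int __init foo_cleancache_init(void)
{
        /* Returns the previously registered ops (all-NULL if none). */
        struct cleancache_ops old = cleancache_register_ops(&foo_cleancache_ops);

        (void)old;
        return 0;
}
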
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h index 088cd4ace4ef..74054074e876 100644 --- a/include/linux/crash_dump.h +++ b/include/linux/crash_dump.h | |||
@@ -66,6 +66,11 @@ static inline void vmcore_unusable(void) | |||
66 | if (is_kdump_kernel()) | 66 | if (is_kdump_kernel()) |
67 | elfcorehdr_addr = ELFCORE_ADDR_ERR; | 67 | elfcorehdr_addr = ELFCORE_ADDR_ERR; |
68 | } | 68 | } |
69 | |||
70 | #define HAVE_OLDMEM_PFN_IS_RAM 1 | ||
71 | extern int register_oldmem_pfn_is_ram(int (*fn)(unsigned long pfn)); | ||
72 | extern void unregister_oldmem_pfn_is_ram(void); | ||
73 | |||
69 | #else /* !CONFIG_CRASH_DUMP */ | 74 | #else /* !CONFIG_CRASH_DUMP */ |
70 | static inline int is_kdump_kernel(void) { return 0; } | 75 | static inline int is_kdump_kernel(void) { return 0; } |
71 | #endif /* CONFIG_CRASH_DUMP */ | 76 | #endif /* CONFIG_CRASH_DUMP */ |
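
register_oldmem_pfn_is_ram() lets a hypervisor-specific module tell the vmcore reader which old-memory pfns are really backed by RAM, so /proc/vmcore can skip ballooned-out or unpopulated pages. A hedged sketch of a caller; foo_hypervisor_pfn_populated() is a stand-in for whatever query the hypervisor interface actually provides:

#include <linux/crash_dump.h>

extern int foo_hypervisor_pfn_populated(unsigned long pfn);   /* hypothetical */

static int foo_oldmem_pfn_is_ram(unsigned long pfn)
{
        /* 1: the old kernel's pfn still has real memory behind it,
         * 0: it was ballooned out / never populated. */
        return foo_hypervisor_pfn_populated(pfn);
}

static int __init foo_kdump_init(void)
{
        return register_oldmem_pfn_is_ram(foo_oldmem_pfn_is_ram);
}

static void __exit foo_kdump_exit(void)
{
        unregister_oldmem_pfn_is_ram();
}
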
diff --git a/include/linux/cred.h b/include/linux/cred.h index be16b61283cc..82607992f308 100644 --- a/include/linux/cred.h +++ b/include/linux/cred.h | |||
@@ -1,4 +1,4 @@ | |||
1 | /* Credentials management - see Documentation/credentials.txt | 1 | /* Credentials management - see Documentation/security/credentials.txt |
2 | * | 2 | * |
3 | * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h index ebeb2f3ad068..6843cf193a44 100644 --- a/include/linux/flex_array.h +++ b/include/linux/flex_array.h | |||
@@ -21,6 +21,8 @@ struct flex_array { | |||
21 | struct { | 21 | struct { |
22 | int element_size; | 22 | int element_size; |
23 | int total_nr_elements; | 23 | int total_nr_elements; |
24 | int elems_per_part; | ||
25 | u32 reciprocal_elems; | ||
24 | struct flex_array_part *parts[]; | 26 | struct flex_array_part *parts[]; |
25 | }; | 27 | }; |
26 | /* | 28 | /* |
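
The two new fields cache the per-part element count and a reciprocal used for fast index-to-part divides; the caller-visible flex_array API is unchanged. For context, typical usage still looks roughly like this (shown only as a usage reminder, with an invented record type):

#include <linux/flex_array.h>

struct foo_rec { u32 id; u32 value; };

static int foo_store(unsigned int nr_items)
{
        struct flex_array *fa;
        struct foo_rec rec = { .id = 1, .value = 42 };
        struct foo_rec *p;
        int err;

        fa = flex_array_alloc(sizeof(struct foo_rec), nr_items, GFP_KERNEL);
        if (!fa)
                return -ENOMEM;

        err = flex_array_put(fa, 0, &rec, GFP_KERNEL);  /* copy into slot 0 */
        if (err)
                goto out;

        p = flex_array_get(fa, 0);                      /* read it back */
        if (!p)
                err = -EINVAL;
out:
        flex_array_free(fa);
        return err;
}
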
diff --git a/include/linux/fs.h b/include/linux/fs.h index 3f9d3251790d..241609346dfb 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h | |||
@@ -1428,6 +1428,11 @@ struct super_block { | |||
1428 | */ | 1428 | */ |
1429 | char __rcu *s_options; | 1429 | char __rcu *s_options; |
1430 | const struct dentry_operations *s_d_op; /* default d_op for dentries */ | 1430 | const struct dentry_operations *s_d_op; /* default d_op for dentries */ |
1431 | |||
1432 | /* | ||
1433 | * Saved pool identifier for cleancache (-1 means none) | ||
1434 | */ | ||
1435 | int cleancache_poolid; | ||
1431 | }; | 1436 | }; |
1432 | 1437 | ||
1433 | extern struct timespec current_fs_time(struct super_block *sb); | 1438 | extern struct timespec current_fs_time(struct super_block *sb); |
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 943c76b3d4bb..59225ef27d15 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h | |||
@@ -1,6 +1,7 @@ | |||
1 | #ifndef _LINUX_HUGETLB_H | 1 | #ifndef _LINUX_HUGETLB_H |
2 | #define _LINUX_HUGETLB_H | 2 | #define _LINUX_HUGETLB_H |
3 | 3 | ||
4 | #include <linux/mm_types.h> | ||
4 | #include <linux/fs.h> | 5 | #include <linux/fs.h> |
5 | #include <linux/hugetlb_inline.h> | 6 | #include <linux/hugetlb_inline.h> |
6 | 7 | ||
@@ -41,7 +42,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
41 | unsigned long address, unsigned int flags); | 42 | unsigned long address, unsigned int flags); |
42 | int hugetlb_reserve_pages(struct inode *inode, long from, long to, | 43 | int hugetlb_reserve_pages(struct inode *inode, long from, long to, |
43 | struct vm_area_struct *vma, | 44 | struct vm_area_struct *vma, |
44 | int acctflags); | 45 | vm_flags_t vm_flags); |
45 | void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); | 46 | void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed); |
46 | int dequeue_hwpoisoned_huge_page(struct page *page); | 47 | int dequeue_hwpoisoned_huge_page(struct page *page); |
47 | void copy_huge_page(struct page *dst, struct page *src); | 48 | void copy_huge_page(struct page *dst, struct page *src); |
@@ -168,7 +169,7 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) | |||
168 | 169 | ||
169 | extern const struct file_operations hugetlbfs_file_operations; | 170 | extern const struct file_operations hugetlbfs_file_operations; |
170 | extern const struct vm_operations_struct hugetlb_vm_ops; | 171 | extern const struct vm_operations_struct hugetlb_vm_ops; |
171 | struct file *hugetlb_file_setup(const char *name, size_t size, int acct, | 172 | struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, |
172 | struct user_struct **user, int creat_flags); | 173 | struct user_struct **user, int creat_flags); |
173 | int hugetlb_get_quota(struct address_space *mapping, long delta); | 174 | int hugetlb_get_quota(struct address_space *mapping, long delta); |
174 | void hugetlb_put_quota(struct address_space *mapping, long delta); | 175 | void hugetlb_put_quota(struct address_space *mapping, long delta); |
@@ -192,7 +193,7 @@ static inline void set_file_hugepages(struct file *file) | |||
192 | #define is_file_hugepages(file) 0 | 193 | #define is_file_hugepages(file) 0 |
193 | #define set_file_hugepages(file) BUG() | 194 | #define set_file_hugepages(file) BUG() |
194 | static inline struct file *hugetlb_file_setup(const char *name, size_t size, | 195 | static inline struct file *hugetlb_file_setup(const char *name, size_t size, |
195 | int acctflag, struct user_struct **user, int creat_flags) | 196 | vm_flags_t acctflag, struct user_struct **user, int creat_flags) |
196 | { | 197 | { |
197 | return ERR_PTR(-ENOSYS); | 198 | return ERR_PTR(-ENOSYS); |
198 | } | 199 | } |
diff --git a/include/linux/hugetlb_inline.h b/include/linux/hugetlb_inline.h index 6931489a5c14..2bb681fbeb35 100644 --- a/include/linux/hugetlb_inline.h +++ b/include/linux/hugetlb_inline.h | |||
@@ -7,7 +7,7 @@ | |||
7 | 7 | ||
8 | static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) | 8 | static inline int is_vm_hugetlb_page(struct vm_area_struct *vma) |
9 | { | 9 | { |
10 | return vma->vm_flags & VM_HUGETLB; | 10 | return !!(vma->vm_flags & VM_HUGETLB); |
11 | } | 11 | } |
12 | 12 | ||
13 | #else | 13 | #else |
diff --git a/include/linux/i2c/twl.h b/include/linux/i2c/twl.h index 0c0d1ae79981..ba4f88624fcd 100644 --- a/include/linux/i2c/twl.h +++ b/include/linux/i2c/twl.h | |||
@@ -91,6 +91,7 @@ | |||
91 | #define BCI_INTR_OFFSET 2 | 91 | #define BCI_INTR_OFFSET 2 |
92 | #define MADC_INTR_OFFSET 3 | 92 | #define MADC_INTR_OFFSET 3 |
93 | #define USB_INTR_OFFSET 4 | 93 | #define USB_INTR_OFFSET 4 |
94 | #define CHARGERFAULT_INTR_OFFSET 5 | ||
94 | #define BCI_PRES_INTR_OFFSET 9 | 95 | #define BCI_PRES_INTR_OFFSET 9 |
95 | #define USB_PRES_INTR_OFFSET 10 | 96 | #define USB_PRES_INTR_OFFSET 10 |
96 | #define RTC_INTR_OFFSET 11 | 97 | #define RTC_INTR_OFFSET 11 |
@@ -150,7 +151,12 @@ | |||
150 | #define MMC_PU (0x1 << 3) | 151 | #define MMC_PU (0x1 << 3) |
151 | #define MMC_PD (0x1 << 2) | 152 | #define MMC_PD (0x1 << 2) |
152 | 153 | ||
153 | 154 | #define TWL_SIL_TYPE(rev) ((rev) & 0x00FFFFFF) | |
155 | #define TWL_SIL_REV(rev) ((rev) >> 24) | ||
156 | #define TWL_SIL_5030 0x09002F | ||
157 | #define TWL5030_REV_1_0 0x00 | ||
158 | #define TWL5030_REV_1_1 0x10 | ||
159 | #define TWL5030_REV_1_2 0x30 | ||
154 | 160 | ||
155 | #define TWL4030_CLASS_ID 0x4030 | 161 | #define TWL4030_CLASS_ID 0x4030 |
156 | #define TWL6030_CLASS_ID 0x6030 | 162 | #define TWL6030_CLASS_ID 0x6030 |
@@ -165,6 +171,8 @@ static inline int twl_class_is_ ##class(void) \ | |||
165 | TWL_CLASS_IS(4030, TWL4030_CLASS_ID) | 171 | TWL_CLASS_IS(4030, TWL4030_CLASS_ID) |
166 | TWL_CLASS_IS(6030, TWL6030_CLASS_ID) | 172 | TWL_CLASS_IS(6030, TWL6030_CLASS_ID) |
167 | 173 | ||
174 | #define TWL6025_SUBCLASS BIT(4) /* TWL6025 has changed registers */ | ||
175 | |||
168 | /* | 176 | /* |
169 | * Read and write single 8-bit registers | 177 | * Read and write single 8-bit registers |
170 | */ | 178 | */ |
@@ -180,6 +188,9 @@ int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg); | |||
180 | int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); | 188 | int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); |
181 | int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); | 189 | int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes); |
182 | 190 | ||
191 | int twl_get_type(void); | ||
192 | int twl_get_version(void); | ||
193 | |||
183 | int twl6030_interrupt_unmask(u8 bit_mask, u8 offset); | 194 | int twl6030_interrupt_unmask(u8 bit_mask, u8 offset); |
184 | int twl6030_interrupt_mask(u8 bit_mask, u8 offset); | 195 | int twl6030_interrupt_mask(u8 bit_mask, u8 offset); |
185 | 196 | ||
@@ -279,7 +290,12 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot) | |||
279 | *(Use TWL_4030_MODULE_INTBR) | 290 | *(Use TWL_4030_MODULE_INTBR) |
280 | */ | 291 | */ |
281 | 292 | ||
293 | #define REG_IDCODE_7_0 0x00 | ||
294 | #define REG_IDCODE_15_8 0x01 | ||
295 | #define REG_IDCODE_16_23 0x02 | ||
296 | #define REG_IDCODE_31_24 0x03 | ||
282 | #define REG_GPPUPDCTR1 0x0F | 297 | #define REG_GPPUPDCTR1 0x0F |
298 | #define REG_UNLOCK_TEST_REG 0x12 | ||
283 | 299 | ||
284 | /*I2C1 and I2C4(SR) SDA/SCL pull-up control bits */ | 300 | /*I2C1 and I2C4(SR) SDA/SCL pull-up control bits */ |
285 | 301 | ||
@@ -288,6 +304,8 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot) | |||
288 | #define SR_I2C_SCL_CTRL_PU BIT(4) | 304 | #define SR_I2C_SCL_CTRL_PU BIT(4) |
289 | #define SR_I2C_SDA_CTRL_PU BIT(6) | 305 | #define SR_I2C_SDA_CTRL_PU BIT(6) |
290 | 306 | ||
307 | #define TWL_EEPROM_R_UNLOCK 0x49 | ||
308 | |||
291 | /*----------------------------------------------------------------------*/ | 309 | /*----------------------------------------------------------------------*/ |
292 | 310 | ||
293 | /* | 311 | /* |
@@ -501,7 +519,7 @@ static inline int twl6030_mmc_card_detect(struct device *dev, int slot) | |||
501 | #define RES_32KCLKOUT 26 | 519 | #define RES_32KCLKOUT 26 |
502 | #define RES_RESET 27 | 520 | #define RES_RESET 27 |
503 | /* Power Reference */ | 521 | /* Power Reference */ |
504 | #define RES_Main_Ref 28 | 522 | #define RES_MAIN_REF 28 |
505 | 523 | ||
506 | #define TOTAL_RESOURCES 28 | 524 | #define TOTAL_RESOURCES 28 |
507 | /* | 525 | /* |
@@ -593,6 +611,7 @@ enum twl4030_usb_mode { | |||
593 | 611 | ||
594 | struct twl4030_usb_data { | 612 | struct twl4030_usb_data { |
595 | enum twl4030_usb_mode usb_mode; | 613 | enum twl4030_usb_mode usb_mode; |
614 | unsigned long features; | ||
596 | 615 | ||
597 | int (*phy_init)(struct device *dev); | 616 | int (*phy_init)(struct device *dev); |
598 | int (*phy_exit)(struct device *dev); | 617 | int (*phy_exit)(struct device *dev); |
@@ -699,6 +718,20 @@ struct twl4030_platform_data { | |||
699 | struct regulator_init_data *vcxio; | 718 | struct regulator_init_data *vcxio; |
700 | struct regulator_init_data *vusb; | 719 | struct regulator_init_data *vusb; |
701 | struct regulator_init_data *clk32kg; | 720 | struct regulator_init_data *clk32kg; |
721 | /* TWL6025 LDO regulators */ | ||
722 | struct regulator_init_data *ldo1; | ||
723 | struct regulator_init_data *ldo2; | ||
724 | struct regulator_init_data *ldo3; | ||
725 | struct regulator_init_data *ldo4; | ||
726 | struct regulator_init_data *ldo5; | ||
727 | struct regulator_init_data *ldo6; | ||
728 | struct regulator_init_data *ldo7; | ||
729 | struct regulator_init_data *ldoln; | ||
730 | struct regulator_init_data *ldousb; | ||
731 | /* TWL6025 DCDC regulators */ | ||
732 | struct regulator_init_data *smps3; | ||
733 | struct regulator_init_data *smps4; | ||
734 | struct regulator_init_data *vio6025; | ||
702 | }; | 735 | }; |
703 | 736 | ||
704 | /*----------------------------------------------------------------------*/ | 737 | /*----------------------------------------------------------------------*/ |
@@ -780,4 +813,21 @@ static inline int twl4030charger_usb_en(int enable) { return 0; } | |||
780 | #define TWL6030_REG_VRTC 47 | 813 | #define TWL6030_REG_VRTC 47 |
781 | #define TWL6030_REG_CLK32KG 48 | 814 | #define TWL6030_REG_CLK32KG 48 |
782 | 815 | ||
816 | /* LDOs on 6025 have different names */ | ||
817 | #define TWL6025_REG_LDO2 49 | ||
818 | #define TWL6025_REG_LDO4 50 | ||
819 | #define TWL6025_REG_LDO3 51 | ||
820 | #define TWL6025_REG_LDO5 52 | ||
821 | #define TWL6025_REG_LDO1 53 | ||
822 | #define TWL6025_REG_LDO7 54 | ||
823 | #define TWL6025_REG_LDO6 55 | ||
824 | #define TWL6025_REG_LDOLN 56 | ||
825 | #define TWL6025_REG_LDOUSB 57 | ||
826 | |||
827 | /* 6025 DCDC supplies */ | ||
828 | #define TWL6025_REG_SMPS3 58 | ||
829 | #define TWL6025_REG_SMPS4 59 | ||
830 | #define TWL6025_REG_VIO 60 | ||
831 | |||
832 | |||
783 | #endif /* End of __TWL4030_H */ | 833 | #endif /* End of __TWL4030_H */ |
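
twl_get_type()/twl_get_version() and the TWL_SIL_* macros let board or driver code key errata workarounds off the silicon revision read from the new IDCODE registers. A hedged example of the intended kind of check, assuming twl_get_type() reports the TWL_SIL_TYPE() field and twl_get_version() the TWL_SIL_REV() field (the errata condition itself is invented):

#include <linux/i2c/twl.h>

static bool twl5030_needs_errata_workaround(void)
{
        /* Example: only TWL5030 silicon older than rev 1.2 is affected. */
        return twl_get_type() == TWL_SIL_5030 &&
               twl_get_version() < TWL5030_REV_1_2;
}
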
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h index 0f1325d98295..0065ffd3226b 100644 --- a/include/linux/if_ether.h +++ b/include/linux/if_ether.h | |||
@@ -132,10 +132,6 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb) | |||
132 | 132 | ||
133 | int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); | 133 | int eth_header_parse(const struct sk_buff *skb, unsigned char *haddr); |
134 | 134 | ||
135 | #ifdef CONFIG_SYSCTL | ||
136 | extern struct ctl_table ether_table[]; | ||
137 | #endif | ||
138 | |||
139 | int mac_pton(const char *s, u8 *mac); | 135 | int mac_pton(const char *s, u8 *mac); |
140 | extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); | 136 | extern ssize_t sysfs_format_mac(char *buf, const unsigned char *addr, int len); |
141 | 137 | ||
diff --git a/include/linux/init_task.h b/include/linux/init_task.h index bafc58c00fc3..580f70c02391 100644 --- a/include/linux/init_task.h +++ b/include/linux/init_task.h | |||
@@ -22,6 +22,14 @@ | |||
22 | extern struct files_struct init_files; | 22 | extern struct files_struct init_files; |
23 | extern struct fs_struct init_fs; | 23 | extern struct fs_struct init_fs; |
24 | 24 | ||
25 | #ifdef CONFIG_CGROUPS | ||
26 | #define INIT_THREADGROUP_FORK_LOCK(sig) \ | ||
27 | .threadgroup_fork_lock = \ | ||
28 | __RWSEM_INITIALIZER(sig.threadgroup_fork_lock), | ||
29 | #else | ||
30 | #define INIT_THREADGROUP_FORK_LOCK(sig) | ||
31 | #endif | ||
32 | |||
25 | #define INIT_SIGNALS(sig) { \ | 33 | #define INIT_SIGNALS(sig) { \ |
26 | .nr_threads = 1, \ | 34 | .nr_threads = 1, \ |
27 | .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ | 35 | .wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\ |
@@ -38,6 +46,7 @@ extern struct fs_struct init_fs; | |||
38 | }, \ | 46 | }, \ |
39 | .cred_guard_mutex = \ | 47 | .cred_guard_mutex = \ |
40 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ | 48 | __MUTEX_INITIALIZER(sig.cred_guard_mutex), \ |
49 | INIT_THREADGROUP_FORK_LOCK(sig) \ | ||
41 | } | 50 | } |
42 | 51 | ||
43 | extern struct nsproxy init_nsproxy; | 52 | extern struct nsproxy init_nsproxy; |
diff --git a/include/linux/input/pmic8xxx-keypad.h b/include/linux/input/pmic8xxx-keypad.h new file mode 100644 index 000000000000..5f1e2f9ad959 --- /dev/null +++ b/include/linux/input/pmic8xxx-keypad.h | |||
@@ -0,0 +1,52 @@ | |||
1 | /* Copyright (c) 2011, Code Aurora Forum. All rights reserved. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | */ | ||
12 | |||
13 | #ifndef __PMIC8XXX_KEYPAD_H__ | ||
14 | #define __PMIC8XXX_KEYPAD_H__ | ||
15 | |||
16 | #include <linux/input/matrix_keypad.h> | ||
17 | |||
18 | #define PM8XXX_KEYPAD_DEV_NAME "pm8xxx-keypad" | ||
19 | |||
20 | /** | ||
21 | * struct pm8xxx_keypad_platform_data - platform data for keypad | ||
22 | * @keymap_data - matrix keymap data | ||
23 | * @input_name - input device name | ||
24 | * @input_phys_device - physical name of the input device | ||
25 | * @num_cols - number of columns of keypad | ||
26 | * @num_rows - number of rows of keypad | ||
27 | * @debounce_ms - debounce period in milliseconds | ||
28 | * @scan_delay_ms - scan delay in milliseconds | ||
29 | * @row_hold_ns - row hold period in nanoseconds | ||
30 | * @wakeup - configure keypad as wakeup | ||
31 | * @rep - enable or disable key repeat bit | ||
32 | */ | ||
33 | struct pm8xxx_keypad_platform_data { | ||
34 | const struct matrix_keymap_data *keymap_data; | ||
35 | |||
36 | const char *input_name; | ||
37 | const char *input_phys_device; | ||
38 | |||
39 | unsigned int num_cols; | ||
40 | unsigned int num_rows; | ||
41 | unsigned int rows_gpio_start; | ||
42 | unsigned int cols_gpio_start; | ||
43 | |||
44 | unsigned int debounce_ms; | ||
45 | unsigned int scan_delay_ms; | ||
46 | unsigned int row_hold_ns; | ||
47 | |||
48 | bool wakeup; | ||
49 | bool rep; | ||
50 | }; | ||
51 | |||
52 | #endif /*__PMIC8XXX_KEYPAD_H__ */ | ||
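
The platform data above is consumed by the "pm8xxx-keypad" sub-device; a board file would declare it roughly as follows (the keymap, device name and timing values are illustrative, not taken from any real board):

#include <linux/input/pmic8xxx-keypad.h>

static const uint32_t board_keymap[] = {
        KEY(0, 0, KEY_VOLUMEUP),
        KEY(0, 1, KEY_VOLUMEDOWN),
};

static struct matrix_keymap_data board_keymap_data = {
        .keymap      = board_keymap,
        .keymap_size = ARRAY_SIZE(board_keymap),
};

static struct pm8xxx_keypad_platform_data board_keypad_pdata = {
        .keymap_data    = &board_keymap_data,
        .input_name     = "board-keypad",
        .num_rows       = 5,
        .num_cols       = 5,
        .debounce_ms    = 15,
        .scan_delay_ms  = 32,
        .row_hold_ns    = 91500,
        .wakeup         = true,
        .rep            = true,
};
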
diff --git a/include/linux/input/pmic8xxx-pwrkey.h b/include/linux/input/pmic8xxx-pwrkey.h new file mode 100644 index 000000000000..6d2974e57109 --- /dev/null +++ b/include/linux/input/pmic8xxx-pwrkey.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. | ||
2 | * | ||
3 | * This program is free software; you can redistribute it and/or modify | ||
4 | * it under the terms of the GNU General Public License version 2 and | ||
5 | * only version 2 as published by the Free Software Foundation. | ||
6 | * | ||
7 | * This program is distributed in the hope that it will be useful, | ||
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
10 | * GNU General Public License for more details. | ||
11 | */ | ||
12 | |||
13 | #ifndef __PMIC8XXX_PWRKEY_H__ | ||
14 | #define __PMIC8XXX_PWRKEY_H__ | ||
15 | |||
16 | #define PM8XXX_PWRKEY_DEV_NAME "pm8xxx-pwrkey" | ||
17 | |||
18 | /** | ||
19 | * struct pm8xxx_pwrkey_platform_data - platform data for pwrkey driver | ||
20 | * @pull_up: power on register control for pull up/down configuration | ||
21 | * @kpd_trigger_delay_us: time delay for power key state change interrupt | ||
22 | * trigger. | ||
23 | * @wakeup: configure power key as wakeup source | ||
24 | */ | ||
25 | struct pm8xxx_pwrkey_platform_data { | ||
26 | bool pull_up; | ||
27 | u32 kpd_trigger_delay_us; | ||
28 | u32 wakeup; | ||
29 | }; | ||
30 | |||
31 | #endif /* __PMIC8XXX_PWRKEY_H__ */ | ||
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h index 906590aa6907..204f9cd26c16 100644 --- a/include/linux/ipmi_smi.h +++ b/include/linux/ipmi_smi.h | |||
@@ -236,7 +236,7 @@ static inline void ipmi_free_smi_msg(struct ipmi_smi_msg *msg) | |||
236 | directory for this interface. Note that the entry will | 236 | directory for this interface. Note that the entry will |
237 | automatically be destroyed when the interface is destroyed. */ | 237 | automatically be destroyed when the interface is destroyed. */ |
238 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, | 238 | int ipmi_smi_add_proc_entry(ipmi_smi_t smi, char *name, |
239 | read_proc_t *read_proc, | 239 | const struct file_operations *proc_ops, |
240 | void *data); | 240 | void *data); |
241 | 241 | ||
242 | #endif /* __LINUX_IPMI_SMI_H */ | 242 | #endif /* __LINUX_IPMI_SMI_H */ |
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h index a32dcaec04e1..4ecb7b16b278 100644 --- a/include/linux/jbd2.h +++ b/include/linux/jbd2.h | |||
@@ -529,9 +529,10 @@ struct transaction_s | |||
529 | enum { | 529 | enum { |
530 | T_RUNNING, | 530 | T_RUNNING, |
531 | T_LOCKED, | 531 | T_LOCKED, |
532 | T_RUNDOWN, | ||
533 | T_FLUSH, | 532 | T_FLUSH, |
534 | T_COMMIT, | 533 | T_COMMIT, |
534 | T_COMMIT_DFLUSH, | ||
535 | T_COMMIT_JFLUSH, | ||
535 | T_FINISHED | 536 | T_FINISHED |
536 | } t_state; | 537 | } t_state; |
537 | 538 | ||
@@ -658,7 +659,9 @@ struct transaction_s | |||
658 | * waiting for it to finish. | 659 | * waiting for it to finish. |
659 | */ | 660 | */ |
660 | unsigned int t_synchronous_commit:1; | 661 | unsigned int t_synchronous_commit:1; |
661 | unsigned int t_flushed_data_blocks:1; | 662 | |
663 | /* Disk flush needs to be sent to fs partition [no locking] */ | ||
664 | int t_need_data_flush; | ||
662 | 665 | ||
663 | /* | 666 | /* |
664 | * For use by the filesystem to store fs-specific data | 667 | * For use by the filesystem to store fs-specific data |
@@ -1228,6 +1231,7 @@ int jbd2_journal_start_commit(journal_t *journal, tid_t *tid); | |||
1228 | int jbd2_journal_force_commit_nested(journal_t *journal); | 1231 | int jbd2_journal_force_commit_nested(journal_t *journal); |
1229 | int jbd2_log_wait_commit(journal_t *journal, tid_t tid); | 1232 | int jbd2_log_wait_commit(journal_t *journal, tid_t tid); |
1230 | int jbd2_log_do_checkpoint(journal_t *journal); | 1233 | int jbd2_log_do_checkpoint(journal_t *journal); |
1234 | int jbd2_trans_will_send_data_barrier(journal_t *journal, tid_t tid); | ||
1231 | 1235 | ||
1232 | void __jbd2_log_wait_for_space(journal_t *journal); | 1236 | void __jbd2_log_wait_for_space(journal_t *journal); |
1233 | extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); | 1237 | extern void __jbd2_journal_drop_transaction(journal_t *, transaction_t *); |
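
jbd2_trans_will_send_data_barrier(), together with the new t_need_data_flush flag and the split T_COMMIT_DFLUSH/T_COMMIT_JFLUSH states, lets a filesystem's fsync path find out whether committing a given transaction will already flush the data device, so it can avoid issuing a redundant flush of its own. A hedged sketch of the intended calling pattern (blkdev_issue_flush() arguments follow the contemporary signature; the helper name is invented):

#include <linux/jbd2.h>
#include <linux/blkdev.h>

static int foo_fsync_flush(journal_t *journal, tid_t commit_tid,
                           struct block_device *bdev)
{
        bool needs_flush;
        int err;

        /* Decide before waiting: will the commit of this transaction
         * already send a cache flush to the data device? */
        needs_flush = !jbd2_trans_will_send_data_barrier(journal, commit_tid);

        err = jbd2_log_wait_commit(journal, commit_tid);

        if (!err && needs_flush)
                err = blkdev_issue_flush(bdev, GFP_KERNEL, NULL);

        return err;
}
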
diff --git a/include/linux/key.h b/include/linux/key.h index ef19b99aff98..6ea4eebd3467 100644 --- a/include/linux/key.h +++ b/include/linux/key.h | |||
@@ -9,7 +9,7 @@ | |||
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | * | 10 | * |
11 | * | 11 | * |
12 | * See Documentation/keys.txt for information on keys/keyrings. | 12 | * See Documentation/security/keys.txt for information on keys/keyrings. |
13 | */ | 13 | */ |
14 | 14 | ||
15 | #ifndef _LINUX_KEY_H | 15 | #ifndef _LINUX_KEY_H |
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h index 5e9840f50980..9724a38ee69d 100644 --- a/include/linux/memcontrol.h +++ b/include/linux/memcontrol.h | |||
@@ -20,6 +20,8 @@ | |||
20 | #ifndef _LINUX_MEMCONTROL_H | 20 | #ifndef _LINUX_MEMCONTROL_H |
21 | #define _LINUX_MEMCONTROL_H | 21 | #define _LINUX_MEMCONTROL_H |
22 | #include <linux/cgroup.h> | 22 | #include <linux/cgroup.h> |
23 | #include <linux/vm_event_item.h> | ||
24 | |||
23 | struct mem_cgroup; | 25 | struct mem_cgroup; |
24 | struct page_cgroup; | 26 | struct page_cgroup; |
25 | struct page; | 27 | struct page; |
@@ -106,9 +108,10 @@ extern void mem_cgroup_end_migration(struct mem_cgroup *mem, | |||
106 | */ | 108 | */ |
107 | int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg); | 109 | int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg); |
108 | int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg); | 110 | int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg); |
109 | unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, | 111 | int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); |
110 | struct zone *zone, | 112 | unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, |
111 | enum lru_list lru); | 113 | struct zone *zone, |
114 | enum lru_list lru); | ||
112 | struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, | 115 | struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, |
113 | struct zone *zone); | 116 | struct zone *zone); |
114 | struct zone_reclaim_stat* | 117 | struct zone_reclaim_stat* |
@@ -144,9 +147,11 @@ static inline void mem_cgroup_dec_page_stat(struct page *page, | |||
144 | } | 147 | } |
145 | 148 | ||
146 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | 149 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, |
147 | gfp_t gfp_mask); | 150 | gfp_t gfp_mask, |
151 | unsigned long *total_scanned); | ||
148 | u64 mem_cgroup_get_limit(struct mem_cgroup *mem); | 152 | u64 mem_cgroup_get_limit(struct mem_cgroup *mem); |
149 | 153 | ||
154 | void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx); | ||
150 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | 155 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE |
151 | void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail); | 156 | void mem_cgroup_split_huge_fixup(struct page *head, struct page *tail); |
152 | #endif | 157 | #endif |
@@ -302,8 +307,8 @@ mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg) | |||
302 | } | 307 | } |
303 | 308 | ||
304 | static inline unsigned long | 309 | static inline unsigned long |
305 | mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, struct zone *zone, | 310 | mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, struct zone *zone, |
306 | enum lru_list lru) | 311 | enum lru_list lru) |
307 | { | 312 | { |
308 | return 0; | 313 | return 0; |
309 | } | 314 | } |
@@ -338,7 +343,8 @@ static inline void mem_cgroup_dec_page_stat(struct page *page, | |||
338 | 343 | ||
339 | static inline | 344 | static inline |
340 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | 345 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, |
341 | gfp_t gfp_mask) | 346 | gfp_t gfp_mask, |
347 | unsigned long *total_scanned) | ||
342 | { | 348 | { |
343 | return 0; | 349 | return 0; |
344 | } | 350 | } |
@@ -354,6 +360,10 @@ static inline void mem_cgroup_split_huge_fixup(struct page *head, | |||
354 | { | 360 | { |
355 | } | 361 | } |
356 | 362 | ||
363 | static inline | ||
364 | void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) | ||
365 | { | ||
366 | } | ||
357 | #endif /* CONFIG_CGROUP_MEM_CONT */ | 367 | #endif /* CONFIG_CGROUP_MEM_CONT */ |
358 | 368 | ||
359 | #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM) | 369 | #if !defined(CONFIG_CGROUP_MEM_RES_CTLR) || !defined(CONFIG_DEBUG_VM) |
diff --git a/include/linux/mfd/88pm860x.h b/include/linux/mfd/88pm860x.h index 8fba7972ff5f..63b4fb8e3b6f 100644 --- a/include/linux/mfd/88pm860x.h +++ b/include/linux/mfd/88pm860x.h | |||
@@ -330,6 +330,11 @@ struct pm860x_led_pdata { | |||
330 | unsigned long flags; | 330 | unsigned long flags; |
331 | }; | 331 | }; |
332 | 332 | ||
333 | struct pm860x_rtc_pdata { | ||
334 | int (*sync)(unsigned int ticks); | ||
335 | int vrtc; | ||
336 | }; | ||
337 | |||
333 | struct pm860x_touch_pdata { | 338 | struct pm860x_touch_pdata { |
334 | int gpadc_prebias; | 339 | int gpadc_prebias; |
335 | int slot_cycle; | 340 | int slot_cycle; |
@@ -349,6 +354,7 @@ struct pm860x_power_pdata { | |||
349 | struct pm860x_platform_data { | 354 | struct pm860x_platform_data { |
350 | struct pm860x_backlight_pdata *backlight; | 355 | struct pm860x_backlight_pdata *backlight; |
351 | struct pm860x_led_pdata *led; | 356 | struct pm860x_led_pdata *led; |
357 | struct pm860x_rtc_pdata *rtc; | ||
352 | struct pm860x_touch_pdata *touch; | 358 | struct pm860x_touch_pdata *touch; |
353 | struct pm860x_power_pdata *power; | 359 | struct pm860x_power_pdata *power; |
354 | struct regulator_init_data *regulator; | 360 | struct regulator_init_data *regulator; |
diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h index 7d9b6ae1c203..896b5e47f16e 100644 --- a/include/linux/mfd/abx500.h +++ b/include/linux/mfd/abx500.h | |||
@@ -34,6 +34,13 @@ | |||
34 | #define AB5500_2_0 0x21 | 34 | #define AB5500_2_0 0x21 |
35 | #define AB5500_2_1 0x22 | 35 | #define AB5500_2_1 0x22 |
36 | 36 | ||
37 | /* AB8500 CIDs*/ | ||
38 | #define AB8500_CUTEARLY 0x00 | ||
39 | #define AB8500_CUT1P0 0x10 | ||
40 | #define AB8500_CUT1P1 0x11 | ||
41 | #define AB8500_CUT2P0 0x20 | ||
42 | #define AB8500_CUT3P0 0x30 | ||
43 | |||
37 | /* | 44 | /* |
38 | * AB3100, EVENTA1, A2 and A3 event register flags | 45 | * AB3100, EVENTA1, A2 and A3 event register flags |
39 | * these are catenated into a single 32-bit flag in the code | 46 | * these are catenated into a single 32-bit flag in the code |
@@ -186,6 +193,7 @@ struct abx500_init_settings { | |||
186 | struct ab3550_platform_data { | 193 | struct ab3550_platform_data { |
187 | struct {unsigned int base; unsigned int count; } irq; | 194 | struct {unsigned int base; unsigned int count; } irq; |
188 | void *dev_data[AB3550_NUM_DEVICES]; | 195 | void *dev_data[AB3550_NUM_DEVICES]; |
196 | size_t dev_data_sz[AB3550_NUM_DEVICES]; | ||
189 | struct abx500_init_settings *init_settings; | 197 | struct abx500_init_settings *init_settings; |
190 | unsigned int init_settings_sz; | 198 | unsigned int init_settings_sz; |
191 | }; | 199 | }; |
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h index de3c4ad19afb..ed793b77a1c5 100644 --- a/include/linux/mfd/asic3.h +++ b/include/linux/mfd/asic3.h | |||
@@ -16,6 +16,13 @@ | |||
16 | 16 | ||
17 | #include <linux/types.h> | 17 | #include <linux/types.h> |
18 | 18 | ||
19 | struct led_classdev; | ||
20 | struct asic3_led { | ||
21 | const char *name; | ||
22 | const char *default_trigger; | ||
23 | struct led_classdev *cdev; | ||
24 | }; | ||
25 | |||
19 | struct asic3_platform_data { | 26 | struct asic3_platform_data { |
20 | u16 *gpio_config; | 27 | u16 *gpio_config; |
21 | unsigned int gpio_config_num; | 28 | unsigned int gpio_config_num; |
@@ -23,6 +30,8 @@ struct asic3_platform_data { | |||
23 | unsigned int irq_base; | 30 | unsigned int irq_base; |
24 | 31 | ||
25 | unsigned int gpio_base; | 32 | unsigned int gpio_base; |
33 | |||
34 | struct asic3_led *leds; | ||
26 | }; | 35 | }; |
27 | 36 | ||
28 | #define ASIC3_NUM_GPIO_BANKS 4 | 37 | #define ASIC3_NUM_GPIO_BANKS 4 |
@@ -111,9 +120,9 @@ struct asic3_platform_data { | |||
111 | #define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0) | 120 | #define ASIC3_GPIOA11_PWM0 ASIC3_CONFIG_GPIO(11, 1, 1, 0) |
112 | #define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0) | 121 | #define ASIC3_GPIOA12_PWM1 ASIC3_CONFIG_GPIO(12, 1, 1, 0) |
113 | #define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0) | 122 | #define ASIC3_GPIOA15_CONTROL_CX ASIC3_CONFIG_GPIO(15, 1, 1, 0) |
114 | #define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 1, 0) | 123 | #define ASIC3_GPIOC0_LED0 ASIC3_CONFIG_GPIO(32, 1, 0, 0) |
115 | #define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 1, 0) | 124 | #define ASIC3_GPIOC1_LED1 ASIC3_CONFIG_GPIO(33, 1, 0, 0) |
116 | #define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 1, 0) | 125 | #define ASIC3_GPIOC2_LED2 ASIC3_CONFIG_GPIO(34, 1, 0, 0) |
117 | #define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0) | 126 | #define ASIC3_GPIOC3_SPI_RXD ASIC3_CONFIG_GPIO(35, 1, 0, 0) |
118 | #define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0) | 127 | #define ASIC3_GPIOC4_CF_nCD ASIC3_CONFIG_GPIO(36, 1, 0, 0) |
119 | #define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0) | 128 | #define ASIC3_GPIOC4_SPI_TXD ASIC3_CONFIG_GPIO(36, 1, 1, 0) |
@@ -152,6 +161,7 @@ struct asic3_platform_data { | |||
152 | #define PWM_TIMEBASE_VALUE(x) ((x)&0xf) /* Low 4 bits sets time base */ | 161 | #define PWM_TIMEBASE_VALUE(x) ((x)&0xf) /* Low 4 bits sets time base */ |
153 | #define PWM_TIMEBASE_ENABLE (1 << 4) /* Enable clock */ | 162 | #define PWM_TIMEBASE_ENABLE (1 << 4) /* Enable clock */ |
154 | 163 | ||
164 | #define ASIC3_NUM_LEDS 3 | ||
155 | #define ASIC3_LED_0_Base 0x0700 | 165 | #define ASIC3_LED_0_Base 0x0700 |
156 | #define ASIC3_LED_1_Base 0x0800 | 166 | #define ASIC3_LED_1_Base 0x0800 |
157 | #define ASIC3_LED_2_Base 0x0900 | 167 | #define ASIC3_LED_2_Base 0x0900 |
@@ -287,10 +297,17 @@ struct asic3_platform_data { | |||
287 | * | 297 | * |
288 | *****************************************************************************/ | 298 | *****************************************************************************/ |
289 | #define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */ | 299 | #define ASIC3_SD_CONFIG_BASE 0x0400 /* Assumes 32 bit addressing */ |
300 | #define ASIC3_SD_CONFIG_SIZE 0x0200 /* Assumes 32 bit addressing */ | ||
290 | #define ASIC3_SD_CTRL_BASE 0x1000 | 301 | #define ASIC3_SD_CTRL_BASE 0x1000 |
291 | #define ASIC3_SDIO_CTRL_BASE 0x1200 | 302 | #define ASIC3_SDIO_CTRL_BASE 0x1200 |
292 | 303 | ||
293 | #define ASIC3_MAP_SIZE_32BIT 0x2000 | 304 | #define ASIC3_MAP_SIZE_32BIT 0x2000 |
294 | #define ASIC3_MAP_SIZE_16BIT 0x1000 | 305 | #define ASIC3_MAP_SIZE_16BIT 0x1000 |
295 | 306 | ||
307 | /* Functions needed by leds-asic3 */ | ||
308 | |||
309 | struct asic3; | ||
310 | extern void asic3_write_register(struct asic3 *asic, unsigned int reg, u32 val); | ||
311 | extern u32 asic3_read_register(struct asic3 *asic, unsigned int reg); | ||
312 | |||
296 | #endif /* __ASIC3_H__ */ | 313 | #endif /* __ASIC3_H__ */ |
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h index aef23309a742..4e76163dd862 100644 --- a/include/linux/mfd/core.h +++ b/include/linux/mfd/core.h | |||
@@ -33,8 +33,9 @@ struct mfd_cell { | |||
33 | int (*suspend)(struct platform_device *dev); | 33 | int (*suspend)(struct platform_device *dev); |
34 | int (*resume)(struct platform_device *dev); | 34 | int (*resume)(struct platform_device *dev); |
35 | 35 | ||
36 | /* mfd_data can be used to pass data to client drivers */ | 36 | /* platform data passed to the sub-device drivers */ |
37 | void *mfd_data; | 37 | void *platform_data; |
38 | size_t pdata_size; | ||
38 | 39 | ||
39 | /* | 40 | /* |
40 | * These resources can be specified relative to the parent device. | 41 | * These resources can be specified relative to the parent device. |
@@ -89,24 +90,6 @@ static inline const struct mfd_cell *mfd_get_cell(struct platform_device *pdev) | |||
89 | return pdev->mfd_cell; | 90 | return pdev->mfd_cell; |
90 | } | 91 | } |
91 | 92 | ||
92 | /* | ||
93 | * Given a platform device that's been created by mfd_add_devices(), fetch | ||
94 | * the .mfd_data entry from the mfd_cell that created it. | ||
95 | * Otherwise just return the platform_data pointer. | ||
96 | * This maintains compatibility with platform drivers whose devices aren't | ||
97 | * created by the mfd layer, and expect platform_data to contain what would've | ||
98 | * otherwise been in mfd_data. | ||
99 | */ | ||
100 | static inline void *mfd_get_data(struct platform_device *pdev) | ||
101 | { | ||
102 | const struct mfd_cell *cell = mfd_get_cell(pdev); | ||
103 | |||
104 | if (cell) | ||
105 | return cell->mfd_data; | ||
106 | else | ||
107 | return pdev->dev.platform_data; | ||
108 | } | ||
109 | |||
110 | extern int mfd_add_devices(struct device *parent, int id, | 93 | extern int mfd_add_devices(struct device *parent, int id, |
111 | struct mfd_cell *cells, int n_devs, | 94 | struct mfd_cell *cells, int n_devs, |
112 | struct resource *mem_base, | 95 | struct resource *mem_base, |
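
With mfd_get_data() gone, a parent MFD driver now hands per-cell data to its children as ordinary platform data via .platform_data/.pdata_size, and the sub-device driver reads it back through the usual platform-device pointer. A minimal sketch (all names invented):

#include <linux/mfd/core.h>
#include <linux/platform_device.h>

struct foo_led_pdata { int brightness; };

static struct foo_led_pdata foo_led_pdata = { .brightness = 100 };

static struct mfd_cell foo_cells[] = {
        {
                .name          = "foo-led",
                .platform_data = &foo_led_pdata,
                .pdata_size    = sizeof(foo_led_pdata),
        },
};

/* Parent driver probe:
 *   mfd_add_devices(&pdev->dev, -1, foo_cells, ARRAY_SIZE(foo_cells),
 *                   NULL, 0);
 */

/* "foo-led" sub-device driver probe: */
static int __devinit foo_led_probe(struct platform_device *pdev)
{
        struct foo_led_pdata *pdata = pdev->dev.platform_data;

        return pdata ? 0 : -EINVAL;
}
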
diff --git a/include/linux/mfd/max8997-private.h b/include/linux/mfd/max8997-private.h index 69d1010e2e51..5ff2400ad46c 100644 --- a/include/linux/mfd/max8997-private.h +++ b/include/linux/mfd/max8997-private.h | |||
@@ -311,10 +311,6 @@ enum max8997_irq { | |||
311 | MAX8997_IRQ_NR, | 311 | MAX8997_IRQ_NR, |
312 | }; | 312 | }; |
313 | 313 | ||
314 | #define MAX8997_REG_BUCK1DVS(x) (MAX8997_REG_BUCK1DVS1 + (x) - 1) | ||
315 | #define MAX8997_REG_BUCK2DVS(x) (MAX8997_REG_BUCK2DVS1 + (x) - 1) | ||
316 | #define MAX8997_REG_BUCK5DVS(x) (MAX8997_REG_BUCK5DVS1 + (x) - 1) | ||
317 | |||
318 | #define MAX8997_NUM_GPIO 12 | 314 | #define MAX8997_NUM_GPIO 12 |
319 | struct max8997_dev { | 315 | struct max8997_dev { |
320 | struct device *dev; | 316 | struct device *dev; |
diff --git a/include/linux/mfd/pm8xxx/core.h b/include/linux/mfd/pm8xxx/core.h new file mode 100644 index 000000000000..bd2f4f64e931 --- /dev/null +++ b/include/linux/mfd/pm8xxx/core.h | |||
@@ -0,0 +1,81 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011, Code Aurora Forum. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | /* | ||
14 | * Qualcomm PMIC 8xxx driver header file | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #ifndef __MFD_PM8XXX_CORE_H | ||
19 | #define __MFD_PM8XXX_CORE_H | ||
20 | |||
21 | #include <linux/mfd/core.h> | ||
22 | |||
23 | struct pm8xxx_drvdata { | ||
24 | int (*pmic_readb) (const struct device *dev, u16 addr, u8 *val); | ||
25 | int (*pmic_writeb) (const struct device *dev, u16 addr, u8 val); | ||
26 | int (*pmic_read_buf) (const struct device *dev, u16 addr, u8 *buf, | ||
27 | int n); | ||
28 | int (*pmic_write_buf) (const struct device *dev, u16 addr, u8 *buf, | ||
29 | int n); | ||
30 | int (*pmic_read_irq_stat) (const struct device *dev, int irq); | ||
31 | void *pm_chip_data; | ||
32 | }; | ||
33 | |||
34 | static inline int pm8xxx_readb(const struct device *dev, u16 addr, u8 *val) | ||
35 | { | ||
36 | struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); | ||
37 | |||
38 | if (!dd) | ||
39 | return -EINVAL; | ||
40 | return dd->pmic_readb(dev, addr, val); | ||
41 | } | ||
42 | |||
43 | static inline int pm8xxx_writeb(const struct device *dev, u16 addr, u8 val) | ||
44 | { | ||
45 | struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); | ||
46 | |||
47 | if (!dd) | ||
48 | return -EINVAL; | ||
49 | return dd->pmic_writeb(dev, addr, val); | ||
50 | } | ||
51 | |||
52 | static inline int pm8xxx_read_buf(const struct device *dev, u16 addr, u8 *buf, | ||
53 | int n) | ||
54 | { | ||
55 | struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); | ||
56 | |||
57 | if (!dd) | ||
58 | return -EINVAL; | ||
59 | return dd->pmic_read_buf(dev, addr, buf, n); | ||
60 | } | ||
61 | |||
62 | static inline int pm8xxx_write_buf(const struct device *dev, u16 addr, u8 *buf, | ||
63 | int n) | ||
64 | { | ||
65 | struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); | ||
66 | |||
67 | if (!dd) | ||
68 | return -EINVAL; | ||
69 | return dd->pmic_write_buf(dev, addr, buf, n); | ||
70 | } | ||
71 | |||
72 | static inline int pm8xxx_read_irq_stat(const struct device *dev, int irq) | ||
73 | { | ||
74 | struct pm8xxx_drvdata *dd = dev_get_drvdata(dev); | ||
75 | |||
76 | if (!dd) | ||
77 | return -EINVAL; | ||
78 | return dd->pmic_read_irq_stat(dev, irq); | ||
79 | } | ||
80 | |||
81 | #endif | ||
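
The inline wrappers above fetch a pm8xxx_drvdata from the core device's drvdata and dispatch through its function pointers, so a sub-device driver talks to the PMIC through its parent device rather than a global handle. Roughly (the register address and helper are placeholders):

#include <linux/mfd/pm8xxx/core.h>
#include <linux/platform_device.h>

#define FOO_STATUS_REG  0x01C8   /* placeholder register address */

static int foo_read_status(struct platform_device *pdev, u8 *status)
{
        /* The PMIC core driver is our parent; it owns the drvdata that
         * carries the pmic_readb/pmic_writeb callbacks. */
        return pm8xxx_readb(pdev->dev.parent, FOO_STATUS_REG, status);
}
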
diff --git a/include/linux/mfd/pm8xxx/irq.h b/include/linux/mfd/pm8xxx/irq.h new file mode 100644 index 000000000000..4b21769f4483 --- /dev/null +++ b/include/linux/mfd/pm8xxx/irq.h | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011, Code Aurora Forum. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | /* | ||
14 | * Qualcomm PMIC irq 8xxx driver header file | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #ifndef __MFD_PM8XXX_IRQ_H | ||
19 | #define __MFD_PM8XXX_IRQ_H | ||
20 | |||
21 | #include <linux/errno.h> | ||
22 | #include <linux/err.h> | ||
23 | |||
24 | struct pm8xxx_irq_core_data { | ||
25 | u32 rev; | ||
26 | int nirqs; | ||
27 | }; | ||
28 | |||
29 | struct pm8xxx_irq_platform_data { | ||
30 | int irq_base; | ||
31 | struct pm8xxx_irq_core_data irq_cdata; | ||
32 | int devirq; | ||
33 | int irq_trigger_flag; | ||
34 | }; | ||
35 | |||
36 | struct pm_irq_chip; | ||
37 | |||
38 | #ifdef CONFIG_MFD_PM8XXX_IRQ | ||
39 | int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq); | ||
40 | struct pm_irq_chip * __devinit pm8xxx_irq_init(struct device *dev, | ||
41 | const struct pm8xxx_irq_platform_data *pdata); | ||
42 | int __devexit pm8xxx_irq_exit(struct pm_irq_chip *chip); | ||
43 | #else | ||
44 | static inline int pm8xxx_get_irq_stat(struct pm_irq_chip *chip, int irq) | ||
45 | { | ||
46 | return -ENXIO; | ||
47 | } | ||
48 | static inline struct pm_irq_chip * __devinit pm8xxx_irq_init( | ||
49 | const struct device *dev, | ||
50 | const struct pm8xxx_irq_platform_data *pdata) | ||
51 | { | ||
52 | return ERR_PTR(-ENXIO); | ||
53 | } | ||
54 | static inline int __devexit pm8xxx_irq_exit(struct pm_irq_chip *chip) | ||
55 | { | ||
56 | return -ENXIO; | ||
57 | } | ||
58 | #endif /* CONFIG_MFD_PM8XXX_IRQ */ | ||
59 | #endif /* __MFD_PM8XXX_IRQ_H */ | ||
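Because the CONFIG_MFD_PM8XXX_IRQ=n stubs return -ENXIO and ERR_PTR(-ENXIO), callers are evidently expected to test the result with IS_ERR(). A hedged sketch of that call pattern; the surrounding function and names are assumptions, not code from the patch:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mfd/pm8xxx/irq.h>

/* Sketch only: bring up the PMIC irq block from a core driver's probe path
 * and fail gracefully when irq support is not built in.
 */
static struct pm_irq_chip * __devinit example_irq_setup(struct device *dev,
                const struct pm8xxx_irq_platform_data *pdata)
{
        struct pm_irq_chip *chip;

        chip = pm8xxx_irq_init(dev, pdata);
        if (IS_ERR(chip))
                dev_err(dev, "irq init failed: %ld\n", PTR_ERR(chip));

        return chip;    /* caller checks IS_ERR() as well */
}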
diff --git a/include/linux/mfd/pm8xxx/pm8921.h b/include/linux/mfd/pm8xxx/pm8921.h new file mode 100644 index 000000000000..d5517fd32d1b --- /dev/null +++ b/include/linux/mfd/pm8xxx/pm8921.h | |||
@@ -0,0 +1,31 @@ | |||
1 | /* | ||
2 | * Copyright (c) 2011, Code Aurora Forum. All rights reserved. | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 and | ||
6 | * only version 2 as published by the Free Software Foundation. | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, | ||
9 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
11 | * GNU General Public License for more details. | ||
12 | */ | ||
13 | /* | ||
14 | * Qualcomm PMIC 8921 driver header file | ||
15 | * | ||
16 | */ | ||
17 | |||
18 | #ifndef __MFD_PM8921_H | ||
19 | #define __MFD_PM8921_H | ||
20 | |||
21 | #include <linux/device.h> | ||
22 | #include <linux/mfd/pm8xxx/irq.h> | ||
23 | |||
24 | #define PM8921_NR_IRQS 256 | ||
25 | |||
26 | struct pm8921_platform_data { | ||
27 | int irq_base; | ||
28 | struct pm8xxx_irq_platform_data *irq_pdata; | ||
29 | }; | ||
30 | |||
31 | #endif | ||
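pm8921.h only couples the PM8921 board data to the generic pm8xxx irq platform data. A board-file sketch of how the two structures might be wired together; every number below is invented, and both the meaning of nirqs and the idea that irq_trigger_flag takes IRQF_* values are assumptions on the editor's part, not something the patch states:

#include <linux/interrupt.h>
#include <linux/mfd/pm8xxx/irq.h>
#include <linux/mfd/pm8xxx/pm8921.h>

/* Illustrative board wiring only. */
static struct pm8xxx_irq_platform_data example_irq_pdata = {
        .irq_base         = 288,                /* hypothetical Linux irq base */
        .devirq           = 16,                 /* hypothetical summary irq line */
        .irq_trigger_flag = IRQF_TRIGGER_LOW,   /* assumed to be an IRQF_* flag */
        .irq_cdata        = {
                .nirqs    = PM8921_NR_IRQS,     /* assumed meaning of nirqs */
        },
};

static struct pm8921_platform_data example_pm8921_pdata = {
        .irq_base  = 288,
        .irq_pdata = &example_irq_pdata,
};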
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h new file mode 100644 index 000000000000..8bb85b930c07 --- /dev/null +++ b/include/linux/mfd/tps65910.h | |||
@@ -0,0 +1,800 @@ | |||
1 | /* | ||
2 | * tps65910.h -- TI TPS6591x | ||
3 | * | ||
4 | * Copyright 2010-2011 Texas Instruments Inc. | ||
5 | * | ||
6 | * Author: Graeme Gregory <gg@slimlogic.co.uk> | ||
7 | * Author: Jorge Eduardo Candelaria <jedu@slimlogic.co.uk> | ||
8 | * Author: Arnaud Deconinck <a-deconinck@ti.com> | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify it | ||
11 | * under the terms of the GNU General Public License as published by the | ||
12 | * Free Software Foundation; either version 2 of the License, or (at your | ||
13 | * option) any later version. | ||
14 | * | ||
15 | */ | ||
16 | |||
17 | #ifndef __LINUX_MFD_TPS65910_H | ||
18 | #define __LINUX_MFD_TPS65910_H | ||
19 | |||
20 | /* TPS chip id list */ | ||
21 | #define TPS65910 0 | ||
22 | #define TPS65911 1 | ||
23 | |||
24 | /* TPS regulator type list */ | ||
25 | #define REGULATOR_LDO 0 | ||
26 | #define REGULATOR_DCDC 1 | ||
27 | |||
28 | /* | ||
29 | * List of registers for component TPS65910 | ||
30 | * | ||
31 | */ | ||
32 | |||
33 | #define TPS65910_SECONDS 0x0 | ||
34 | #define TPS65910_MINUTES 0x1 | ||
35 | #define TPS65910_HOURS 0x2 | ||
36 | #define TPS65910_DAYS 0x3 | ||
37 | #define TPS65910_MONTHS 0x4 | ||
38 | #define TPS65910_YEARS 0x5 | ||
39 | #define TPS65910_WEEKS 0x6 | ||
40 | #define TPS65910_ALARM_SECONDS 0x8 | ||
41 | #define TPS65910_ALARM_MINUTES 0x9 | ||
42 | #define TPS65910_ALARM_HOURS 0xA | ||
43 | #define TPS65910_ALARM_DAYS 0xB | ||
44 | #define TPS65910_ALARM_MONTHS 0xC | ||
45 | #define TPS65910_ALARM_YEARS 0xD | ||
46 | #define TPS65910_RTC_CTRL 0x10 | ||
47 | #define TPS65910_RTC_STATUS 0x11 | ||
48 | #define TPS65910_RTC_INTERRUPTS 0x12 | ||
49 | #define TPS65910_RTC_COMP_LSB 0x13 | ||
50 | #define TPS65910_RTC_COMP_MSB 0x14 | ||
51 | #define TPS65910_RTC_RES_PROG 0x15 | ||
52 | #define TPS65910_RTC_RESET_STATUS 0x16 | ||
53 | #define TPS65910_BCK1 0x17 | ||
54 | #define TPS65910_BCK2 0x18 | ||
55 | #define TPS65910_BCK3 0x19 | ||
56 | #define TPS65910_BCK4 0x1A | ||
57 | #define TPS65910_BCK5 0x1B | ||
58 | #define TPS65910_PUADEN 0x1C | ||
59 | #define TPS65910_REF 0x1D | ||
60 | #define TPS65910_VRTC 0x1E | ||
61 | #define TPS65910_VIO 0x20 | ||
62 | #define TPS65910_VDD1 0x21 | ||
63 | #define TPS65910_VDD1_OP 0x22 | ||
64 | #define TPS65910_VDD1_SR 0x23 | ||
65 | #define TPS65910_VDD2 0x24 | ||
66 | #define TPS65910_VDD2_OP 0x25 | ||
67 | #define TPS65910_VDD2_SR 0x26 | ||
68 | #define TPS65910_VDD3 0x27 | ||
69 | #define TPS65910_VDIG1 0x30 | ||
70 | #define TPS65910_VDIG2 0x31 | ||
71 | #define TPS65910_VAUX1 0x32 | ||
72 | #define TPS65910_VAUX2 0x33 | ||
73 | #define TPS65910_VAUX33 0x34 | ||
74 | #define TPS65910_VMMC 0x35 | ||
75 | #define TPS65910_VPLL 0x36 | ||
76 | #define TPS65910_VDAC 0x37 | ||
77 | #define TPS65910_THERM 0x38 | ||
78 | #define TPS65910_BBCH 0x39 | ||
79 | #define TPS65910_DCDCCTRL 0x3E | ||
80 | #define TPS65910_DEVCTRL 0x3F | ||
81 | #define TPS65910_DEVCTRL2 0x40 | ||
82 | #define TPS65910_SLEEP_KEEP_LDO_ON 0x41 | ||
83 | #define TPS65910_SLEEP_KEEP_RES_ON 0x42 | ||
84 | #define TPS65910_SLEEP_SET_LDO_OFF 0x43 | ||
85 | #define TPS65910_SLEEP_SET_RES_OFF 0x44 | ||
86 | #define TPS65910_EN1_LDO_ASS 0x45 | ||
87 | #define TPS65910_EN1_SMPS_ASS 0x46 | ||
88 | #define TPS65910_EN2_LDO_ASS 0x47 | ||
89 | #define TPS65910_EN2_SMPS_ASS 0x48 | ||
90 | #define TPS65910_EN3_LDO_ASS 0x49 | ||
91 | #define TPS65910_SPARE 0x4A | ||
92 | #define TPS65910_INT_STS 0x50 | ||
93 | #define TPS65910_INT_MSK 0x51 | ||
94 | #define TPS65910_INT_STS2 0x52 | ||
95 | #define TPS65910_INT_MSK2 0x53 | ||
96 | #define TPS65910_INT_STS3 0x54 | ||
97 | #define TPS65910_INT_MSK3 0x55 | ||
98 | #define TPS65910_GPIO0 0x60 | ||
99 | #define TPS65910_GPIO1 0x61 | ||
100 | #define TPS65910_GPIO2 0x62 | ||
101 | #define TPS65910_GPIO3 0x63 | ||
102 | #define TPS65910_GPIO4 0x64 | ||
103 | #define TPS65910_GPIO5 0x65 | ||
104 | #define TPS65910_GPIO6 0x66 | ||
105 | #define TPS65910_GPIO7 0x67 | ||
106 | #define TPS65910_GPIO8 0x68 | ||
107 | #define TPS65910_JTAGVERNUM 0x80 | ||
108 | #define TPS65910_MAX_REGISTER 0x80 | ||
109 | |||
110 | /* | ||
111 | * List of registers specific to TPS65911 | ||
112 | */ | ||
113 | #define TPS65911_VDDCTRL 0x27 | ||
114 | #define TPS65911_VDDCTRL_OP 0x28 | ||
115 | #define TPS65911_VDDCTRL_SR 0x29 | ||
116 | #define TPS65911_LDO1 0x30 | ||
117 | #define TPS65911_LDO2 0x31 | ||
118 | #define TPS65911_LDO5 0x32 | ||
119 | #define TPS65911_LDO8 0x33 | ||
120 | #define TPS65911_LDO7 0x34 | ||
121 | #define TPS65911_LDO6 0x35 | ||
122 | #define TPS65911_LDO4 0x36 | ||
123 | #define TPS65911_LDO3 0x37 | ||
124 | #define TPS65911_VMBCH 0x6A | ||
125 | #define TPS65911_VMBCH2 0x6B | ||
126 | |||
127 | /* | ||
128 | * List of register bitfields for component TPS65910 | ||
129 | * | ||
130 | */ | ||
131 | |||
132 | |||
133 | /*Register BCK1 (0x80) register.RegisterDescription */ | ||
134 | #define BCK1_BCKUP_MASK 0xFF | ||
135 | #define BCK1_BCKUP_SHIFT 0 | ||
136 | |||
137 | |||
138 | /*Register BCK2 (0x80) register.RegisterDescription */ | ||
139 | #define BCK2_BCKUP_MASK 0xFF | ||
140 | #define BCK2_BCKUP_SHIFT 0 | ||
141 | |||
142 | |||
143 | /*Register BCK3 (0x80) register.RegisterDescription */ | ||
144 | #define BCK3_BCKUP_MASK 0xFF | ||
145 | #define BCK3_BCKUP_SHIFT 0 | ||
146 | |||
147 | |||
148 | /*Register BCK4 (0x80) register.RegisterDescription */ | ||
149 | #define BCK4_BCKUP_MASK 0xFF | ||
150 | #define BCK4_BCKUP_SHIFT 0 | ||
151 | |||
152 | |||
153 | /*Register BCK5 (0x80) register.RegisterDescription */ | ||
154 | #define BCK5_BCKUP_MASK 0xFF | ||
155 | #define BCK5_BCKUP_SHIFT 0 | ||
156 | |||
157 | |||
158 | /*Register PUADEN (0x80) register.RegisterDescription */ | ||
159 | #define PUADEN_EN3P_MASK 0x80 | ||
160 | #define PUADEN_EN3P_SHIFT 7 | ||
161 | #define PUADEN_I2CCTLP_MASK 0x40 | ||
162 | #define PUADEN_I2CCTLP_SHIFT 6 | ||
163 | #define PUADEN_I2CSRP_MASK 0x20 | ||
164 | #define PUADEN_I2CSRP_SHIFT 5 | ||
165 | #define PUADEN_PWRONP_MASK 0x10 | ||
166 | #define PUADEN_PWRONP_SHIFT 4 | ||
167 | #define PUADEN_SLEEPP_MASK 0x08 | ||
168 | #define PUADEN_SLEEPP_SHIFT 3 | ||
169 | #define PUADEN_PWRHOLDP_MASK 0x04 | ||
170 | #define PUADEN_PWRHOLDP_SHIFT 2 | ||
171 | #define PUADEN_BOOT1P_MASK 0x02 | ||
172 | #define PUADEN_BOOT1P_SHIFT 1 | ||
173 | #define PUADEN_BOOT0P_MASK 0x01 | ||
174 | #define PUADEN_BOOT0P_SHIFT 0 | ||
175 | |||
176 | |||
177 | /*Register REF (0x80) register.RegisterDescription */ | ||
178 | #define REF_VMBCH_SEL_MASK 0x0C | ||
179 | #define REF_VMBCH_SEL_SHIFT 2 | ||
180 | #define REF_ST_MASK 0x03 | ||
181 | #define REF_ST_SHIFT 0 | ||
182 | |||
183 | |||
184 | /*Register VRTC (0x80) register.RegisterDescription */ | ||
185 | #define VRTC_VRTC_OFFMASK_MASK 0x08 | ||
186 | #define VRTC_VRTC_OFFMASK_SHIFT 3 | ||
187 | #define VRTC_ST_MASK 0x03 | ||
188 | #define VRTC_ST_SHIFT 0 | ||
189 | |||
190 | |||
191 | /*Register VIO (0x80) register.RegisterDescription */ | ||
192 | #define VIO_ILMAX_MASK 0xC0 | ||
193 | #define VIO_ILMAX_SHIFT 6 | ||
194 | #define VIO_SEL_MASK 0x0C | ||
195 | #define VIO_SEL_SHIFT 2 | ||
196 | #define VIO_ST_MASK 0x03 | ||
197 | #define VIO_ST_SHIFT 0 | ||
198 | |||
199 | |||
200 | /*Register VDD1 (0x80) register.RegisterDescription */ | ||
201 | #define VDD1_VGAIN_SEL_MASK 0xC0 | ||
202 | #define VDD1_VGAIN_SEL_SHIFT 6 | ||
203 | #define VDD1_ILMAX_MASK 0x20 | ||
204 | #define VDD1_ILMAX_SHIFT 5 | ||
205 | #define VDD1_TSTEP_MASK 0x1C | ||
206 | #define VDD1_TSTEP_SHIFT 2 | ||
207 | #define VDD1_ST_MASK 0x03 | ||
208 | #define VDD1_ST_SHIFT 0 | ||
209 | |||
210 | |||
211 | /*Register VDD1_OP (0x80) register.RegisterDescription */ | ||
212 | #define VDD1_OP_CMD_MASK 0x80 | ||
213 | #define VDD1_OP_CMD_SHIFT 7 | ||
214 | #define VDD1_OP_SEL_MASK 0x7F | ||
215 | #define VDD1_OP_SEL_SHIFT 0 | ||
216 | |||
217 | |||
218 | /*Register VDD1_SR (0x80) register.RegisterDescription */ | ||
219 | #define VDD1_SR_SEL_MASK 0x7F | ||
220 | #define VDD1_SR_SEL_SHIFT 0 | ||
221 | |||
222 | |||
223 | /*Register VDD2 (0x80) register.RegisterDescription */ | ||
224 | #define VDD2_VGAIN_SEL_MASK 0xC0 | ||
225 | #define VDD2_VGAIN_SEL_SHIFT 6 | ||
226 | #define VDD2_ILMAX_MASK 0x20 | ||
227 | #define VDD2_ILMAX_SHIFT 5 | ||
228 | #define VDD2_TSTEP_MASK 0x1C | ||
229 | #define VDD2_TSTEP_SHIFT 2 | ||
230 | #define VDD2_ST_MASK 0x03 | ||
231 | #define VDD2_ST_SHIFT 0 | ||
232 | |||
233 | |||
234 | /*Register VDD2_OP (0x80) register.RegisterDescription */ | ||
235 | #define VDD2_OP_CMD_MASK 0x80 | ||
236 | #define VDD2_OP_CMD_SHIFT 7 | ||
237 | #define VDD2_OP_SEL_MASK 0x7F | ||
238 | #define VDD2_OP_SEL_SHIFT 0 | ||
239 | |||
240 | /*Register VDD2_SR (0x80) register.RegisterDescription */ | ||
241 | #define VDD2_SR_SEL_MASK 0x7F | ||
242 | #define VDD2_SR_SEL_SHIFT 0 | ||
243 | |||
244 | |||
245 | /*Registers VDD1, VDD2 voltage values definitions */ | ||
246 | #define VDD1_2_NUM_VOLTS 73 | ||
247 | #define VDD1_2_MIN_VOLT 6000 | ||
248 | #define VDD1_2_OFFSET 125 | ||
249 | |||
250 | |||
251 | /*Register VDD3 (0x80) register.RegisterDescription */ | ||
252 | #define VDD3_CKINEN_MASK 0x04 | ||
253 | #define VDD3_CKINEN_SHIFT 2 | ||
254 | #define VDD3_ST_MASK 0x03 | ||
255 | #define VDD3_ST_SHIFT 0 | ||
256 | #define VDDCTRL_MIN_VOLT 6000 | ||
257 | #define VDDCTRL_OFFSET 125 | ||
258 | |||
259 | /*Registers VDIG (0x80) to VDAC register.RegisterDescription */ | ||
260 | #define LDO_SEL_MASK 0x0C | ||
261 | #define LDO_SEL_SHIFT 2 | ||
262 | #define LDO_ST_MASK 0x03 | ||
263 | #define LDO_ST_SHIFT 0 | ||
264 | #define LDO_ST_ON_BIT 0x01 | ||
265 | #define LDO_ST_MODE_BIT 0x02 | ||
266 | |||
267 | |||
268 | /* Registers LDO1 to LDO8 in tps65910 */ | ||
269 | #define LDO1_SEL_MASK 0xFC | ||
270 | #define LDO3_SEL_MASK 0x7C | ||
271 | #define LDO_MIN_VOLT 1000 | ||
272 | #define LDO_MAX_VOLT 3300 | ||
273 | |||
274 | |||
275 | /*Register VDIG1 (0x80) register.RegisterDescription */ | ||
276 | #define VDIG1_SEL_MASK 0x0C | ||
277 | #define VDIG1_SEL_SHIFT 2 | ||
278 | #define VDIG1_ST_MASK 0x03 | ||
279 | #define VDIG1_ST_SHIFT 0 | ||
280 | |||
281 | |||
282 | /*Register VDIG2 (0x80) register.RegisterDescription */ | ||
283 | #define VDIG2_SEL_MASK 0x0C | ||
284 | #define VDIG2_SEL_SHIFT 2 | ||
285 | #define VDIG2_ST_MASK 0x03 | ||
286 | #define VDIG2_ST_SHIFT 0 | ||
287 | |||
288 | |||
289 | /*Register VAUX1 (0x80) register.RegisterDescription */ | ||
290 | #define VAUX1_SEL_MASK 0x0C | ||
291 | #define VAUX1_SEL_SHIFT 2 | ||
292 | #define VAUX1_ST_MASK 0x03 | ||
293 | #define VAUX1_ST_SHIFT 0 | ||
294 | |||
295 | |||
296 | /*Register VAUX2 (0x80) register.RegisterDescription */ | ||
297 | #define VAUX2_SEL_MASK 0x0C | ||
298 | #define VAUX2_SEL_SHIFT 2 | ||
299 | #define VAUX2_ST_MASK 0x03 | ||
300 | #define VAUX2_ST_SHIFT 0 | ||
301 | |||
302 | |||
303 | /*Register VAUX33 (0x80) register.RegisterDescription */ | ||
304 | #define VAUX33_SEL_MASK 0x0C | ||
305 | #define VAUX33_SEL_SHIFT 2 | ||
306 | #define VAUX33_ST_MASK 0x03 | ||
307 | #define VAUX33_ST_SHIFT 0 | ||
308 | |||
309 | |||
310 | /*Register VMMC (0x80) register.RegisterDescription */ | ||
311 | #define VMMC_SEL_MASK 0x0C | ||
312 | #define VMMC_SEL_SHIFT 2 | ||
313 | #define VMMC_ST_MASK 0x03 | ||
314 | #define VMMC_ST_SHIFT 0 | ||
315 | |||
316 | |||
317 | /*Register VPLL (0x80) register.RegisterDescription */ | ||
318 | #define VPLL_SEL_MASK 0x0C | ||
319 | #define VPLL_SEL_SHIFT 2 | ||
320 | #define VPLL_ST_MASK 0x03 | ||
321 | #define VPLL_ST_SHIFT 0 | ||
322 | |||
323 | |||
324 | /*Register VDAC (0x80) register.RegisterDescription */ | ||
325 | #define VDAC_SEL_MASK 0x0C | ||
326 | #define VDAC_SEL_SHIFT 2 | ||
327 | #define VDAC_ST_MASK 0x03 | ||
328 | #define VDAC_ST_SHIFT 0 | ||
329 | |||
330 | |||
331 | /*Register THERM (0x80) register.RegisterDescription */ | ||
332 | #define THERM_THERM_HD_MASK 0x20 | ||
333 | #define THERM_THERM_HD_SHIFT 5 | ||
334 | #define THERM_THERM_TS_MASK 0x10 | ||
335 | #define THERM_THERM_TS_SHIFT 4 | ||
336 | #define THERM_THERM_HDSEL_MASK 0x0C | ||
337 | #define THERM_THERM_HDSEL_SHIFT 2 | ||
338 | #define THERM_RSVD1_MASK 0x02 | ||
339 | #define THERM_RSVD1_SHIFT 1 | ||
340 | #define THERM_THERM_STATE_MASK 0x01 | ||
341 | #define THERM_THERM_STATE_SHIFT 0 | ||
342 | |||
343 | |||
344 | /*Register BBCH (0x80) register.RegisterDescription */ | ||
345 | #define BBCH_BBSEL_MASK 0x06 | ||
346 | #define BBCH_BBSEL_SHIFT 1 | ||
347 | #define BBCH_BBCHEN_MASK 0x01 | ||
348 | #define BBCH_BBCHEN_SHIFT 0 | ||
349 | |||
350 | |||
351 | /*Register DCDCCTRL (0x80) register.RegisterDescription */ | ||
352 | #define DCDCCTRL_VDD2_PSKIP_MASK 0x20 | ||
353 | #define DCDCCTRL_VDD2_PSKIP_SHIFT 5 | ||
354 | #define DCDCCTRL_VDD1_PSKIP_MASK 0x10 | ||
355 | #define DCDCCTRL_VDD1_PSKIP_SHIFT 4 | ||
356 | #define DCDCCTRL_VIO_PSKIP_MASK 0x08 | ||
357 | #define DCDCCTRL_VIO_PSKIP_SHIFT 3 | ||
358 | #define DCDCCTRL_DCDCCKEXT_MASK 0x04 | ||
359 | #define DCDCCTRL_DCDCCKEXT_SHIFT 2 | ||
360 | #define DCDCCTRL_DCDCCKSYNC_MASK 0x03 | ||
361 | #define DCDCCTRL_DCDCCKSYNC_SHIFT 0 | ||
362 | |||
363 | |||
364 | /*Register DEVCTRL (0x80) register.RegisterDescription */ | ||
365 | #define DEVCTRL_RTC_PWDN_MASK 0x40 | ||
366 | #define DEVCTRL_RTC_PWDN_SHIFT 6 | ||
367 | #define DEVCTRL_CK32K_CTRL_MASK 0x20 | ||
368 | #define DEVCTRL_CK32K_CTRL_SHIFT 5 | ||
369 | #define DEVCTRL_SR_CTL_I2C_SEL_MASK 0x10 | ||
370 | #define DEVCTRL_SR_CTL_I2C_SEL_SHIFT 4 | ||
371 | #define DEVCTRL_DEV_OFF_RST_MASK 0x08 | ||
372 | #define DEVCTRL_DEV_OFF_RST_SHIFT 3 | ||
373 | #define DEVCTRL_DEV_ON_MASK 0x04 | ||
374 | #define DEVCTRL_DEV_ON_SHIFT 2 | ||
375 | #define DEVCTRL_DEV_SLP_MASK 0x02 | ||
376 | #define DEVCTRL_DEV_SLP_SHIFT 1 | ||
377 | #define DEVCTRL_DEV_OFF_MASK 0x01 | ||
378 | #define DEVCTRL_DEV_OFF_SHIFT 0 | ||
379 | |||
380 | |||
381 | /*Register DEVCTRL2 (0x80) register.RegisterDescription */ | ||
382 | #define DEVCTRL2_TSLOT_LENGTH_MASK 0x30 | ||
383 | #define DEVCTRL2_TSLOT_LENGTH_SHIFT 4 | ||
384 | #define DEVCTRL2_SLEEPSIG_POL_MASK 0x08 | ||
385 | #define DEVCTRL2_SLEEPSIG_POL_SHIFT 3 | ||
386 | #define DEVCTRL2_PWON_LP_OFF_MASK 0x04 | ||
387 | #define DEVCTRL2_PWON_LP_OFF_SHIFT 2 | ||
388 | #define DEVCTRL2_PWON_LP_RST_MASK 0x02 | ||
389 | #define DEVCTRL2_PWON_LP_RST_SHIFT 1 | ||
390 | #define DEVCTRL2_IT_POL_MASK 0x01 | ||
391 | #define DEVCTRL2_IT_POL_SHIFT 0 | ||
392 | |||
393 | |||
394 | /*Register SLEEP_KEEP_LDO_ON (0x80) register.RegisterDescription */ | ||
395 | #define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_MASK 0x80 | ||
396 | #define SLEEP_KEEP_LDO_ON_VDAC_KEEPON_SHIFT 7 | ||
397 | #define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_MASK 0x40 | ||
398 | #define SLEEP_KEEP_LDO_ON_VPLL_KEEPON_SHIFT 6 | ||
399 | #define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_MASK 0x20 | ||
400 | #define SLEEP_KEEP_LDO_ON_VAUX33_KEEPON_SHIFT 5 | ||
401 | #define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_MASK 0x10 | ||
402 | #define SLEEP_KEEP_LDO_ON_VAUX2_KEEPON_SHIFT 4 | ||
403 | #define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_MASK 0x08 | ||
404 | #define SLEEP_KEEP_LDO_ON_VAUX1_KEEPON_SHIFT 3 | ||
405 | #define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_MASK 0x04 | ||
406 | #define SLEEP_KEEP_LDO_ON_VDIG2_KEEPON_SHIFT 2 | ||
407 | #define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_MASK 0x02 | ||
408 | #define SLEEP_KEEP_LDO_ON_VDIG1_KEEPON_SHIFT 1 | ||
409 | #define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_MASK 0x01 | ||
410 | #define SLEEP_KEEP_LDO_ON_VMMC_KEEPON_SHIFT 0 | ||
411 | |||
412 | |||
413 | /*Register SLEEP_KEEP_RES_ON (0x80) register.RegisterDescription */ | ||
414 | #define SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK 0x80 | ||
415 | #define SLEEP_KEEP_RES_ON_THERM_KEEPON_SHIFT 7 | ||
416 | #define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK 0x40 | ||
417 | #define SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_SHIFT 6 | ||
418 | #define SLEEP_KEEP_RES_ON_VRTC_KEEPON_MASK 0x20 | ||
419 | #define SLEEP_KEEP_RES_ON_VRTC_KEEPON_SHIFT 5 | ||
420 | #define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK 0x10 | ||
421 | #define SLEEP_KEEP_RES_ON_I2CHS_KEEPON_SHIFT 4 | ||
422 | #define SLEEP_KEEP_RES_ON_VDD3_KEEPON_MASK 0x08 | ||
423 | #define SLEEP_KEEP_RES_ON_VDD3_KEEPON_SHIFT 3 | ||
424 | #define SLEEP_KEEP_RES_ON_VDD2_KEEPON_MASK 0x04 | ||
425 | #define SLEEP_KEEP_RES_ON_VDD2_KEEPON_SHIFT 2 | ||
426 | #define SLEEP_KEEP_RES_ON_VDD1_KEEPON_MASK 0x02 | ||
427 | #define SLEEP_KEEP_RES_ON_VDD1_KEEPON_SHIFT 1 | ||
428 | #define SLEEP_KEEP_RES_ON_VIO_KEEPON_MASK 0x01 | ||
429 | #define SLEEP_KEEP_RES_ON_VIO_KEEPON_SHIFT 0 | ||
430 | |||
431 | |||
432 | /*Register SLEEP_SET_LDO_OFF (0x80) register.RegisterDescription */ | ||
433 | #define SLEEP_SET_LDO_OFF_VDAC_SETOFF_MASK 0x80 | ||
434 | #define SLEEP_SET_LDO_OFF_VDAC_SETOFF_SHIFT 7 | ||
435 | #define SLEEP_SET_LDO_OFF_VPLL_SETOFF_MASK 0x40 | ||
436 | #define SLEEP_SET_LDO_OFF_VPLL_SETOFF_SHIFT 6 | ||
437 | #define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_MASK 0x20 | ||
438 | #define SLEEP_SET_LDO_OFF_VAUX33_SETOFF_SHIFT 5 | ||
439 | #define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_MASK 0x10 | ||
440 | #define SLEEP_SET_LDO_OFF_VAUX2_SETOFF_SHIFT 4 | ||
441 | #define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_MASK 0x08 | ||
442 | #define SLEEP_SET_LDO_OFF_VAUX1_SETOFF_SHIFT 3 | ||
443 | #define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_MASK 0x04 | ||
444 | #define SLEEP_SET_LDO_OFF_VDIG2_SETOFF_SHIFT 2 | ||
445 | #define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_MASK 0x02 | ||
446 | #define SLEEP_SET_LDO_OFF_VDIG1_SETOFF_SHIFT 1 | ||
447 | #define SLEEP_SET_LDO_OFF_VMMC_SETOFF_MASK 0x01 | ||
448 | #define SLEEP_SET_LDO_OFF_VMMC_SETOFF_SHIFT 0 | ||
449 | |||
450 | |||
451 | /*Register SLEEP_SET_RES_OFF (0x80) register.RegisterDescription */ | ||
452 | #define SLEEP_SET_RES_OFF_DEFAULT_VOLT_MASK 0x80 | ||
453 | #define SLEEP_SET_RES_OFF_DEFAULT_VOLT_SHIFT 7 | ||
454 | #define SLEEP_SET_RES_OFF_RSVD_MASK 0x60 | ||
455 | #define SLEEP_SET_RES_OFF_RSVD_SHIFT 5 | ||
456 | #define SLEEP_SET_RES_OFF_SPARE_SETOFF_MASK 0x10 | ||
457 | #define SLEEP_SET_RES_OFF_SPARE_SETOFF_SHIFT 4 | ||
458 | #define SLEEP_SET_RES_OFF_VDD3_SETOFF_MASK 0x08 | ||
459 | #define SLEEP_SET_RES_OFF_VDD3_SETOFF_SHIFT 3 | ||
460 | #define SLEEP_SET_RES_OFF_VDD2_SETOFF_MASK 0x04 | ||
461 | #define SLEEP_SET_RES_OFF_VDD2_SETOFF_SHIFT 2 | ||
462 | #define SLEEP_SET_RES_OFF_VDD1_SETOFF_MASK 0x02 | ||
463 | #define SLEEP_SET_RES_OFF_VDD1_SETOFF_SHIFT 1 | ||
464 | #define SLEEP_SET_RES_OFF_VIO_SETOFF_MASK 0x01 | ||
465 | #define SLEEP_SET_RES_OFF_VIO_SETOFF_SHIFT 0 | ||
466 | |||
467 | |||
468 | /*Register EN1_LDO_ASS (0x80) register.RegisterDescription */ | ||
469 | #define EN1_LDO_ASS_VDAC_EN1_MASK 0x80 | ||
470 | #define EN1_LDO_ASS_VDAC_EN1_SHIFT 7 | ||
471 | #define EN1_LDO_ASS_VPLL_EN1_MASK 0x40 | ||
472 | #define EN1_LDO_ASS_VPLL_EN1_SHIFT 6 | ||
473 | #define EN1_LDO_ASS_VAUX33_EN1_MASK 0x20 | ||
474 | #define EN1_LDO_ASS_VAUX33_EN1_SHIFT 5 | ||
475 | #define EN1_LDO_ASS_VAUX2_EN1_MASK 0x10 | ||
476 | #define EN1_LDO_ASS_VAUX2_EN1_SHIFT 4 | ||
477 | #define EN1_LDO_ASS_VAUX1_EN1_MASK 0x08 | ||
478 | #define EN1_LDO_ASS_VAUX1_EN1_SHIFT 3 | ||
479 | #define EN1_LDO_ASS_VDIG2_EN1_MASK 0x04 | ||
480 | #define EN1_LDO_ASS_VDIG2_EN1_SHIFT 2 | ||
481 | #define EN1_LDO_ASS_VDIG1_EN1_MASK 0x02 | ||
482 | #define EN1_LDO_ASS_VDIG1_EN1_SHIFT 1 | ||
483 | #define EN1_LDO_ASS_VMMC_EN1_MASK 0x01 | ||
484 | #define EN1_LDO_ASS_VMMC_EN1_SHIFT 0 | ||
485 | |||
486 | |||
487 | /*Register EN1_SMPS_ASS (0x80) register.RegisterDescription */ | ||
488 | #define EN1_SMPS_ASS_RSVD_MASK 0xE0 | ||
489 | #define EN1_SMPS_ASS_RSVD_SHIFT 5 | ||
490 | #define EN1_SMPS_ASS_SPARE_EN1_MASK 0x10 | ||
491 | #define EN1_SMPS_ASS_SPARE_EN1_SHIFT 4 | ||
492 | #define EN1_SMPS_ASS_VDD3_EN1_MASK 0x08 | ||
493 | #define EN1_SMPS_ASS_VDD3_EN1_SHIFT 3 | ||
494 | #define EN1_SMPS_ASS_VDD2_EN1_MASK 0x04 | ||
495 | #define EN1_SMPS_ASS_VDD2_EN1_SHIFT 2 | ||
496 | #define EN1_SMPS_ASS_VDD1_EN1_MASK 0x02 | ||
497 | #define EN1_SMPS_ASS_VDD1_EN1_SHIFT 1 | ||
498 | #define EN1_SMPS_ASS_VIO_EN1_MASK 0x01 | ||
499 | #define EN1_SMPS_ASS_VIO_EN1_SHIFT 0 | ||
500 | |||
501 | |||
502 | /*Register EN2_LDO_ASS (0x80) register.RegisterDescription */ | ||
503 | #define EN2_LDO_ASS_VDAC_EN2_MASK 0x80 | ||
504 | #define EN2_LDO_ASS_VDAC_EN2_SHIFT 7 | ||
505 | #define EN2_LDO_ASS_VPLL_EN2_MASK 0x40 | ||
506 | #define EN2_LDO_ASS_VPLL_EN2_SHIFT 6 | ||
507 | #define EN2_LDO_ASS_VAUX33_EN2_MASK 0x20 | ||
508 | #define EN2_LDO_ASS_VAUX33_EN2_SHIFT 5 | ||
509 | #define EN2_LDO_ASS_VAUX2_EN2_MASK 0x10 | ||
510 | #define EN2_LDO_ASS_VAUX2_EN2_SHIFT 4 | ||
511 | #define EN2_LDO_ASS_VAUX1_EN2_MASK 0x08 | ||
512 | #define EN2_LDO_ASS_VAUX1_EN2_SHIFT 3 | ||
513 | #define EN2_LDO_ASS_VDIG2_EN2_MASK 0x04 | ||
514 | #define EN2_LDO_ASS_VDIG2_EN2_SHIFT 2 | ||
515 | #define EN2_LDO_ASS_VDIG1_EN2_MASK 0x02 | ||
516 | #define EN2_LDO_ASS_VDIG1_EN2_SHIFT 1 | ||
517 | #define EN2_LDO_ASS_VMMC_EN2_MASK 0x01 | ||
518 | #define EN2_LDO_ASS_VMMC_EN2_SHIFT 0 | ||
519 | |||
520 | |||
521 | /*Register EN2_SMPS_ASS (0x80) register.RegisterDescription */ | ||
522 | #define EN2_SMPS_ASS_RSVD_MASK 0xE0 | ||
523 | #define EN2_SMPS_ASS_RSVD_SHIFT 5 | ||
524 | #define EN2_SMPS_ASS_SPARE_EN2_MASK 0x10 | ||
525 | #define EN2_SMPS_ASS_SPARE_EN2_SHIFT 4 | ||
526 | #define EN2_SMPS_ASS_VDD3_EN2_MASK 0x08 | ||
527 | #define EN2_SMPS_ASS_VDD3_EN2_SHIFT 3 | ||
528 | #define EN2_SMPS_ASS_VDD2_EN2_MASK 0x04 | ||
529 | #define EN2_SMPS_ASS_VDD2_EN2_SHIFT 2 | ||
530 | #define EN2_SMPS_ASS_VDD1_EN2_MASK 0x02 | ||
531 | #define EN2_SMPS_ASS_VDD1_EN2_SHIFT 1 | ||
532 | #define EN2_SMPS_ASS_VIO_EN2_MASK 0x01 | ||
533 | #define EN2_SMPS_ASS_VIO_EN2_SHIFT 0 | ||
534 | |||
535 | |||
536 | /*Register EN3_LDO_ASS (0x80) register.RegisterDescription */ | ||
537 | #define EN3_LDO_ASS_VDAC_EN3_MASK 0x80 | ||
538 | #define EN3_LDO_ASS_VDAC_EN3_SHIFT 7 | ||
539 | #define EN3_LDO_ASS_VPLL_EN3_MASK 0x40 | ||
540 | #define EN3_LDO_ASS_VPLL_EN3_SHIFT 6 | ||
541 | #define EN3_LDO_ASS_VAUX33_EN3_MASK 0x20 | ||
542 | #define EN3_LDO_ASS_VAUX33_EN3_SHIFT 5 | ||
543 | #define EN3_LDO_ASS_VAUX2_EN3_MASK 0x10 | ||
544 | #define EN3_LDO_ASS_VAUX2_EN3_SHIFT 4 | ||
545 | #define EN3_LDO_ASS_VAUX1_EN3_MASK 0x08 | ||
546 | #define EN3_LDO_ASS_VAUX1_EN3_SHIFT 3 | ||
547 | #define EN3_LDO_ASS_VDIG2_EN3_MASK 0x04 | ||
548 | #define EN3_LDO_ASS_VDIG2_EN3_SHIFT 2 | ||
549 | #define EN3_LDO_ASS_VDIG1_EN3_MASK 0x02 | ||
550 | #define EN3_LDO_ASS_VDIG1_EN3_SHIFT 1 | ||
551 | #define EN3_LDO_ASS_VMMC_EN3_MASK 0x01 | ||
552 | #define EN3_LDO_ASS_VMMC_EN3_SHIFT 0 | ||
553 | |||
554 | |||
555 | /*Register SPARE (0x80) register.RegisterDescription */ | ||
556 | #define SPARE_SPARE_MASK 0xFF | ||
557 | #define SPARE_SPARE_SHIFT 0 | ||
558 | |||
559 | |||
560 | /*Register INT_STS (0x80) register.RegisterDescription */ | ||
561 | #define INT_STS_RTC_PERIOD_IT_MASK 0x80 | ||
562 | #define INT_STS_RTC_PERIOD_IT_SHIFT 7 | ||
563 | #define INT_STS_RTC_ALARM_IT_MASK 0x40 | ||
564 | #define INT_STS_RTC_ALARM_IT_SHIFT 6 | ||
565 | #define INT_STS_HOTDIE_IT_MASK 0x20 | ||
566 | #define INT_STS_HOTDIE_IT_SHIFT 5 | ||
567 | #define INT_STS_PWRHOLD_IT_MASK 0x10 | ||
568 | #define INT_STS_PWRHOLD_IT_SHIFT 4 | ||
569 | #define INT_STS_PWRON_LP_IT_MASK 0x08 | ||
570 | #define INT_STS_PWRON_LP_IT_SHIFT 3 | ||
571 | #define INT_STS_PWRON_IT_MASK 0x04 | ||
572 | #define INT_STS_PWRON_IT_SHIFT 2 | ||
573 | #define INT_STS_VMBHI_IT_MASK 0x02 | ||
574 | #define INT_STS_VMBHI_IT_SHIFT 1 | ||
575 | #define INT_STS_VMBDCH_IT_MASK 0x01 | ||
576 | #define INT_STS_VMBDCH_IT_SHIFT 0 | ||
577 | |||
578 | |||
579 | /*Register INT_MSK (0x80) register.RegisterDescription */ | ||
580 | #define INT_MSK_RTC_PERIOD_IT_MSK_MASK 0x80 | ||
581 | #define INT_MSK_RTC_PERIOD_IT_MSK_SHIFT 7 | ||
582 | #define INT_MSK_RTC_ALARM_IT_MSK_MASK 0x40 | ||
583 | #define INT_MSK_RTC_ALARM_IT_MSK_SHIFT 6 | ||
584 | #define INT_MSK_HOTDIE_IT_MSK_MASK 0x20 | ||
585 | #define INT_MSK_HOTDIE_IT_MSK_SHIFT 5 | ||
586 | #define INT_MSK_PWRHOLD_IT_MSK_MASK 0x10 | ||
587 | #define INT_MSK_PWRHOLD_IT_MSK_SHIFT 4 | ||
588 | #define INT_MSK_PWRON_LP_IT_MSK_MASK 0x08 | ||
589 | #define INT_MSK_PWRON_LP_IT_MSK_SHIFT 3 | ||
590 | #define INT_MSK_PWRON_IT_MSK_MASK 0x04 | ||
591 | #define INT_MSK_PWRON_IT_MSK_SHIFT 2 | ||
592 | #define INT_MSK_VMBHI_IT_MSK_MASK 0x02 | ||
593 | #define INT_MSK_VMBHI_IT_MSK_SHIFT 1 | ||
594 | #define INT_MSK_VMBDCH_IT_MSK_MASK 0x01 | ||
595 | #define INT_MSK_VMBDCH_IT_MSK_SHIFT 0 | ||
596 | |||
597 | |||
598 | /*Register INT_STS2 (0x80) register.RegisterDescription */ | ||
599 | #define INT_STS2_GPIO3_F_IT_MASK 0x80 | ||
600 | #define INT_STS2_GPIO3_F_IT_SHIFT 7 | ||
601 | #define INT_STS2_GPIO3_R_IT_MASK 0x40 | ||
602 | #define INT_STS2_GPIO3_R_IT_SHIFT 6 | ||
603 | #define INT_STS2_GPIO2_F_IT_MASK 0x20 | ||
604 | #define INT_STS2_GPIO2_F_IT_SHIFT 5 | ||
605 | #define INT_STS2_GPIO2_R_IT_MASK 0x10 | ||
606 | #define INT_STS2_GPIO2_R_IT_SHIFT 4 | ||
607 | #define INT_STS2_GPIO1_F_IT_MASK 0x08 | ||
608 | #define INT_STS2_GPIO1_F_IT_SHIFT 3 | ||
609 | #define INT_STS2_GPIO1_R_IT_MASK 0x04 | ||
610 | #define INT_STS2_GPIO1_R_IT_SHIFT 2 | ||
611 | #define INT_STS2_GPIO0_F_IT_MASK 0x02 | ||
612 | #define INT_STS2_GPIO0_F_IT_SHIFT 1 | ||
613 | #define INT_STS2_GPIO0_R_IT_MASK 0x01 | ||
614 | #define INT_STS2_GPIO0_R_IT_SHIFT 0 | ||
615 | |||
616 | |||
617 | /*Register INT_MSK2 (0x80) register.RegisterDescription */ | ||
618 | #define INT_MSK2_GPIO3_F_IT_MSK_MASK 0x80 | ||
619 | #define INT_MSK2_GPIO3_F_IT_MSK_SHIFT 7 | ||
620 | #define INT_MSK2_GPIO3_R_IT_MSK_MASK 0x40 | ||
621 | #define INT_MSK2_GPIO3_R_IT_MSK_SHIFT 6 | ||
622 | #define INT_MSK2_GPIO2_F_IT_MSK_MASK 0x20 | ||
623 | #define INT_MSK2_GPIO2_F_IT_MSK_SHIFT 5 | ||
624 | #define INT_MSK2_GPIO2_R_IT_MSK_MASK 0x10 | ||
625 | #define INT_MSK2_GPIO2_R_IT_MSK_SHIFT 4 | ||
626 | #define INT_MSK2_GPIO1_F_IT_MSK_MASK 0x08 | ||
627 | #define INT_MSK2_GPIO1_F_IT_MSK_SHIFT 3 | ||
628 | #define INT_MSK2_GPIO1_R_IT_MSK_MASK 0x04 | ||
629 | #define INT_MSK2_GPIO1_R_IT_MSK_SHIFT 2 | ||
630 | #define INT_MSK2_GPIO0_F_IT_MSK_MASK 0x02 | ||
631 | #define INT_MSK2_GPIO0_F_IT_MSK_SHIFT 1 | ||
632 | #define INT_MSK2_GPIO0_R_IT_MSK_MASK 0x01 | ||
633 | #define INT_MSK2_GPIO0_R_IT_MSK_SHIFT 0 | ||
634 | |||
635 | |||
636 | /*Register INT_STS3 (0x80) register.RegisterDescription */ | ||
637 | #define INT_STS3_GPIO5_F_IT_MASK 0x08 | ||
638 | #define INT_STS3_GPIO5_F_IT_SHIFT 3 | ||
639 | #define INT_STS3_GPIO5_R_IT_MASK 0x04 | ||
640 | #define INT_STS3_GPIO5_R_IT_SHIFT 2 | ||
641 | #define INT_STS3_GPIO4_F_IT_MASK 0x02 | ||
642 | #define INT_STS3_GPIO4_F_IT_SHIFT 1 | ||
643 | #define INT_STS3_GPIO4_R_IT_MASK 0x01 | ||
644 | #define INT_STS3_GPIO4_R_IT_SHIFT 0 | ||
645 | |||
646 | |||
647 | /*Register INT_MSK3 (0x80) register.RegisterDescription */ | ||
648 | #define INT_MSK3_GPIO5_F_IT_MSK_MASK 0x08 | ||
649 | #define INT_MSK3_GPIO5_F_IT_MSK_SHIFT 3 | ||
650 | #define INT_MSK3_GPIO5_R_IT_MSK_MASK 0x04 | ||
651 | #define INT_MSK3_GPIO5_R_IT_MSK_SHIFT 2 | ||
652 | #define INT_MSK3_GPIO4_F_IT_MSK_MASK 0x02 | ||
653 | #define INT_MSK3_GPIO4_F_IT_MSK_SHIFT 1 | ||
654 | #define INT_MSK3_GPIO4_R_IT_MSK_MASK 0x01 | ||
655 | #define INT_MSK3_GPIO4_R_IT_MSK_SHIFT 0 | ||
656 | |||
657 | |||
658 | /*Register GPIO (0x80) register.RegisterDescription */ | ||
659 | #define GPIO_DEB_MASK 0x10 | ||
660 | #define GPIO_DEB_SHIFT 4 | ||
661 | #define GPIO_PUEN_MASK 0x08 | ||
662 | #define GPIO_PUEN_SHIFT 3 | ||
663 | #define GPIO_CFG_MASK 0x04 | ||
664 | #define GPIO_CFG_SHIFT 2 | ||
665 | #define GPIO_STS_MASK 0x02 | ||
666 | #define GPIO_STS_SHIFT 1 | ||
667 | #define GPIO_SET_MASK 0x01 | ||
668 | #define GPIO_SET_SHIFT 0 | ||
669 | |||
670 | |||
671 | /*Register JTAGVERNUM (0x80) register.RegisterDescription */ | ||
672 | #define JTAGVERNUM_VERNUM_MASK 0x0F | ||
673 | #define JTAGVERNUM_VERNUM_SHIFT 0 | ||
674 | |||
675 | |||
676 | /* Register VDDCTRL (0x27) bit definitions */ | ||
677 | #define VDDCTRL_ST_MASK 0x03 | ||
678 | #define VDDCTRL_ST_SHIFT 0 | ||
679 | |||
680 | |||
681 | /*Register VDDCTRL_OP (0x28) bit definitions */ | ||
682 | #define VDDCTRL_OP_CMD_MASK 0x80 | ||
683 | #define VDDCTRL_OP_CMD_SHIFT 7 | ||
684 | #define VDDCTRL_OP_SEL_MASK 0x7F | ||
685 | #define VDDCTRL_OP_SEL_SHIFT 0 | ||
686 | |||
687 | |||
688 | /*Register VDDCTRL_SR (0x29) bit definitions */ | ||
689 | #define VDDCTRL_SR_SEL_MASK 0x7F | ||
690 | #define VDDCTRL_SR_SEL_SHIFT 0 | ||
691 | |||
692 | |||
693 | /* IRQ Definitions */ | ||
694 | #define TPS65910_IRQ_VBAT_VMBDCH 0 | ||
695 | #define TPS65910_IRQ_VBAT_VMHI 1 | ||
696 | #define TPS65910_IRQ_PWRON 2 | ||
697 | #define TPS65910_IRQ_PWRON_LP 3 | ||
698 | #define TPS65910_IRQ_PWRHOLD 4 | ||
699 | #define TPS65910_IRQ_HOTDIE 5 | ||
700 | #define TPS65910_IRQ_RTC_ALARM 6 | ||
701 | #define TPS65910_IRQ_RTC_PERIOD 7 | ||
702 | #define TPS65910_IRQ_GPIO_R 8 | ||
703 | #define TPS65910_IRQ_GPIO_F 9 | ||
704 | #define TPS65910_NUM_IRQ 10 | ||
705 | |||
706 | #define TPS65911_IRQ_VBAT_VMBDCH 0 | ||
707 | #define TPS65911_IRQ_VBAT_VMBDCH2L 1 | ||
708 | #define TPS65911_IRQ_VBAT_VMBDCH2H 2 | ||
709 | #define TPS65911_IRQ_VBAT_VMHI 3 | ||
710 | #define TPS65911_IRQ_PWRON 4 | ||
711 | #define TPS65911_IRQ_PWRON_LP 5 | ||
712 | #define TPS65911_IRQ_PWRHOLD_F 6 | ||
713 | #define TPS65911_IRQ_PWRHOLD_R 7 | ||
714 | #define TPS65911_IRQ_HOTDIE 8 | ||
715 | #define TPS65911_IRQ_RTC_ALARM 9 | ||
716 | #define TPS65911_IRQ_RTC_PERIOD 10 | ||
717 | #define TPS65911_IRQ_GPIO0_R 11 | ||
718 | #define TPS65911_IRQ_GPIO0_F 12 | ||
719 | #define TPS65911_IRQ_GPIO1_R 13 | ||
720 | #define TPS65911_IRQ_GPIO1_F 14 | ||
721 | #define TPS65911_IRQ_GPIO2_R 15 | ||
722 | #define TPS65911_IRQ_GPIO2_F 16 | ||
723 | #define TPS65911_IRQ_GPIO3_R 17 | ||
724 | #define TPS65911_IRQ_GPIO3_F 18 | ||
725 | #define TPS65911_IRQ_GPIO4_R 19 | ||
726 | #define TPS65911_IRQ_GPIO4_F 20 | ||
727 | #define TPS65911_IRQ_GPIO5_R 21 | ||
728 | #define TPS65911_IRQ_GPIO5_F 22 | ||
729 | #define TPS65911_IRQ_WTCHDG 23 | ||
730 | #define TPS65911_IRQ_PWRDN 24 | ||
731 | |||
732 | #define TPS65911_NUM_IRQ 25 | ||
733 | |||
734 | |||
735 | /* GPIO Register Definitions */ | ||
736 | #define TPS65910_GPIO_DEB BIT(2) | ||
737 | #define TPS65910_GPIO_PUEN BIT(3) | ||
738 | #define TPS65910_GPIO_CFG BIT(2) | ||
739 | #define TPS65910_GPIO_STS BIT(1) | ||
740 | #define TPS65910_GPIO_SET BIT(0) | ||
741 | |||
742 | /** | ||
743 | * struct tps65910_board | ||
744 | * Board platform data may be used to initialize regulators. | ||
745 | */ | ||
746 | |||
747 | struct tps65910_board { | ||
748 | int gpio_base; | ||
749 | int irq; | ||
750 | int irq_base; | ||
751 | int vmbch_threshold; | ||
752 | int vmbch2_threshold; | ||
753 | struct regulator_init_data *tps65910_pmic_init_data; | ||
754 | }; | ||
755 | |||
756 | /** | ||
757 | * struct tps65910 - tps65910 sub-driver chip access routines | ||
758 | */ | ||
759 | |||
760 | struct tps65910 { | ||
761 | struct device *dev; | ||
762 | struct i2c_client *i2c_client; | ||
763 | struct mutex io_mutex; | ||
764 | unsigned int id; | ||
765 | int (*read)(struct tps65910 *tps65910, u8 reg, int size, void *dest); | ||
766 | int (*write)(struct tps65910 *tps65910, u8 reg, int size, void *src); | ||
767 | |||
768 | /* Client devices */ | ||
769 | struct tps65910_pmic *pmic; | ||
770 | struct tps65910_rtc *rtc; | ||
771 | struct tps65910_power *power; | ||
772 | |||
773 | /* GPIO Handling */ | ||
774 | struct gpio_chip gpio; | ||
775 | |||
776 | /* IRQ Handling */ | ||
777 | struct mutex irq_lock; | ||
778 | int chip_irq; | ||
779 | int irq_base; | ||
780 | int irq_num; | ||
781 | u32 irq_mask; | ||
782 | }; | ||
783 | |||
784 | struct tps65910_platform_data { | ||
785 | int irq; | ||
786 | int irq_base; | ||
787 | }; | ||
788 | |||
789 | int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask); | ||
790 | int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask); | ||
791 | void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base); | ||
792 | int tps65910_irq_init(struct tps65910 *tps65910, int irq, | ||
793 | struct tps65910_platform_data *pdata); | ||
794 | |||
795 | static inline int tps65910_chip_id(struct tps65910 *tps65910) | ||
796 | { | ||
797 | return tps65910->id; | ||
798 | } | ||
799 | |||
800 | #endif /* __LINUX_MFD_TPS65910_H */ | ||
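All of the *_MASK/*_SHIFT pairs above follow the usual read-modify-write idiom, and the chip-level read()/write() callbacks in struct tps65910 transfer size bytes starting at reg. A hedged sketch of programming the VDD1 operational voltage selector; the helper is illustrative and is not claimed to match the actual tps65910 regulator driver:

#include <linux/mfd/tps65910.h>

/* Sketch: update only the SEL field of VDD1_OP, leaving the CMD bit alone. */
static int example_set_vdd1_sel(struct tps65910 *tps65910, u8 sel)
{
        u8 val;
        int err;

        err = tps65910->read(tps65910, TPS65910_VDD1_OP, 1, &val);
        if (err)
                return err;

        val &= ~VDD1_OP_SEL_MASK;
        val |= (sel << VDD1_OP_SEL_SHIFT) & VDD1_OP_SEL_MASK;

        return tps65910->write(tps65910, TPS65910_VDD1_OP, 1, &val);
}

For single-bit fields, the tps65910_set_bits()/tps65910_clear_bits() helpers declared above can be used instead of an open-coded read-modify-write.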
diff --git a/include/linux/mfd/twl4030-codec.h b/include/linux/mfd/twl4030-codec.h index 2ec317c68e59..5cc16bbd1da1 100644 --- a/include/linux/mfd/twl4030-codec.h +++ b/include/linux/mfd/twl4030-codec.h | |||
@@ -1,7 +1,7 @@ | |||
1 | /* | 1 | /* |
2 | * MFD driver for twl4030 codec submodule | 2 | * MFD driver for twl4030 codec submodule |
3 | * | 3 | * |
4 | * Author: Peter Ujfalusi <peter.ujfalusi@nokia.com> | 4 | * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> |
5 | * | 5 | * |
6 | * Copyright: (C) 2009 Nokia Corporation | 6 | * Copyright: (C) 2009 Nokia Corporation |
7 | * | 7 | * |
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h index 903280d21866..0d515ee1c247 100644 --- a/include/linux/mfd/wm831x/core.h +++ b/include/linux/mfd/wm831x/core.h | |||
@@ -301,30 +301,4 @@ int wm831x_device_suspend(struct wm831x *wm831x); | |||
301 | int wm831x_irq_init(struct wm831x *wm831x, int irq); | 301 | int wm831x_irq_init(struct wm831x *wm831x, int irq); |
302 | void wm831x_irq_exit(struct wm831x *wm831x); | 302 | void wm831x_irq_exit(struct wm831x *wm831x); |
303 | 303 | ||
304 | static inline int __must_check wm831x_request_irq(struct wm831x *wm831x, | ||
305 | unsigned int irq, | ||
306 | irq_handler_t handler, | ||
307 | unsigned long flags, | ||
308 | const char *name, | ||
309 | void *dev) | ||
310 | { | ||
311 | return request_threaded_irq(irq, NULL, handler, flags, name, dev); | ||
312 | } | ||
313 | |||
314 | static inline void wm831x_free_irq(struct wm831x *wm831x, | ||
315 | unsigned int irq, void *dev) | ||
316 | { | ||
317 | free_irq(irq, dev); | ||
318 | } | ||
319 | |||
320 | static inline void wm831x_disable_irq(struct wm831x *wm831x, int irq) | ||
321 | { | ||
322 | disable_irq(irq); | ||
323 | } | ||
324 | |||
325 | static inline void wm831x_enable_irq(struct wm831x *wm831x, int irq) | ||
326 | { | ||
327 | enable_irq(irq); | ||
328 | } | ||
329 | |||
330 | #endif | 304 | #endif |
diff --git a/include/linux/mfd/wm831x/pdata.h b/include/linux/mfd/wm831x/pdata.h index 632d1567a1b6..ff42d700293f 100644 --- a/include/linux/mfd/wm831x/pdata.h +++ b/include/linux/mfd/wm831x/pdata.h | |||
@@ -105,6 +105,9 @@ struct wm831x_watchdog_pdata { | |||
105 | #define WM831X_MAX_LDO 11 | 105 | #define WM831X_MAX_LDO 11 |
106 | #define WM831X_MAX_ISINK 2 | 106 | #define WM831X_MAX_ISINK 2 |
107 | 107 | ||
108 | #define WM831X_GPIO_CONFIGURE 0x10000 | ||
109 | #define WM831X_GPIO_NUM 16 | ||
110 | |||
108 | struct wm831x_pdata { | 111 | struct wm831x_pdata { |
109 | /** Used to distinguish multiple WM831x chips */ | 112 | /** Used to distinguish multiple WM831x chips */ |
110 | int wm831x_num; | 113 | int wm831x_num; |
@@ -119,6 +122,7 @@ struct wm831x_pdata { | |||
119 | 122 | ||
120 | int irq_base; | 123 | int irq_base; |
121 | int gpio_base; | 124 | int gpio_base; |
125 | int gpio_defaults[WM831X_GPIO_NUM]; | ||
122 | struct wm831x_backlight_pdata *backlight; | 126 | struct wm831x_backlight_pdata *backlight; |
123 | struct wm831x_backup_pdata *backup; | 127 | struct wm831x_backup_pdata *backup; |
124 | struct wm831x_battery_pdata *battery; | 128 | struct wm831x_battery_pdata *battery; |
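The new gpio_defaults[] array, together with WM831X_GPIO_CONFIGURE, lets a board supply power-on settings for the 16 GPIO control registers. The sketch below assumes (the header does not spell this out) that a zero entry means "leave this GPIO alone" and that only the low 16 bits of a non-zero entry are written, which is presumably why a desired register value of zero has to be expressed via WM831X_GPIO_CONFIGURE:

#include <linux/mfd/wm831x/pdata.h>

/* Board-file sketch; the register values are invented for illustration. */
static struct wm831x_pdata example_wm831x_pdata = {
        .gpio_defaults = {
                [0] = 0x8000,                           /* hypothetical GPIO1 setting */
                [3] = WM831X_GPIO_CONFIGURE | 0x0000,   /* force GPIO4 to an all-zero config */
        },
        /* other pdata fields omitted */
};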
diff --git a/include/linux/mm.h b/include/linux/mm.h index 8eb969ebf904..9670f71d7be9 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h | |||
@@ -165,12 +165,12 @@ extern pgprot_t protection_map[16]; | |||
165 | */ | 165 | */ |
166 | static inline int is_linear_pfn_mapping(struct vm_area_struct *vma) | 166 | static inline int is_linear_pfn_mapping(struct vm_area_struct *vma) |
167 | { | 167 | { |
168 | return (vma->vm_flags & VM_PFN_AT_MMAP); | 168 | return !!(vma->vm_flags & VM_PFN_AT_MMAP); |
169 | } | 169 | } |
170 | 170 | ||
171 | static inline int is_pfn_mapping(struct vm_area_struct *vma) | 171 | static inline int is_pfn_mapping(struct vm_area_struct *vma) |
172 | { | 172 | { |
173 | return (vma->vm_flags & VM_PFNMAP); | 173 | return !!(vma->vm_flags & VM_PFNMAP); |
174 | } | 174 | } |
175 | 175 | ||
176 | /* | 176 | /* |
@@ -1408,17 +1408,11 @@ extern void exit_mmap(struct mm_struct *); | |||
1408 | extern int mm_take_all_locks(struct mm_struct *mm); | 1408 | extern int mm_take_all_locks(struct mm_struct *mm); |
1409 | extern void mm_drop_all_locks(struct mm_struct *mm); | 1409 | extern void mm_drop_all_locks(struct mm_struct *mm); |
1410 | 1410 | ||
1411 | #ifdef CONFIG_PROC_FS | ||
1412 | /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */ | 1411 | /* From fs/proc/base.c. callers must _not_ hold the mm's exe_file_lock */ |
1413 | extern void added_exe_file_vma(struct mm_struct *mm); | 1412 | extern void added_exe_file_vma(struct mm_struct *mm); |
1414 | extern void removed_exe_file_vma(struct mm_struct *mm); | 1413 | extern void removed_exe_file_vma(struct mm_struct *mm); |
1415 | #else | 1414 | extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); |
1416 | static inline void added_exe_file_vma(struct mm_struct *mm) | 1415 | extern struct file *get_mm_exe_file(struct mm_struct *mm); |
1417 | {} | ||
1418 | |||
1419 | static inline void removed_exe_file_vma(struct mm_struct *mm) | ||
1420 | {} | ||
1421 | #endif /* CONFIG_PROC_FS */ | ||
1422 | 1416 | ||
1423 | extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); | 1417 | extern int may_expand_vm(struct mm_struct *mm, unsigned long npages); |
1424 | extern int install_special_mapping(struct mm_struct *mm, | 1418 | extern int install_special_mapping(struct mm_struct *mm, |
@@ -1432,7 +1426,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, | |||
1432 | unsigned long flag, unsigned long pgoff); | 1426 | unsigned long flag, unsigned long pgoff); |
1433 | extern unsigned long mmap_region(struct file *file, unsigned long addr, | 1427 | extern unsigned long mmap_region(struct file *file, unsigned long addr, |
1434 | unsigned long len, unsigned long flags, | 1428 | unsigned long len, unsigned long flags, |
1435 | unsigned int vm_flags, unsigned long pgoff); | 1429 | vm_flags_t vm_flags, unsigned long pgoff); |
1436 | 1430 | ||
1437 | static inline unsigned long do_mmap(struct file *file, unsigned long addr, | 1431 | static inline unsigned long do_mmap(struct file *file, unsigned long addr, |
1438 | unsigned long len, unsigned long prot, | 1432 | unsigned long len, unsigned long prot, |
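The switch to !!(...) in is_linear_pfn_mapping()/is_pfn_mapping() normalizes the result to 0 or 1 before it is narrowed into the int return value, presumably so that a flag bit above bit 31 cannot be silently lost now that vm_flags is being funnelled through the vm_flags_t type introduced in the mm_types.h hunk below. A tiny stand-alone illustration of the truncation hazard (ordinary userspace C, not kernel code; the bit position is arbitrary):

#include <stdio.h>

int main(void)
{
        unsigned long flags = 1UL << 40;        /* pretend a high flag bit is set */

        /* On typical LP64 targets the cast keeps only the low 32 bits, giving 0. */
        int raw  = (int)(flags & (1UL << 40));
        int norm = !!(flags & (1UL << 40));     /* always 0 or 1 */

        printf("raw=%d norm=%d\n", raw, norm);
        return 0;
}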
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 071d459e866b..2a78aae78c69 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h | |||
@@ -102,6 +102,8 @@ struct page { | |||
102 | #endif | 102 | #endif |
103 | }; | 103 | }; |
104 | 104 | ||
105 | typedef unsigned long __nocast vm_flags_t; | ||
106 | |||
105 | /* | 107 | /* |
106 | * A region containing a mapping of a non-memory backed file under NOMMU | 108 | * A region containing a mapping of a non-memory backed file under NOMMU |
107 | * conditions. These are held in a global tree and are pinned by the VMAs that | 109 | * conditions. These are held in a global tree and are pinned by the VMAs that |
@@ -109,7 +111,7 @@ struct page { | |||
109 | */ | 111 | */ |
110 | struct vm_region { | 112 | struct vm_region { |
111 | struct rb_node vm_rb; /* link in global region tree */ | 113 | struct rb_node vm_rb; /* link in global region tree */ |
112 | unsigned long vm_flags; /* VMA vm_flags */ | 114 | vm_flags_t vm_flags; /* VMA vm_flags */ |
113 | unsigned long vm_start; /* start address of region */ | 115 | unsigned long vm_start; /* start address of region */ |
114 | unsigned long vm_end; /* region initialised to here */ | 116 | unsigned long vm_end; /* region initialised to here */ |
115 | unsigned long vm_top; /* region allocated to here */ | 117 | unsigned long vm_top; /* region allocated to here */ |
@@ -300,11 +302,9 @@ struct mm_struct { | |||
300 | struct task_struct __rcu *owner; | 302 | struct task_struct __rcu *owner; |
301 | #endif | 303 | #endif |
302 | 304 | ||
303 | #ifdef CONFIG_PROC_FS | ||
304 | /* store ref to file /proc/<pid>/exe symlink points to */ | 305 | /* store ref to file /proc/<pid>/exe symlink points to */ |
305 | struct file *exe_file; | 306 | struct file *exe_file; |
306 | unsigned long num_exe_file_vmas; | 307 | unsigned long num_exe_file_vmas; |
307 | #endif | ||
308 | #ifdef CONFIG_MMU_NOTIFIER | 308 | #ifdef CONFIG_MMU_NOTIFIER |
309 | struct mmu_notifier_mm *mmu_notifier_mm; | 309 | struct mmu_notifier_mm *mmu_notifier_mm; |
310 | #endif | 310 | #endif |
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 261f299c9441..c928dac6cad0 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h | |||
@@ -273,11 +273,6 @@ struct zone_reclaim_stat { | |||
273 | */ | 273 | */ |
274 | unsigned long recent_rotated[2]; | 274 | unsigned long recent_rotated[2]; |
275 | unsigned long recent_scanned[2]; | 275 | unsigned long recent_scanned[2]; |
276 | |||
277 | /* | ||
278 | * accumulated for batching | ||
279 | */ | ||
280 | unsigned long nr_saved_scan[NR_LRU_LISTS]; | ||
281 | }; | 276 | }; |
282 | 277 | ||
283 | struct zone { | 278 | struct zone { |
diff --git a/include/linux/net.h b/include/linux/net.h index 1da55e9b6f01..b29923006b11 100644 --- a/include/linux/net.h +++ b/include/linux/net.h | |||
@@ -289,11 +289,5 @@ extern int kernel_sock_shutdown(struct socket *sock, | |||
289 | MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \ | 289 | MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \ |
290 | "-type-" __stringify(type)) | 290 | "-type-" __stringify(type)) |
291 | 291 | ||
292 | #ifdef CONFIG_SYSCTL | ||
293 | #include <linux/sysctl.h> | ||
294 | #include <linux/ratelimit.h> | ||
295 | extern struct ratelimit_state net_ratelimit_state; | ||
296 | #endif | ||
297 | |||
298 | #endif /* __KERNEL__ */ | 292 | #endif /* __KERNEL__ */ |
299 | #endif /* _LINUX_NET_H */ | 293 | #endif /* _LINUX_NET_H */ |
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h index 7fa95df60146..857f5026ced6 100644 --- a/include/linux/netfilter.h +++ b/include/linux/netfilter.h | |||
@@ -13,6 +13,7 @@ | |||
13 | #endif | 13 | #endif |
14 | #include <linux/types.h> | 14 | #include <linux/types.h> |
15 | #include <linux/compiler.h> | 15 | #include <linux/compiler.h> |
16 | #include <linux/sysctl.h> | ||
16 | 17 | ||
17 | /* Responses from hook functions. */ | 18 | /* Responses from hook functions. */ |
18 | #define NF_DROP 0 | 19 | #define NF_DROP 0 |
diff --git a/include/linux/netfilter/ipset/ip_set_ahash.h b/include/linux/netfilter/ipset/ip_set_ahash.h index a0196ac79051..ac3c822eb39a 100644 --- a/include/linux/netfilter/ipset/ip_set_ahash.h +++ b/include/linux/netfilter/ipset/ip_set_ahash.h | |||
@@ -839,7 +839,7 @@ type_pf_tdel(struct ip_set *set, void *value, u32 timeout) | |||
839 | struct htable *t = h->table; | 839 | struct htable *t = h->table; |
840 | const struct type_pf_elem *d = value; | 840 | const struct type_pf_elem *d = value; |
841 | struct hbucket *n; | 841 | struct hbucket *n; |
842 | int i, ret = 0; | 842 | int i; |
843 | struct type_pf_elem *data; | 843 | struct type_pf_elem *data; |
844 | u32 key; | 844 | u32 key; |
845 | 845 | ||
@@ -850,7 +850,7 @@ type_pf_tdel(struct ip_set *set, void *value, u32 timeout) | |||
850 | if (!type_pf_data_equal(data, d)) | 850 | if (!type_pf_data_equal(data, d)) |
851 | continue; | 851 | continue; |
852 | if (type_pf_data_expired(data)) | 852 | if (type_pf_data_expired(data)) |
853 | ret = -IPSET_ERR_EXIST; | 853 | return -IPSET_ERR_EXIST; |
854 | if (i != n->pos - 1) | 854 | if (i != n->pos - 1) |
855 | /* Not last one */ | 855 | /* Not last one */ |
856 | type_pf_data_copy(data, ahash_tdata(n, n->pos - 1)); | 856 | type_pf_data_copy(data, ahash_tdata(n, n->pos - 1)); |
diff --git a/include/linux/netfilter/ipset/ip_set_timeout.h b/include/linux/netfilter/ipset/ip_set_timeout.h index 9f30c5f2ec1c..bcdd40ad39ed 100644 --- a/include/linux/netfilter/ipset/ip_set_timeout.h +++ b/include/linux/netfilter/ipset/ip_set_timeout.h | |||
@@ -45,7 +45,7 @@ ip_set_timeout_test(unsigned long timeout) | |||
45 | { | 45 | { |
46 | return timeout != IPSET_ELEM_UNSET && | 46 | return timeout != IPSET_ELEM_UNSET && |
47 | (timeout == IPSET_ELEM_PERMANENT || | 47 | (timeout == IPSET_ELEM_PERMANENT || |
48 | time_after(timeout, jiffies)); | 48 | time_is_after_jiffies(timeout)); |
49 | } | 49 | } |
50 | 50 | ||
51 | static inline bool | 51 | static inline bool |
@@ -53,7 +53,7 @@ ip_set_timeout_expired(unsigned long timeout) | |||
53 | { | 53 | { |
54 | return timeout != IPSET_ELEM_UNSET && | 54 | return timeout != IPSET_ELEM_UNSET && |
55 | timeout != IPSET_ELEM_PERMANENT && | 55 | timeout != IPSET_ELEM_PERMANENT && |
56 | time_before(timeout, jiffies); | 56 | time_is_before_jiffies(timeout); |
57 | } | 57 | } |
58 | 58 | ||
59 | static inline unsigned long | 59 | static inline unsigned long |
@@ -64,7 +64,7 @@ ip_set_timeout_set(u32 timeout) | |||
64 | if (!timeout) | 64 | if (!timeout) |
65 | return IPSET_ELEM_PERMANENT; | 65 | return IPSET_ELEM_PERMANENT; |
66 | 66 | ||
67 | t = timeout * HZ + jiffies; | 67 | t = msecs_to_jiffies(timeout * 1000) + jiffies; |
68 | if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT) | 68 | if (t == IPSET_ELEM_UNSET || t == IPSET_ELEM_PERMANENT) |
69 | /* Bingo! */ | 69 | /* Bingo! */ |
70 | t++; | 70 | t++; |
@@ -75,7 +75,8 @@ ip_set_timeout_set(u32 timeout) | |||
75 | static inline u32 | 75 | static inline u32 |
76 | ip_set_timeout_get(unsigned long timeout) | 76 | ip_set_timeout_get(unsigned long timeout) |
77 | { | 77 | { |
78 | return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ; | 78 | return timeout == IPSET_ELEM_PERMANENT ? 0 : |
79 | jiffies_to_msecs(timeout - jiffies)/1000; | ||
79 | } | 80 | } |
80 | 81 | ||
81 | #else | 82 | #else |
@@ -89,14 +90,14 @@ static inline bool | |||
89 | ip_set_timeout_test(unsigned long timeout) | 90 | ip_set_timeout_test(unsigned long timeout) |
90 | { | 91 | { |
91 | return timeout == IPSET_ELEM_PERMANENT || | 92 | return timeout == IPSET_ELEM_PERMANENT || |
92 | time_after(timeout, jiffies); | 93 | time_is_after_jiffies(timeout); |
93 | } | 94 | } |
94 | 95 | ||
95 | static inline bool | 96 | static inline bool |
96 | ip_set_timeout_expired(unsigned long timeout) | 97 | ip_set_timeout_expired(unsigned long timeout) |
97 | { | 98 | { |
98 | return timeout != IPSET_ELEM_PERMANENT && | 99 | return timeout != IPSET_ELEM_PERMANENT && |
99 | time_before(timeout, jiffies); | 100 | time_is_before_jiffies(timeout); |
100 | } | 101 | } |
101 | 102 | ||
102 | static inline unsigned long | 103 | static inline unsigned long |
@@ -107,7 +108,7 @@ ip_set_timeout_set(u32 timeout) | |||
107 | if (!timeout) | 108 | if (!timeout) |
108 | return IPSET_ELEM_PERMANENT; | 109 | return IPSET_ELEM_PERMANENT; |
109 | 110 | ||
110 | t = timeout * HZ + jiffies; | 111 | t = msecs_to_jiffies(timeout * 1000) + jiffies; |
111 | if (t == IPSET_ELEM_PERMANENT) | 112 | if (t == IPSET_ELEM_PERMANENT) |
112 | /* Bingo! :-) */ | 113 | /* Bingo! :-) */ |
113 | t++; | 114 | t++; |
@@ -118,7 +119,8 @@ ip_set_timeout_set(u32 timeout) | |||
118 | static inline u32 | 119 | static inline u32 |
119 | ip_set_timeout_get(unsigned long timeout) | 120 | ip_set_timeout_get(unsigned long timeout) |
120 | { | 121 | { |
121 | return timeout == IPSET_ELEM_PERMANENT ? 0 : (timeout - jiffies)/HZ; | 122 | return timeout == IPSET_ELEM_PERMANENT ? 0 : |
123 | jiffies_to_msecs(timeout - jiffies)/1000; | ||
122 | } | 124 | } |
123 | #endif /* ! IP_SET_BITMAP_TIMEOUT */ | 125 | #endif /* ! IP_SET_BITMAP_TIMEOUT */ |
124 | 126 | ||
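The ip_set_timeout_set()/ip_set_timeout_get() pair now goes through msecs_to_jiffies()/jiffies_to_msecs() instead of multiplying and dividing by HZ directly, but the stored value is still an absolute jiffies deadline. A worked example of the round trip, assuming HZ == 250 purely for illustration:

/*
 * ip_set_timeout_set(10):
 *     t = msecs_to_jiffies(10 * 1000) + jiffies
 *       = 2500 + jiffies                  (2500 jiffies == 10 s at HZ == 250)
 *
 * ip_set_timeout_get(t), read back immediately:
 *     jiffies_to_msecs(t - jiffies) / 1000
 *       = jiffies_to_msecs(2500) / 1000
 *       = 10000 / 1000 = 10 seconds
 *
 * time_is_after_jiffies(t) is simply the readability macro for
 * time_after(t, jiffies), i.e. true while the deadline has not yet passed.
 */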
diff --git a/include/linux/netlink.h b/include/linux/netlink.h index 4c4ac3f3ce5a..a9dd89552f9c 100644 --- a/include/linux/netlink.h +++ b/include/linux/netlink.h | |||
@@ -24,6 +24,7 @@ | |||
24 | /* leave room for NETLINK_DM (DM Events) */ | 24 | /* leave room for NETLINK_DM (DM Events) */ |
25 | #define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */ | 25 | #define NETLINK_SCSITRANSPORT 18 /* SCSI Transports */ |
26 | #define NETLINK_ECRYPTFS 19 | 26 | #define NETLINK_ECRYPTFS 19 |
27 | #define NETLINK_RDMA 20 | ||
27 | 28 | ||
28 | #define MAX_LINKS 32 | 29 | #define MAX_LINKS 32 |
29 | 30 | ||
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h index 7b370c7cfeff..50d20aba57d3 100644 --- a/include/linux/nsproxy.h +++ b/include/linux/nsproxy.h | |||
@@ -81,13 +81,4 @@ static inline void get_nsproxy(struct nsproxy *ns) | |||
81 | atomic_inc(&ns->count); | 81 | atomic_inc(&ns->count); |
82 | } | 82 | } |
83 | 83 | ||
84 | #ifdef CONFIG_CGROUP_NS | ||
85 | int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid); | ||
86 | #else | ||
87 | static inline int ns_cgroup_clone(struct task_struct *tsk, struct pid *pid) | ||
88 | { | ||
89 | return 0; | ||
90 | } | ||
91 | #endif | ||
92 | |||
93 | #endif | 84 | #endif |
diff --git a/include/linux/pid.h b/include/linux/pid.h index cdced84261d7..b152d44fb181 100644 --- a/include/linux/pid.h +++ b/include/linux/pid.h | |||
@@ -105,7 +105,7 @@ extern struct pid_namespace init_pid_ns; | |||
105 | * or rcu_read_lock() held. | 105 | * or rcu_read_lock() held. |
106 | * | 106 | * |
107 | * find_pid_ns() finds the pid in the namespace specified | 107 | * find_pid_ns() finds the pid in the namespace specified |
108 | * find_vpid() finr the pid by its virtual id, i.e. in the current namespace | 108 | * find_vpid() finds the pid by its virtual id, i.e. in the current namespace |
109 | * | 109 | * |
110 | * see also find_task_by_vpid() set in include/linux/sched.h | 110 | * see also find_task_by_vpid() set in include/linux/sched.h |
111 | */ | 111 | */ |
diff --git a/include/linux/power/isp1704_charger.h b/include/linux/power/isp1704_charger.h new file mode 100644 index 000000000000..68096a6aa2d7 --- /dev/null +++ b/include/linux/power/isp1704_charger.h | |||
@@ -0,0 +1,29 @@ | |||
1 | /* | ||
2 | * ISP1704 USB Charger Detection driver | ||
3 | * | ||
4 | * Copyright (C) 2011 Nokia Corporation | ||
5 | * | ||
6 | * This program is free software; you can redistribute it and/or modify | ||
7 | * it under the terms of the GNU General Public License as published by | ||
8 | * the Free Software Foundation; either version 2 of the License, or | ||
9 | * (at your option) any later version. | ||
10 | * | ||
11 | * This program is distributed in the hope that it will be useful, | ||
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | * GNU General Public License for more details. | ||
15 | * | ||
16 | * You should have received a copy of the GNU General Public License | ||
17 | * along with this program; if not, write to the Free Software | ||
18 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
19 | */ | ||
20 | |||
21 | |||
22 | #ifndef __ISP1704_CHARGER_H | ||
23 | #define __ISP1704_CHARGER_H | ||
24 | |||
25 | struct isp1704_charger_data { | ||
26 | void (*set_power)(bool on); | ||
27 | }; | ||
28 | |||
29 | #endif | ||
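The only board hook isp1704_charger_data provides is set_power(), so platform code just hands over a function that can switch the transceiver's supply. A hedged board-file sketch; the GPIO number, and the idea of gating the rail with a plain GPIO at all, are assumptions for illustration:

#include <linux/gpio.h>
#include <linux/power/isp1704_charger.h>

#define EXAMPLE_ISP1704_PWR_GPIO        67      /* made-up board GPIO */

static void example_isp1704_set_power(bool on)
{
        gpio_set_value(EXAMPLE_ISP1704_PWR_GPIO, on);
}

static struct isp1704_charger_data example_isp1704_pdata = {
        .set_power = example_isp1704_set_power,
};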
diff --git a/include/linux/power/max8903_charger.h b/include/linux/power/max8903_charger.h new file mode 100644 index 000000000000..24f51db8a83f --- /dev/null +++ b/include/linux/power/max8903_charger.h | |||
@@ -0,0 +1,57 @@ | |||
1 | /* | ||
2 | * max8903_charger.h - Maxim 8903 USB/Adapter Charger Driver | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics | ||
5 | * MyungJoo Ham <myungjoo.ham@samsung.com> | ||
6 | * | ||
7 | * This program is free software; you can redistribute it and/or modify | ||
8 | * it under the terms of the GNU General Public License as published by | ||
9 | * the Free Software Foundation; either version 2 of the License, or | ||
10 | * (at your option) any later version. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, | ||
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | * GNU General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA | ||
20 | * | ||
21 | */ | ||
22 | |||
23 | #ifndef __MAX8903_CHARGER_H__ | ||
24 | #define __MAX8903_CHARGER_H__ | ||
25 | |||
26 | struct max8903_pdata { | ||
27 | /* | ||
28 | * GPIOs | ||
29 | * cen, chg, flt, and usus are optional. | ||
30 | * Whether dok, dcm, and uok are required depends on the | ||
31 | * dc_valid and usb_valid flags below. | ||
32 | */ | ||
33 | int cen; /* Charger Enable input */ | ||
34 | int dok; /* DC(Adapter) Power OK output */ | ||
35 | int uok; /* USB Power OK output */ | ||
36 | int chg; /* Charger status output */ | ||
37 | int flt; /* Fault output */ | ||
38 | int dcm; /* Current-Limit Mode input (1: DC, 2: USB) */ | ||
39 | int usus; /* USB Suspend Input (1: suspended) */ | ||
40 | |||
41 | /* | ||
42 | * DC(Adapter/TA) is wired | ||
43 | * When dc_valid is true, | ||
44 | * dok and dcm should be valid. | ||
45 | * | ||
46 | * At least one of dc_valid or usb_valid should be true. | ||
47 | */ | ||
48 | bool dc_valid; | ||
49 | /* | ||
50 | * USB is wired | ||
51 | * When usb_valid is true, | ||
52 | * uok should be valid. | ||
53 | */ | ||
54 | bool usb_valid; | ||
55 | }; | ||
56 | |||
57 | #endif /* __MAX8903_CHARGER_H__ */ | ||
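Per the comments above, cen, chg, flt, and usus are optional, while dok/dcm (wired DC input) and uok (wired USB input) become mandatory once the matching *_valid flag is set. A board-file sketch for a DC-adapter-only design; all GPIO numbers are invented, and using a negative number to mark an unused optional pin is an assumption rather than something the header guarantees:

#include <linux/power/max8903_charger.h>

static struct max8903_pdata example_max8903_pdata = {
        .cen    = 16,   /* hypothetical GPIOs throughout */
        .dok    = 17,   /* required because dc_valid is set */
        .dcm    = 18,   /* required because dc_valid is set */
        .chg    = 19,
        .flt    = 20,
        .uok    = -1,   /* USB side not wired on this board */
        .usus   = -1,

        .dc_valid  = true,
        .usb_valid = false,
};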
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h index 648c9c58add7..e7576cf9e32d 100644 --- a/include/linux/proc_fs.h +++ b/include/linux/proc_fs.h | |||
@@ -173,12 +173,6 @@ extern void proc_net_remove(struct net *net, const char *name); | |||
173 | extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, | 173 | extern struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name, |
174 | struct proc_dir_entry *parent); | 174 | struct proc_dir_entry *parent); |
175 | 175 | ||
176 | /* While the {get|set|dup}_mm_exe_file functions are for mm_structs, they are | ||
177 | * only needed to implement /proc/<pid>|self/exe so we define them here. */ | ||
178 | extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file); | ||
179 | extern struct file *get_mm_exe_file(struct mm_struct *mm); | ||
180 | extern void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm); | ||
181 | |||
182 | extern struct file *proc_ns_fget(int fd); | 176 | extern struct file *proc_ns_fget(int fd); |
183 | 177 | ||
184 | #else | 178 | #else |
@@ -230,19 +224,6 @@ static inline void pid_ns_release_proc(struct pid_namespace *ns) | |||
230 | { | 224 | { |
231 | } | 225 | } |
232 | 226 | ||
233 | static inline void set_mm_exe_file(struct mm_struct *mm, | ||
234 | struct file *new_exe_file) | ||
235 | {} | ||
236 | |||
237 | static inline struct file *get_mm_exe_file(struct mm_struct *mm) | ||
238 | { | ||
239 | return NULL; | ||
240 | } | ||
241 | |||
242 | static inline void dup_mm_exe_file(struct mm_struct *oldmm, | ||
243 | struct mm_struct *newmm) | ||
244 | {} | ||
245 | |||
246 | static inline struct file *proc_ns_fget(int fd) | 227 | static inline struct file *proc_ns_fget(int fd) |
247 | { | 228 | { |
248 | return ERR_PTR(-EINVAL); | 229 | return ERR_PTR(-EINVAL); |
diff --git a/include/linux/ratelimit.h b/include/linux/ratelimit.h index 03ff67b0cdf5..2f007157fab9 100644 --- a/include/linux/ratelimit.h +++ b/include/linux/ratelimit.h | |||
@@ -41,4 +41,44 @@ extern struct ratelimit_state printk_ratelimit_state; | |||
41 | extern int ___ratelimit(struct ratelimit_state *rs, const char *func); | 41 | extern int ___ratelimit(struct ratelimit_state *rs, const char *func); |
42 | #define __ratelimit(state) ___ratelimit(state, __func__) | 42 | #define __ratelimit(state) ___ratelimit(state, __func__) |
43 | 43 | ||
44 | #ifdef CONFIG_PRINTK | ||
45 | |||
46 | #define WARN_ON_RATELIMIT(condition, state) \ | ||
47 | WARN_ON((condition) && __ratelimit(state)) | ||
48 | |||
49 | #define __WARN_RATELIMIT(condition, state, format...) \ | ||
50 | ({ \ | ||
51 | int rtn = 0; \ | ||
52 | if (unlikely(__ratelimit(state))) \ | ||
53 | rtn = WARN(condition, format); \ | ||
54 | rtn; \ | ||
55 | }) | ||
56 | |||
57 | #define WARN_RATELIMIT(condition, format...) \ | ||
58 | ({ \ | ||
59 | static DEFINE_RATELIMIT_STATE(_rs, \ | ||
60 | DEFAULT_RATELIMIT_INTERVAL, \ | ||
61 | DEFAULT_RATELIMIT_BURST); \ | ||
62 | __WARN_RATELIMIT(condition, &_rs, format); \ | ||
63 | }) | ||
64 | |||
65 | #else | ||
66 | |||
67 | #define WARN_ON_RATELIMIT(condition, state) \ | ||
68 | WARN_ON(condition) | ||
69 | |||
70 | #define __WARN_RATELIMIT(condition, state, format...) \ | ||
71 | ({ \ | ||
72 | int rtn = WARN(condition, format); \ | ||
73 | rtn; \ | ||
74 | }) | ||
75 | |||
76 | #define WARN_RATELIMIT(condition, format...) \ | ||
77 | ({ \ | ||
78 | int rtn = WARN(condition, format); \ | ||
79 | rtn; \ | ||
80 | }) | ||
81 | |||
82 | #endif | ||
83 | |||
44 | #endif /* _LINUX_RATELIMIT_H */ | 84 | #endif /* _LINUX_RATELIMIT_H */ |
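The new WARN_RATELIMIT() wraps WARN() in a per-callsite DEFINE_RATELIMIT_STATE, so a condition that fires on every interrupt only produces a burst of warnings per interval. A minimal sketch of a caller follows; the handler and the status bit are made up for illustration.

    #include <linux/kernel.h>
    #include <linux/ratelimit.h>

    /* Hypothetical interrupt path: warn on a bogus status bit, but at most
     * DEFAULT_RATELIMIT_BURST times per DEFAULT_RATELIMIT_INTERVAL. */
    static void example_handle_status(unsigned int status)
    {
    	WARN_RATELIMIT(status & 0x80,
    		       "unexpected status bit set: %#x\n", status);
    }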
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h index c4c4fc45f856..ce3127a75c88 100644 --- a/include/linux/regulator/machine.h +++ b/include/linux/regulator/machine.h | |||
@@ -68,6 +68,8 @@ struct regulator_state { | |||
68 | * | 68 | * |
69 | * @min_uV: Smallest voltage consumers may set. | 69 | * @min_uV: Smallest voltage consumers may set. |
70 | * @max_uV: Largest voltage consumers may set. | 70 | * @max_uV: Largest voltage consumers may set. |
71 | * @uV_offset: Offset applied to voltages from consumer to compensate for | ||
72 | * voltage drops. | ||
71 | * | 73 | * |
72 | * @min_uA: Smallest consumers consumers may set. | 74 | * @min_uA: Smallest consumers consumers may set. |
73 | * @max_uA: Largest current consumers may set. | 75 | * @max_uA: Largest current consumers may set. |
@@ -99,6 +101,8 @@ struct regulation_constraints { | |||
99 | int min_uV; | 101 | int min_uV; |
100 | int max_uV; | 102 | int max_uV; |
101 | 103 | ||
104 | int uV_offset; | ||
105 | |||
102 | /* current output range (inclusive) - for current control */ | 106 | /* current output range (inclusive) - for current control */ |
103 | int min_uA; | 107 | int min_uA; |
104 | int max_uA; | 108 | int max_uA; |
@@ -160,8 +164,6 @@ struct regulator_consumer_supply { | |||
160 | * @supply_regulator: Parent regulator. Specified using the regulator name | 164 | * @supply_regulator: Parent regulator. Specified using the regulator name |
161 | * as it appears in the name field in sysfs, which can | 165 | * as it appears in the name field in sysfs, which can |
162 | * be explicitly set using the constraints field 'name'. | 166 | * be explicitly set using the constraints field 'name'. |
163 | * @supply_regulator_dev: Parent regulator (if any) - DEPRECATED in favour | ||
164 | * of supply_regulator. | ||
165 | * | 167 | * |
166 | * @constraints: Constraints. These must be specified for the regulator to | 168 | * @constraints: Constraints. These must be specified for the regulator to |
167 | * be usable. | 169 | * be usable. |
@@ -173,7 +175,6 @@ struct regulator_consumer_supply { | |||
173 | */ | 175 | */ |
174 | struct regulator_init_data { | 176 | struct regulator_init_data { |
175 | const char *supply_regulator; /* or NULL for system supply */ | 177 | const char *supply_regulator; /* or NULL for system supply */ |
176 | struct device *supply_regulator_dev; /* or NULL for system supply */ | ||
177 | 178 | ||
178 | struct regulation_constraints constraints; | 179 | struct regulation_constraints constraints; |
179 | 180 | ||
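The new uV_offset constraint lets machine code compensate for a known drop between the regulator output and the consumer. A sketch of init data using it is below; the supply name and the 50 mV figure are illustrative, and the constraints' name field is the pre-existing one from regulation_constraints.

    #include <linux/regulator/machine.h>

    /* Illustrative constraints: consumers request 1.8 V, and a 50 mV drop on
     * the board is compensated via uV_offset. Name and values are made up. */
    static struct regulator_init_data example_vcc_init = {
    	.constraints = {
    		.name	   = "vcc_example",
    		.min_uV	   = 1800000,
    		.max_uV	   = 1800000,
    		.uV_offset = 50000,
    	},
    };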
diff --git a/include/linux/rtc.h b/include/linux/rtc.h index 877ece45426f..b27ebea25660 100644 --- a/include/linux/rtc.h +++ b/include/linux/rtc.h | |||
@@ -92,10 +92,10 @@ struct rtc_pll_info { | |||
92 | #define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */ | 92 | #define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */ |
93 | 93 | ||
94 | /* interrupt flags */ | 94 | /* interrupt flags */ |
95 | #define RTC_IRQF 0x80 /* any of the following is active */ | 95 | #define RTC_IRQF 0x80 /* Any of the following is active */ |
96 | #define RTC_PF 0x40 | 96 | #define RTC_PF 0x40 /* Periodic interrupt */ |
97 | #define RTC_AF 0x20 | 97 | #define RTC_AF 0x20 /* Alarm interrupt */ |
98 | #define RTC_UF 0x10 | 98 | #define RTC_UF 0x10 /* Update interrupt for 1Hz RTC */ |
99 | 99 | ||
100 | #ifdef __KERNEL__ | 100 | #ifdef __KERNEL__ |
101 | 101 | ||
diff --git a/include/linux/sched.h b/include/linux/sched.h index f18300eddfcb..dc8871295a5a 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -513,6 +513,7 @@ struct thread_group_cputimer { | |||
513 | spinlock_t lock; | 513 | spinlock_t lock; |
514 | }; | 514 | }; |
515 | 515 | ||
516 | #include <linux/rwsem.h> | ||
516 | struct autogroup; | 517 | struct autogroup; |
517 | 518 | ||
518 | /* | 519 | /* |
@@ -632,6 +633,16 @@ struct signal_struct { | |||
632 | unsigned audit_tty; | 633 | unsigned audit_tty; |
633 | struct tty_audit_buf *tty_audit_buf; | 634 | struct tty_audit_buf *tty_audit_buf; |
634 | #endif | 635 | #endif |
636 | #ifdef CONFIG_CGROUPS | ||
637 | /* | ||
638 | * The threadgroup_fork_lock prevents threads from forking with | ||
639 | * CLONE_THREAD while held for writing. Use this for fork-sensitive | ||
640 | * threadgroup-wide operations. It's taken for reading in fork.c in | ||
641 | * copy_process(). | ||
642 | * Currently only needed write-side by cgroups. | ||
643 | */ | ||
644 | struct rw_semaphore threadgroup_fork_lock; | ||
645 | #endif | ||
635 | 646 | ||
636 | int oom_adj; /* OOM kill score adjustment (bit shift) */ | 647 | int oom_adj; /* OOM kill score adjustment (bit shift) */ |
637 | int oom_score_adj; /* OOM kill score adjustment */ | 648 | int oom_score_adj; /* OOM kill score adjustment */ |
@@ -2323,6 +2334,31 @@ static inline void unlock_task_sighand(struct task_struct *tsk, | |||
2323 | spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); | 2334 | spin_unlock_irqrestore(&tsk->sighand->siglock, *flags); |
2324 | } | 2335 | } |
2325 | 2336 | ||
2337 | /* See the declaration of threadgroup_fork_lock in signal_struct. */ | ||
2338 | #ifdef CONFIG_CGROUPS | ||
2339 | static inline void threadgroup_fork_read_lock(struct task_struct *tsk) | ||
2340 | { | ||
2341 | down_read(&tsk->signal->threadgroup_fork_lock); | ||
2342 | } | ||
2343 | static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) | ||
2344 | { | ||
2345 | up_read(&tsk->signal->threadgroup_fork_lock); | ||
2346 | } | ||
2347 | static inline void threadgroup_fork_write_lock(struct task_struct *tsk) | ||
2348 | { | ||
2349 | down_write(&tsk->signal->threadgroup_fork_lock); | ||
2350 | } | ||
2351 | static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) | ||
2352 | { | ||
2353 | up_write(&tsk->signal->threadgroup_fork_lock); | ||
2354 | } | ||
2355 | #else | ||
2356 | static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {} | ||
2357 | static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {} | ||
2358 | static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {} | ||
2359 | static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {} | ||
2360 | #endif | ||
2361 | |||
2326 | #ifndef __HAVE_THREAD_FUNCTIONS | 2362 | #ifndef __HAVE_THREAD_FUNCTIONS |
2327 | 2363 | ||
2328 | #define task_thread_info(task) ((struct thread_info *)(task)->stack) | 2364 | #define task_thread_info(task) ((struct thread_info *)(task)->stack) |
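The accessors compile away when CONFIG_CGROUPS is off. A sketch of the intended usage pattern follows: hold the write side so no new CLONE_THREAD child can appear while a threadgroup-wide operation runs, which is exactly what cgroup_procs_write relies on later in this series. The per-group operation itself is a placeholder.

    #include <linux/sched.h>

    /* Placeholder for whatever threadgroup-wide work must not race with fork. */
    static void example_fork_sensitive_operation(struct task_struct *tsk)
    {
    	/* e.g. visit or retarget every thread in tsk's group */
    }

    /* Sketch: with the write side held, no thread can be added to tsk's
     * threadgroup until the unlock. */
    static void example_threadgroup_operation(struct task_struct *tsk)
    {
    	threadgroup_fork_write_lock(tsk);
    	example_fork_sensitive_operation(tsk);
    	threadgroup_fork_write_unlock(tsk);
    }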
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h index 06d69648fc86..e9811892844f 100644 --- a/include/linux/seqlock.h +++ b/include/linux/seqlock.h | |||
@@ -41,9 +41,6 @@ typedef struct { | |||
41 | #define __SEQLOCK_UNLOCKED(lockname) \ | 41 | #define __SEQLOCK_UNLOCKED(lockname) \ |
42 | { 0, __SPIN_LOCK_UNLOCKED(lockname) } | 42 | { 0, __SPIN_LOCK_UNLOCKED(lockname) } |
43 | 43 | ||
44 | #define SEQLOCK_UNLOCKED \ | ||
45 | __SEQLOCK_UNLOCKED(old_style_seqlock_init) | ||
46 | |||
47 | #define seqlock_init(x) \ | 44 | #define seqlock_init(x) \ |
48 | do { \ | 45 | do { \ |
49 | (x)->sequence = 0; \ | 46 | (x)->sequence = 0; \ |
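With SEQLOCK_UNLOCKED removed, static initialization goes through __SEQLOCK_UNLOCKED(), which takes the lock name explicitly. A one-line migration sketch:

    #include <linux/seqlock.h>

    /* Old style, now removed:
     *	seqlock_t example_lock = SEQLOCK_UNLOCKED;
     * Replacement, naming the lock for the spinlock initializer: */
    static seqlock_t example_lock = __SEQLOCK_UNLOCKED(example_lock);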
diff --git a/include/linux/smp.h b/include/linux/smp.h index 74243c86ba39..7ad824d510a2 100644 --- a/include/linux/smp.h +++ b/include/linux/smp.h | |||
@@ -98,16 +98,6 @@ void ipi_call_unlock_irq(void); | |||
98 | */ | 98 | */ |
99 | int on_each_cpu(smp_call_func_t func, void *info, int wait); | 99 | int on_each_cpu(smp_call_func_t func, void *info, int wait); |
100 | 100 | ||
101 | #define MSG_ALL_BUT_SELF 0x8000 /* Assume <32768 CPU's */ | ||
102 | #define MSG_ALL 0x8001 | ||
103 | |||
104 | #define MSG_INVALIDATE_TLB 0x0001 /* Remote processor TLB invalidate */ | ||
105 | #define MSG_STOP_CPU 0x0002 /* Sent to shut down slave CPU's | ||
106 | * when rebooting | ||
107 | */ | ||
108 | #define MSG_RESCHEDULE 0x0003 /* Reschedule request from master CPU*/ | ||
109 | #define MSG_CALL_FUNCTION 0x0004 /* Call function on all other CPUs */ | ||
110 | |||
111 | /* | 101 | /* |
112 | * Mark the boot cpu "online" so that it can call console drivers in | 102 | * Mark the boot cpu "online" so that it can call console drivers in |
113 | * printk() and can access its per-cpu storage. | 103 | * printk() and can access its per-cpu storage. |
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h index b4d7710bc38d..bb4f5fbbbd8e 100644 --- a/include/linux/spi/spi.h +++ b/include/linux/spi/spi.h | |||
@@ -581,7 +581,7 @@ extern int spi_bus_unlock(struct spi_master *master); | |||
581 | * Callable only from contexts that can sleep. | 581 | * Callable only from contexts that can sleep. |
582 | */ | 582 | */ |
583 | static inline int | 583 | static inline int |
584 | spi_write(struct spi_device *spi, const u8 *buf, size_t len) | 584 | spi_write(struct spi_device *spi, const void *buf, size_t len) |
585 | { | 585 | { |
586 | struct spi_transfer t = { | 586 | struct spi_transfer t = { |
587 | .tx_buf = buf, | 587 | .tx_buf = buf, |
@@ -605,7 +605,7 @@ spi_write(struct spi_device *spi, const u8 *buf, size_t len) | |||
605 | * Callable only from contexts that can sleep. | 605 | * Callable only from contexts that can sleep. |
606 | */ | 606 | */ |
607 | static inline int | 607 | static inline int |
608 | spi_read(struct spi_device *spi, u8 *buf, size_t len) | 608 | spi_read(struct spi_device *spi, void *buf, size_t len) |
609 | { | 609 | { |
610 | struct spi_transfer t = { | 610 | struct spi_transfer t = { |
611 | .rx_buf = buf, | 611 | .rx_buf = buf, |
@@ -620,8 +620,8 @@ spi_read(struct spi_device *spi, u8 *buf, size_t len) | |||
620 | 620 | ||
621 | /* this copies txbuf and rxbuf data; for small transfers only! */ | 621 | /* this copies txbuf and rxbuf data; for small transfers only! */ |
622 | extern int spi_write_then_read(struct spi_device *spi, | 622 | extern int spi_write_then_read(struct spi_device *spi, |
623 | const u8 *txbuf, unsigned n_tx, | 623 | const void *txbuf, unsigned n_tx, |
624 | u8 *rxbuf, unsigned n_rx); | 624 | void *rxbuf, unsigned n_rx); |
625 | 625 | ||
626 | /** | 626 | /** |
627 | * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read | 627 | * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read |
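Relaxing the helpers to void * lets callers pass structured command buffers without casts to u8 *. A sketch against a hypothetical device follows; the command byte and response size are made up.

    #include <linux/spi/spi.h>

    /* Hypothetical device: one command byte out, two status bytes back.
     * With the void * prototypes no (const u8 *) casts are needed. */
    static int example_read_status(struct spi_device *spi, u16 *status)
    {
    	const struct { u8 cmd; } tx = { .cmd = 0x05 };

    	return spi_write_then_read(spi, &tx, sizeof(tx),
    				   status, sizeof(*status));
    }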
diff --git a/include/linux/swap.h b/include/linux/swap.h index a5c6da5d8df8..384eb5fe530b 100644 --- a/include/linux/swap.h +++ b/include/linux/swap.h | |||
@@ -257,7 +257,8 @@ extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, | |||
257 | extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, | 257 | extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, |
258 | gfp_t gfp_mask, bool noswap, | 258 | gfp_t gfp_mask, bool noswap, |
259 | unsigned int swappiness, | 259 | unsigned int swappiness, |
260 | struct zone *zone); | 260 | struct zone *zone, |
261 | unsigned long *nr_scanned); | ||
261 | extern int __isolate_lru_page(struct page *page, int mode, int file); | 262 | extern int __isolate_lru_page(struct page *page, int mode, int file); |
262 | extern unsigned long shrink_all_memory(unsigned long nr_pages); | 263 | extern unsigned long shrink_all_memory(unsigned long nr_pages); |
263 | extern int vm_swappiness; | 264 | extern int vm_swappiness; |
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h new file mode 100644 index 000000000000..03b90cdc1921 --- /dev/null +++ b/include/linux/vm_event_item.h | |||
@@ -0,0 +1,64 @@ | |||
1 | #ifndef VM_EVENT_ITEM_H_INCLUDED | ||
2 | #define VM_EVENT_ITEM_H_INCLUDED | ||
3 | |||
4 | #ifdef CONFIG_ZONE_DMA | ||
5 | #define DMA_ZONE(xx) xx##_DMA, | ||
6 | #else | ||
7 | #define DMA_ZONE(xx) | ||
8 | #endif | ||
9 | |||
10 | #ifdef CONFIG_ZONE_DMA32 | ||
11 | #define DMA32_ZONE(xx) xx##_DMA32, | ||
12 | #else | ||
13 | #define DMA32_ZONE(xx) | ||
14 | #endif | ||
15 | |||
16 | #ifdef CONFIG_HIGHMEM | ||
17 | #define HIGHMEM_ZONE(xx) , xx##_HIGH | ||
18 | #else | ||
19 | #define HIGHMEM_ZONE(xx) | ||
20 | #endif | ||
21 | |||
22 | #define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE | ||
23 | |||
24 | enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | ||
25 | FOR_ALL_ZONES(PGALLOC), | ||
26 | PGFREE, PGACTIVATE, PGDEACTIVATE, | ||
27 | PGFAULT, PGMAJFAULT, | ||
28 | FOR_ALL_ZONES(PGREFILL), | ||
29 | FOR_ALL_ZONES(PGSTEAL), | ||
30 | FOR_ALL_ZONES(PGSCAN_KSWAPD), | ||
31 | FOR_ALL_ZONES(PGSCAN_DIRECT), | ||
32 | #ifdef CONFIG_NUMA | ||
33 | PGSCAN_ZONE_RECLAIM_FAILED, | ||
34 | #endif | ||
35 | PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, | ||
36 | KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, | ||
37 | KSWAPD_SKIP_CONGESTION_WAIT, | ||
38 | PAGEOUTRUN, ALLOCSTALL, PGROTATED, | ||
39 | #ifdef CONFIG_COMPACTION | ||
40 | COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED, | ||
41 | COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS, | ||
42 | #endif | ||
43 | #ifdef CONFIG_HUGETLB_PAGE | ||
44 | HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, | ||
45 | #endif | ||
46 | UNEVICTABLE_PGCULLED, /* culled to noreclaim list */ | ||
47 | UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */ | ||
48 | UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */ | ||
49 | UNEVICTABLE_PGMLOCKED, | ||
50 | UNEVICTABLE_PGMUNLOCKED, | ||
51 | UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ | ||
52 | UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ | ||
53 | UNEVICTABLE_MLOCKFREED, | ||
54 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
55 | THP_FAULT_ALLOC, | ||
56 | THP_FAULT_FALLBACK, | ||
57 | THP_COLLAPSE_ALLOC, | ||
58 | THP_COLLAPSE_ALLOC_FAILED, | ||
59 | THP_SPLIT, | ||
60 | #endif | ||
61 | NR_VM_EVENT_ITEMS | ||
62 | }; | ||
63 | |||
64 | #endif /* VM_EVENT_ITEM_H_INCLUDED */ | ||
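The new header only names the counters; they are still bumped through the accounting helpers declared in vmstat.h, which now pulls in vm_event_item.h. A minimal sketch, mirroring what the fault path does when it charges a major fault:

    #include <linux/vmstat.h>

    /* Sketch: increment one of the vm_event_item counters via the existing
     * count_vm_event() helper from vmstat.h. */
    static void example_charge_major_fault(void)
    {
    	count_vm_event(PGMAJFAULT);
    }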
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 51359837511a..bcd942fa611c 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h | |||
@@ -5,69 +5,9 @@ | |||
5 | #include <linux/percpu.h> | 5 | #include <linux/percpu.h> |
6 | #include <linux/mm.h> | 6 | #include <linux/mm.h> |
7 | #include <linux/mmzone.h> | 7 | #include <linux/mmzone.h> |
8 | #include <linux/vm_event_item.h> | ||
8 | #include <asm/atomic.h> | 9 | #include <asm/atomic.h> |
9 | 10 | ||
10 | #ifdef CONFIG_ZONE_DMA | ||
11 | #define DMA_ZONE(xx) xx##_DMA, | ||
12 | #else | ||
13 | #define DMA_ZONE(xx) | ||
14 | #endif | ||
15 | |||
16 | #ifdef CONFIG_ZONE_DMA32 | ||
17 | #define DMA32_ZONE(xx) xx##_DMA32, | ||
18 | #else | ||
19 | #define DMA32_ZONE(xx) | ||
20 | #endif | ||
21 | |||
22 | #ifdef CONFIG_HIGHMEM | ||
23 | #define HIGHMEM_ZONE(xx) , xx##_HIGH | ||
24 | #else | ||
25 | #define HIGHMEM_ZONE(xx) | ||
26 | #endif | ||
27 | |||
28 | |||
29 | #define FOR_ALL_ZONES(xx) DMA_ZONE(xx) DMA32_ZONE(xx) xx##_NORMAL HIGHMEM_ZONE(xx) , xx##_MOVABLE | ||
30 | |||
31 | enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT, | ||
32 | FOR_ALL_ZONES(PGALLOC), | ||
33 | PGFREE, PGACTIVATE, PGDEACTIVATE, | ||
34 | PGFAULT, PGMAJFAULT, | ||
35 | FOR_ALL_ZONES(PGREFILL), | ||
36 | FOR_ALL_ZONES(PGSTEAL), | ||
37 | FOR_ALL_ZONES(PGSCAN_KSWAPD), | ||
38 | FOR_ALL_ZONES(PGSCAN_DIRECT), | ||
39 | #ifdef CONFIG_NUMA | ||
40 | PGSCAN_ZONE_RECLAIM_FAILED, | ||
41 | #endif | ||
42 | PGINODESTEAL, SLABS_SCANNED, KSWAPD_STEAL, KSWAPD_INODESTEAL, | ||
43 | KSWAPD_LOW_WMARK_HIT_QUICKLY, KSWAPD_HIGH_WMARK_HIT_QUICKLY, | ||
44 | KSWAPD_SKIP_CONGESTION_WAIT, | ||
45 | PAGEOUTRUN, ALLOCSTALL, PGROTATED, | ||
46 | #ifdef CONFIG_COMPACTION | ||
47 | COMPACTBLOCKS, COMPACTPAGES, COMPACTPAGEFAILED, | ||
48 | COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS, | ||
49 | #endif | ||
50 | #ifdef CONFIG_HUGETLB_PAGE | ||
51 | HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL, | ||
52 | #endif | ||
53 | UNEVICTABLE_PGCULLED, /* culled to noreclaim list */ | ||
54 | UNEVICTABLE_PGSCANNED, /* scanned for reclaimability */ | ||
55 | UNEVICTABLE_PGRESCUED, /* rescued from noreclaim list */ | ||
56 | UNEVICTABLE_PGMLOCKED, | ||
57 | UNEVICTABLE_PGMUNLOCKED, | ||
58 | UNEVICTABLE_PGCLEARED, /* on COW, page truncate */ | ||
59 | UNEVICTABLE_PGSTRANDED, /* unable to isolate on unlock */ | ||
60 | UNEVICTABLE_MLOCKFREED, | ||
61 | #ifdef CONFIG_TRANSPARENT_HUGEPAGE | ||
62 | THP_FAULT_ALLOC, | ||
63 | THP_FAULT_FALLBACK, | ||
64 | THP_COLLAPSE_ALLOC, | ||
65 | THP_COLLAPSE_ALLOC_FAILED, | ||
66 | THP_SPLIT, | ||
67 | #endif | ||
68 | NR_VM_EVENT_ITEMS | ||
69 | }; | ||
70 | |||
71 | extern int sysctl_stat_interval; | 11 | extern int sysctl_stat_interval; |
72 | 12 | ||
73 | #ifdef CONFIG_VM_EVENT_COUNTERS | 13 | #ifdef CONFIG_VM_EVENT_COUNTERS |
diff --git a/include/media/m5mols.h b/include/media/m5mols.h new file mode 100644 index 000000000000..2d7e7ca2313d --- /dev/null +++ b/include/media/m5mols.h | |||
@@ -0,0 +1,35 @@ | |||
1 | /* | ||
2 | * Driver header for M-5MOLS 8M Pixel camera sensor with ISP | ||
3 | * | ||
4 | * Copyright (C) 2011 Samsung Electronics Co., Ltd. | ||
5 | * Author: HeungJun Kim, riverful.kim@samsung.com | ||
6 | * | ||
7 | * Copyright (C) 2009 Samsung Electronics Co., Ltd. | ||
8 | * Author: Dongsoo Nathaniel Kim, dongsoo45.kim@samsung.com | ||
9 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | ||
11 | * it under the terms of the GNU General Public License as published by | ||
12 | * the Free Software Foundation; either version 2 of the License, or | ||
13 | * (at your option) any later version. | ||
14 | */ | ||
15 | |||
16 | #ifndef MEDIA_M5MOLS_H | ||
17 | #define MEDIA_M5MOLS_H | ||
18 | |||
19 | /** | ||
20 | * struct m5mols_platform_data - platform data for M-5MOLS driver | ||
21 | * @irq: GPIO connected to the interrupt pin of the M-5MOLS | ||
22 | * @gpio_reset: GPIO driving the reset pin of M-5MOLS | ||
23 | * @reset_polarity: active state for the gpio_reset pin, 0 or 1 | ||
24 | * @set_power: an additional callback to the board setup code | ||
25 | * to be called after enabling and before disabling | ||
26 | * the sensor's supply regulators | ||
27 | */ | ||
28 | struct m5mols_platform_data { | ||
29 | int irq; | ||
30 | int gpio_reset; | ||
31 | u8 reset_polarity; | ||
32 | int (*set_power)(struct device *dev, int on); | ||
33 | }; | ||
34 | |||
35 | #endif /* MEDIA_M5MOLS_H */ | ||
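Board code supplies this structure alongside the sensor's I2C device registration. A sketch is below; the GPIO numbers are placeholders and the power callback is an empty stand-in for whatever board-specific supply switching is needed.

    #include <media/m5mols.h>

    /* Placeholder board-side power switch; GPIO numbers are illustrative. */
    static int example_m5mols_set_power(struct device *dev, int on)
    {
    	/* toggle a board-specific supply or enable line here */
    	return 0;
    }

    static struct m5mols_platform_data example_m5mols_pdata = {
    	.irq		= 17,
    	.gpio_reset	= 18,
    	.reset_polarity	= 0,
    	.set_power	= example_m5mols_set_power,
    };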
diff --git a/include/media/videobuf-dvb.h b/include/media/videobuf-dvb.h index 07cf4b9d0a65..bf365721d6b0 100644 --- a/include/media/videobuf-dvb.h +++ b/include/media/videobuf-dvb.h | |||
@@ -4,6 +4,9 @@ | |||
4 | #include <dvb_net.h> | 4 | #include <dvb_net.h> |
5 | #include <dvb_frontend.h> | 5 | #include <dvb_frontend.h> |
6 | 6 | ||
7 | #ifndef _VIDEOBUF_DVB_H_ | ||
8 | #define _VIDEOBUF_DVB_H_ | ||
9 | |||
7 | struct videobuf_dvb { | 10 | struct videobuf_dvb { |
8 | /* filling that the job of the driver */ | 11 | /* filling that the job of the driver */ |
9 | char *name; | 12 | char *name; |
@@ -54,6 +57,7 @@ void videobuf_dvb_dealloc_frontends(struct videobuf_dvb_frontends *f); | |||
54 | struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_frontends *f, int id); | 57 | struct videobuf_dvb_frontend * videobuf_dvb_get_frontend(struct videobuf_dvb_frontends *f, int id); |
55 | int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p); | 58 | int videobuf_dvb_find_frontend(struct videobuf_dvb_frontends *f, struct dvb_frontend *p); |
56 | 59 | ||
60 | #endif /* _VIDEOBUF_DVB_H_ */ | ||
57 | 61 | ||
58 | /* | 62 | /* |
59 | * Local variables: | 63 | * Local variables: |
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 4fff432aeade..481f856c650f 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h | |||
@@ -797,7 +797,8 @@ struct netns_ipvs { | |||
797 | struct list_head rs_table[IP_VS_RTAB_SIZE]; | 797 | struct list_head rs_table[IP_VS_RTAB_SIZE]; |
798 | /* ip_vs_app */ | 798 | /* ip_vs_app */ |
799 | struct list_head app_list; | 799 | struct list_head app_list; |
800 | 800 | /* ip_vs_ftp */ | |
801 | struct ip_vs_app *ftp_app; | ||
801 | /* ip_vs_proto */ | 802 | /* ip_vs_proto */ |
802 | #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */ | 803 | #define IP_VS_PROTO_TAB_SIZE 32 /* must be power of 2 */ |
803 | struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE]; | 804 | struct ip_vs_proto_data *proto_data_table[IP_VS_PROTO_TAB_SIZE]; |
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h index dcc8f5749d3f..2bf9ed9ef26b 100644 --- a/include/net/net_namespace.h +++ b/include/net/net_namespace.h | |||
@@ -7,6 +7,7 @@ | |||
7 | #include <asm/atomic.h> | 7 | #include <asm/atomic.h> |
8 | #include <linux/workqueue.h> | 8 | #include <linux/workqueue.h> |
9 | #include <linux/list.h> | 9 | #include <linux/list.h> |
10 | #include <linux/sysctl.h> | ||
10 | 11 | ||
11 | #include <net/netns/core.h> | 12 | #include <net/netns/core.h> |
12 | #include <net/netns/mib.h> | 13 | #include <net/netns/mib.h> |
diff --git a/include/net/net_ratelimit.h b/include/net/net_ratelimit.h new file mode 100644 index 000000000000..7727b4247daf --- /dev/null +++ b/include/net/net_ratelimit.h | |||
@@ -0,0 +1,8 @@ | |||
1 | #ifndef _LINUX_NET_RATELIMIT_H | ||
2 | #define _LINUX_NET_RATELIMIT_H | ||
3 | |||
4 | #include <linux/ratelimit.h> | ||
5 | |||
6 | extern struct ratelimit_state net_ratelimit_state; | ||
7 | |||
8 | #endif /* _LINUX_NET_RATELIMIT_H */ | ||
diff --git a/include/rdma/Kbuild b/include/rdma/Kbuild index e7c043216558..ea56f76c0c22 100644 --- a/include/rdma/Kbuild +++ b/include/rdma/Kbuild | |||
@@ -1 +1,6 @@ | |||
1 | header-y += ib_user_cm.h | ||
1 | header-y += ib_user_mad.h | 2 | header-y += ib_user_mad.h |
3 | header-y += ib_user_sa.h | ||
4 | header-y += ib_user_verbs.h | ||
5 | header-y += rdma_netlink.h | ||
6 | header-y += rdma_user_cm.h | ||
diff --git a/include/rdma/ib_user_cm.h b/include/rdma/ib_user_cm.h index bd3d380781e0..f79014aa28f9 100644 --- a/include/rdma/ib_user_cm.h +++ b/include/rdma/ib_user_cm.h | |||
@@ -34,6 +34,7 @@ | |||
34 | #ifndef IB_USER_CM_H | 34 | #ifndef IB_USER_CM_H |
35 | #define IB_USER_CM_H | 35 | #define IB_USER_CM_H |
36 | 36 | ||
37 | #include <linux/types.h> | ||
37 | #include <rdma/ib_user_sa.h> | 38 | #include <rdma/ib_user_sa.h> |
38 | 39 | ||
39 | #define IB_USER_CM_ABI_VERSION 5 | 40 | #define IB_USER_CM_ABI_VERSION 5 |
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h index 169f7a53fb0c..26977c149c41 100644 --- a/include/rdma/rdma_cm.h +++ b/include/rdma/rdma_cm.h | |||
@@ -111,6 +111,20 @@ struct rdma_cm_event { | |||
111 | } param; | 111 | } param; |
112 | }; | 112 | }; |
113 | 113 | ||
114 | enum rdma_cm_state { | ||
115 | RDMA_CM_IDLE, | ||
116 | RDMA_CM_ADDR_QUERY, | ||
117 | RDMA_CM_ADDR_RESOLVED, | ||
118 | RDMA_CM_ROUTE_QUERY, | ||
119 | RDMA_CM_ROUTE_RESOLVED, | ||
120 | RDMA_CM_CONNECT, | ||
121 | RDMA_CM_DISCONNECT, | ||
122 | RDMA_CM_ADDR_BOUND, | ||
123 | RDMA_CM_LISTEN, | ||
124 | RDMA_CM_DEVICE_REMOVAL, | ||
125 | RDMA_CM_DESTROYING | ||
126 | }; | ||
127 | |||
114 | struct rdma_cm_id; | 128 | struct rdma_cm_id; |
115 | 129 | ||
116 | /** | 130 | /** |
@@ -130,6 +144,7 @@ struct rdma_cm_id { | |||
130 | rdma_cm_event_handler event_handler; | 144 | rdma_cm_event_handler event_handler; |
131 | struct rdma_route route; | 145 | struct rdma_route route; |
132 | enum rdma_port_space ps; | 146 | enum rdma_port_space ps; |
147 | enum ib_qp_type qp_type; | ||
133 | u8 port_num; | 148 | u8 port_num; |
134 | }; | 149 | }; |
135 | 150 | ||
@@ -140,9 +155,11 @@ struct rdma_cm_id { | |||
140 | * returned rdma_id. | 155 | * returned rdma_id. |
141 | * @context: User specified context associated with the id. | 156 | * @context: User specified context associated with the id. |
142 | * @ps: RDMA port space. | 157 | * @ps: RDMA port space. |
158 | * @qp_type: type of queue pair associated with the id. | ||
143 | */ | 159 | */ |
144 | struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, | 160 | struct rdma_cm_id *rdma_create_id(rdma_cm_event_handler event_handler, |
145 | void *context, enum rdma_port_space ps); | 161 | void *context, enum rdma_port_space ps, |
162 | enum ib_qp_type qp_type); | ||
146 | 163 | ||
147 | /** | 164 | /** |
148 | * rdma_destroy_id - Destroys an RDMA identifier. | 165 | * rdma_destroy_id - Destroys an RDMA identifier. |
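Existing rdma_create_id() callers gain one argument: the QP type the ID will carry. A sketch of the updated call for a connection-oriented consumer; RDMA_PS_TCP and IB_QPT_RC come from the existing rdma/ib headers, not from this hunk.

    #include <rdma/rdma_cm.h>

    /* Sketch: a reliable-connected consumer now states its QP type up front. */
    static struct rdma_cm_id *example_create_id(rdma_cm_event_handler handler,
    					    void *ctx)
    {
    	return rdma_create_id(handler, ctx, RDMA_PS_TCP, IB_QPT_RC);
    }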
diff --git a/include/rdma/rdma_netlink.h b/include/rdma/rdma_netlink.h new file mode 100644 index 000000000000..3c5363ab867b --- /dev/null +++ b/include/rdma/rdma_netlink.h | |||
@@ -0,0 +1,92 @@ | |||
1 | #ifndef _RDMA_NETLINK_H | ||
2 | #define _RDMA_NETLINK_H | ||
3 | |||
4 | #include <linux/types.h> | ||
5 | |||
6 | enum { | ||
7 | RDMA_NL_RDMA_CM = 1 | ||
8 | }; | ||
9 | |||
10 | #define RDMA_NL_GET_CLIENT(type) ((type & (((1 << 6) - 1) << 10)) >> 10) | ||
11 | #define RDMA_NL_GET_OP(type) (type & ((1 << 10) - 1)) | ||
12 | #define RDMA_NL_GET_TYPE(client, op) ((client << 10) + op) | ||
13 | |||
14 | enum { | ||
15 | RDMA_NL_RDMA_CM_ID_STATS = 0, | ||
16 | RDMA_NL_RDMA_CM_NUM_OPS | ||
17 | }; | ||
18 | |||
19 | enum { | ||
20 | RDMA_NL_RDMA_CM_ATTR_SRC_ADDR = 1, | ||
21 | RDMA_NL_RDMA_CM_ATTR_DST_ADDR, | ||
22 | RDMA_NL_RDMA_CM_NUM_ATTR, | ||
23 | }; | ||
24 | |||
25 | struct rdma_cm_id_stats { | ||
26 | __u32 qp_num; | ||
27 | __u32 bound_dev_if; | ||
28 | __u32 port_space; | ||
29 | __s32 pid; | ||
30 | __u8 cm_state; | ||
31 | __u8 node_type; | ||
32 | __u8 port_num; | ||
33 | __u8 qp_type; | ||
34 | }; | ||
35 | |||
36 | #ifdef __KERNEL__ | ||
37 | |||
38 | #include <linux/netlink.h> | ||
39 | |||
40 | struct ibnl_client_cbs { | ||
41 | int (*dump)(struct sk_buff *skb, struct netlink_callback *nlcb); | ||
42 | }; | ||
43 | |||
44 | int ibnl_init(void); | ||
45 | void ibnl_cleanup(void); | ||
46 | |||
47 | /** | ||
48 | * Add a client to the list of IB netlink exporters. | ||
49 | * @index: Index of the added client | ||
50 | * @nops: Number of supported ops by the added client. | ||
51 | * @cb_table: A table for op->callback | ||
52 | * | ||
53 | * Returns 0 on success or a negative error code. | ||
54 | */ | ||
55 | int ibnl_add_client(int index, int nops, | ||
56 | const struct ibnl_client_cbs cb_table[]); | ||
57 | |||
58 | /** | ||
59 | * Remove a client from IB netlink. | ||
60 | * @index: Index of the removed IB client. | ||
61 | * | ||
62 | * Returns 0 on success or a negative error code. | ||
63 | */ | ||
64 | int ibnl_remove_client(int index); | ||
65 | |||
66 | /** | ||
67 | * Put a new message in a supplied skb. | ||
68 | * @skb: The netlink skb. | ||
69 | * @nlh: Pointer to put the header of the new netlink message. | ||
70 | * @seq: The message sequence number. | ||
71 | * @len: The requested message length to allocate. | ||
72 | * @client: Calling IB netlink client. | ||
73 | * @op: message content op. | ||
74 | * Returns the allocated buffer on success and NULL on failure. | ||
75 | */ | ||
76 | void *ibnl_put_msg(struct sk_buff *skb, struct nlmsghdr **nlh, int seq, | ||
77 | int len, int client, int op); | ||
78 | /** | ||
79 | * Put a new attribute in a supplied skb. | ||
80 | * @skb: The netlink skb. | ||
81 | * @nlh: Header of the netlink message to append the attribute to. | ||
82 | * @len: The length of the attribute data. | ||
83 | * @data: The attribute data to put. | ||
84 | * @type: The attribute type. | ||
85 | * Returns 0 on success and a negative error code on failure. | ||
86 | */ | ||
87 | int ibnl_put_attr(struct sk_buff *skb, struct nlmsghdr *nlh, | ||
88 | int len, void *data, int type); | ||
89 | |||
90 | #endif /* __KERNEL__ */ | ||
91 | |||
92 | #endif /* _RDMA_NETLINK_H */ | ||
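The GET/TYPE macros pack a 6-bit client index and a 10-bit op into the netlink message type, so encoding and decoding round-trip losslessly. A small sanity-check sketch using the constants defined above:

    #include <linux/bug.h>
    #include <rdma/rdma_netlink.h>

    /* Sketch: client index lives in the upper 6 bits, op in the lower 10. */
    static void example_type_roundtrip(void)
    {
    	int type = RDMA_NL_GET_TYPE(RDMA_NL_RDMA_CM, RDMA_NL_RDMA_CM_ID_STATS);

    	BUG_ON(RDMA_NL_GET_CLIENT(type) != RDMA_NL_RDMA_CM);
    	BUG_ON(RDMA_NL_GET_OP(type) != RDMA_NL_RDMA_CM_ID_STATS);
    }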
diff --git a/include/trace/events/gpio.h b/include/trace/events/gpio.h new file mode 100644 index 000000000000..927a8ad9e51b --- /dev/null +++ b/include/trace/events/gpio.h | |||
@@ -0,0 +1,56 @@ | |||
1 | #undef TRACE_SYSTEM | ||
2 | #define TRACE_SYSTEM gpio | ||
3 | |||
4 | #if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ) | ||
5 | #define _TRACE_GPIO_H | ||
6 | |||
7 | #include <linux/tracepoint.h> | ||
8 | |||
9 | TRACE_EVENT(gpio_direction, | ||
10 | |||
11 | TP_PROTO(unsigned gpio, int in, int err), | ||
12 | |||
13 | TP_ARGS(gpio, in, err), | ||
14 | |||
15 | TP_STRUCT__entry( | ||
16 | __field(unsigned, gpio) | ||
17 | __field(int, in) | ||
18 | __field(int, err) | ||
19 | ), | ||
20 | |||
21 | TP_fast_assign( | ||
22 | __entry->gpio = gpio; | ||
23 | __entry->in = in; | ||
24 | __entry->err = err; | ||
25 | ), | ||
26 | |||
27 | TP_printk("%u %3s (%d)", __entry->gpio, | ||
28 | __entry->in ? "in" : "out", __entry->err) | ||
29 | ); | ||
30 | |||
31 | TRACE_EVENT(gpio_value, | ||
32 | |||
33 | TP_PROTO(unsigned gpio, int get, int value), | ||
34 | |||
35 | TP_ARGS(gpio, get, value), | ||
36 | |||
37 | TP_STRUCT__entry( | ||
38 | __field(unsigned, gpio) | ||
39 | __field(int, get) | ||
40 | __field(int, value) | ||
41 | ), | ||
42 | |||
43 | TP_fast_assign( | ||
44 | __entry->gpio = gpio; | ||
45 | __entry->get = get; | ||
46 | __entry->value = value; | ||
47 | ), | ||
48 | |||
49 | TP_printk("%u %3s %d", __entry->gpio, | ||
50 | __entry->get ? "get" : "set", __entry->value) | ||
51 | ); | ||
52 | |||
53 | #endif /* if !defined(_TRACE_GPIO_H) || defined(TRACE_HEADER_MULTI_READ) */ | ||
54 | |||
55 | /* This part must be outside protection */ | ||
56 | #include <trace/define_trace.h> | ||
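Each TRACE_EVENT() above generates a trace_<name>() call that gpiolib or a driver can invoke at the corresponding point. A sketch for the direction event; the low-level direction switch is a stub standing in for real hardware access.

    #include <trace/events/gpio.h>

    /* Stub standing in for a real controller's direction-input operation. */
    static int example_hw_direction_input(unsigned gpio)
    {
    	return 0;
    }

    /* Sketch: TRACE_EVENT(gpio_direction, ...) generates
     * trace_gpio_direction(), called here after switching a line to input. */
    static int example_direction_input(unsigned gpio)
    {
    	int err = example_hw_direction_input(gpio);

    	trace_gpio_direction(gpio, 1 /* in */, err);
    	return err;
    }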
diff --git a/include/xen/interface/xen.h b/include/xen/interface/xen.h index b33257bc7e83..70213b4515eb 100644 --- a/include/xen/interface/xen.h +++ b/include/xen/interface/xen.h | |||
@@ -58,6 +58,7 @@ | |||
58 | #define __HYPERVISOR_event_channel_op 32 | 58 | #define __HYPERVISOR_event_channel_op 32 |
59 | #define __HYPERVISOR_physdev_op 33 | 59 | #define __HYPERVISOR_physdev_op 33 |
60 | #define __HYPERVISOR_hvm_op 34 | 60 | #define __HYPERVISOR_hvm_op 34 |
61 | #define __HYPERVISOR_tmem_op 38 | ||
61 | 62 | ||
62 | /* Architecture-specific hypercall definitions. */ | 63 | /* Architecture-specific hypercall definitions. */ |
63 | #define __HYPERVISOR_arch_0 48 | 64 | #define __HYPERVISOR_arch_0 48 |
@@ -461,6 +462,27 @@ typedef uint8_t xen_domain_handle_t[16]; | |||
461 | #define __mk_unsigned_long(x) x ## UL | 462 | #define __mk_unsigned_long(x) x ## UL |
462 | #define mk_unsigned_long(x) __mk_unsigned_long(x) | 463 | #define mk_unsigned_long(x) __mk_unsigned_long(x) |
463 | 464 | ||
465 | #define TMEM_SPEC_VERSION 1 | ||
466 | |||
467 | struct tmem_op { | ||
468 | uint32_t cmd; | ||
469 | int32_t pool_id; | ||
470 | union { | ||
471 | struct { /* for cmd == TMEM_NEW_POOL */ | ||
472 | uint64_t uuid[2]; | ||
473 | uint32_t flags; | ||
474 | } new; | ||
475 | struct { | ||
476 | uint64_t oid[3]; | ||
477 | uint32_t index; | ||
478 | uint32_t tmem_offset; | ||
479 | uint32_t pfn_offset; | ||
480 | uint32_t len; | ||
481 | GUEST_HANDLE(void) gmfn; /* guest machine page frame */ | ||
482 | } gen; | ||
483 | } u; | ||
484 | }; | ||
485 | |||
464 | #else /* __ASSEMBLY__ */ | 486 | #else /* __ASSEMBLY__ */ |
465 | 487 | ||
466 | /* In assembly code we cannot use C numeric constant suffixes. */ | 488 | /* In assembly code we cannot use C numeric constant suffixes. */ |
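The generic sub-op identifies a guest page by pool id, object id and index. A sketch of filling the structure for a put follows; the command number is a placeholder (the real TMEM command values are defined by the tmem ABI, not this hunk), and set_xen_guest_handle() is the existing guest-handle helper from the Xen interface headers.

    #include <xen/interface/xen.h>

    /* Placeholder for the real put-page command number from the tmem ABI. */
    #define EXAMPLE_TMEM_PUT_PAGE 4

    /* Sketch: describe one guest machine frame to tmem. */
    static void example_fill_tmem_op(struct tmem_op *op, int32_t pool,
    				 uint64_t oid, uint32_t index,
    				 unsigned long gmfn)
    {
    	op->cmd = EXAMPLE_TMEM_PUT_PAGE;
    	op->pool_id = pool;
    	op->u.gen.oid[0] = oid;
    	op->u.gen.oid[1] = 0;
    	op->u.gen.oid[2] = 0;
    	op->u.gen.index = index;
    	op->u.gen.tmem_offset = 0;
    	op->u.gen.pfn_offset = 0;
    	op->u.gen.len = 0;
    	set_xen_guest_handle(op->u.gen.gmfn, (void *)gmfn);
    }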
diff --git a/init/Kconfig b/init/Kconfig index 332aac649966..ebafac4231ee 100644 --- a/init/Kconfig +++ b/init/Kconfig | |||
@@ -589,14 +589,6 @@ config CGROUP_DEBUG | |||
589 | 589 | ||
590 | Say N if unsure. | 590 | Say N if unsure. |
591 | 591 | ||
592 | config CGROUP_NS | ||
593 | bool "Namespace cgroup subsystem" | ||
594 | help | ||
595 | Provides a simple namespace cgroup subsystem to | ||
596 | provide hierarchical naming of sets of namespaces, | ||
597 | for instance virtual servers and checkpoint/restart | ||
598 | jobs. | ||
599 | |||
600 | config CGROUP_FREEZER | 592 | config CGROUP_FREEZER |
601 | bool "Freezer cgroup subsystem" | 593 | bool "Freezer cgroup subsystem" |
602 | help | 594 | help |
@@ -347,7 +347,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) | |||
347 | struct file * file; | 347 | struct file * file; |
348 | char name[13]; | 348 | char name[13]; |
349 | int id; | 349 | int id; |
350 | int acctflag = 0; | 350 | vm_flags_t acctflag = 0; |
351 | 351 | ||
352 | if (size < SHMMIN || size > ns->shm_ctlmax) | 352 | if (size < SHMMIN || size > ns->shm_ctlmax) |
353 | return -EINVAL; | 353 | return -EINVAL; |
diff --git a/kernel/Makefile b/kernel/Makefile index e9cf19155b46..2d64cfcc8b42 100644 --- a/kernel/Makefile +++ b/kernel/Makefile | |||
@@ -61,7 +61,6 @@ obj-$(CONFIG_COMPAT) += compat.o | |||
61 | obj-$(CONFIG_CGROUPS) += cgroup.o | 61 | obj-$(CONFIG_CGROUPS) += cgroup.o |
62 | obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o | 62 | obj-$(CONFIG_CGROUP_FREEZER) += cgroup_freezer.o |
63 | obj-$(CONFIG_CPUSETS) += cpuset.o | 63 | obj-$(CONFIG_CPUSETS) += cpuset.o |
64 | obj-$(CONFIG_CGROUP_NS) += ns_cgroup.o | ||
65 | obj-$(CONFIG_UTS_NS) += utsname.o | 64 | obj-$(CONFIG_UTS_NS) += utsname.o |
66 | obj-$(CONFIG_USER_NS) += user_namespace.o | 65 | obj-$(CONFIG_USER_NS) += user_namespace.o |
67 | obj-$(CONFIG_PID_NS) += pid_namespace.o | 66 | obj-$(CONFIG_PID_NS) += pid_namespace.o |
diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 909a35510af5..2731d115d725 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c | |||
@@ -57,6 +57,7 @@ | |||
57 | #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */ | 57 | #include <linux/vmalloc.h> /* TODO: replace with more sophisticated array */ |
58 | #include <linux/eventfd.h> | 58 | #include <linux/eventfd.h> |
59 | #include <linux/poll.h> | 59 | #include <linux/poll.h> |
60 | #include <linux/flex_array.h> /* used in cgroup_attach_proc */ | ||
60 | 61 | ||
61 | #include <asm/atomic.h> | 62 | #include <asm/atomic.h> |
62 | 63 | ||
@@ -1735,6 +1736,76 @@ int cgroup_path(const struct cgroup *cgrp, char *buf, int buflen) | |||
1735 | } | 1736 | } |
1736 | EXPORT_SYMBOL_GPL(cgroup_path); | 1737 | EXPORT_SYMBOL_GPL(cgroup_path); |
1737 | 1738 | ||
1739 | /* | ||
1740 | * cgroup_task_migrate - move a task from one cgroup to another. | ||
1741 | * | ||
1742 | * 'guarantee' is set if the caller promises that a new css_set for the task | ||
1743 | * will already exist. If not set, this function might sleep, and can fail with | ||
1744 | * -ENOMEM. Otherwise, it can only fail with -ESRCH. | ||
1745 | */ | ||
1746 | static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp, | ||
1747 | struct task_struct *tsk, bool guarantee) | ||
1748 | { | ||
1749 | struct css_set *oldcg; | ||
1750 | struct css_set *newcg; | ||
1751 | |||
1752 | /* | ||
1753 | * get old css_set. we need to take task_lock and refcount it, because | ||
1754 | * an exiting task can change its css_set to init_css_set and drop its | ||
1755 | * old one without taking cgroup_mutex. | ||
1756 | */ | ||
1757 | task_lock(tsk); | ||
1758 | oldcg = tsk->cgroups; | ||
1759 | get_css_set(oldcg); | ||
1760 | task_unlock(tsk); | ||
1761 | |||
1762 | /* locate or allocate a new css_set for this task. */ | ||
1763 | if (guarantee) { | ||
1764 | /* we know the css_set we want already exists. */ | ||
1765 | struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT]; | ||
1766 | read_lock(&css_set_lock); | ||
1767 | newcg = find_existing_css_set(oldcg, cgrp, template); | ||
1768 | BUG_ON(!newcg); | ||
1769 | get_css_set(newcg); | ||
1770 | read_unlock(&css_set_lock); | ||
1771 | } else { | ||
1772 | might_sleep(); | ||
1773 | /* find_css_set will give us newcg already referenced. */ | ||
1774 | newcg = find_css_set(oldcg, cgrp); | ||
1775 | if (!newcg) { | ||
1776 | put_css_set(oldcg); | ||
1777 | return -ENOMEM; | ||
1778 | } | ||
1779 | } | ||
1780 | put_css_set(oldcg); | ||
1781 | |||
1782 | /* if PF_EXITING is set, the tsk->cgroups pointer is no longer safe. */ | ||
1783 | task_lock(tsk); | ||
1784 | if (tsk->flags & PF_EXITING) { | ||
1785 | task_unlock(tsk); | ||
1786 | put_css_set(newcg); | ||
1787 | return -ESRCH; | ||
1788 | } | ||
1789 | rcu_assign_pointer(tsk->cgroups, newcg); | ||
1790 | task_unlock(tsk); | ||
1791 | |||
1792 | /* Update the css_set linked lists if we're using them */ | ||
1793 | write_lock(&css_set_lock); | ||
1794 | if (!list_empty(&tsk->cg_list)) | ||
1795 | list_move(&tsk->cg_list, &newcg->tasks); | ||
1796 | write_unlock(&css_set_lock); | ||
1797 | |||
1798 | /* | ||
1799 | * We just gained a reference on oldcg by taking it from the task. As | ||
1800 | * trading it for newcg is protected by cgroup_mutex, we're safe to drop | ||
1801 | * it here; it will be freed under RCU. | ||
1802 | */ | ||
1803 | put_css_set(oldcg); | ||
1804 | |||
1805 | set_bit(CGRP_RELEASABLE, &oldcgrp->flags); | ||
1806 | return 0; | ||
1807 | } | ||
1808 | |||
1738 | /** | 1809 | /** |
1739 | * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp' | 1810 | * cgroup_attach_task - attach task 'tsk' to cgroup 'cgrp' |
1740 | * @cgrp: the cgroup the task is attaching to | 1811 | * @cgrp: the cgroup the task is attaching to |
@@ -1745,11 +1816,9 @@ EXPORT_SYMBOL_GPL(cgroup_path); | |||
1745 | */ | 1816 | */ |
1746 | int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) | 1817 | int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) |
1747 | { | 1818 | { |
1748 | int retval = 0; | 1819 | int retval; |
1749 | struct cgroup_subsys *ss, *failed_ss = NULL; | 1820 | struct cgroup_subsys *ss, *failed_ss = NULL; |
1750 | struct cgroup *oldcgrp; | 1821 | struct cgroup *oldcgrp; |
1751 | struct css_set *cg; | ||
1752 | struct css_set *newcg; | ||
1753 | struct cgroupfs_root *root = cgrp->root; | 1822 | struct cgroupfs_root *root = cgrp->root; |
1754 | 1823 | ||
1755 | /* Nothing to do if the task is already in that cgroup */ | 1824 | /* Nothing to do if the task is already in that cgroup */ |
@@ -1759,7 +1828,7 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) | |||
1759 | 1828 | ||
1760 | for_each_subsys(root, ss) { | 1829 | for_each_subsys(root, ss) { |
1761 | if (ss->can_attach) { | 1830 | if (ss->can_attach) { |
1762 | retval = ss->can_attach(ss, cgrp, tsk, false); | 1831 | retval = ss->can_attach(ss, cgrp, tsk); |
1763 | if (retval) { | 1832 | if (retval) { |
1764 | /* | 1833 | /* |
1765 | * Remember on which subsystem the can_attach() | 1834 | * Remember on which subsystem the can_attach() |
@@ -1771,46 +1840,29 @@ int cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) | |||
1771 | goto out; | 1840 | goto out; |
1772 | } | 1841 | } |
1773 | } | 1842 | } |
1843 | if (ss->can_attach_task) { | ||
1844 | retval = ss->can_attach_task(cgrp, tsk); | ||
1845 | if (retval) { | ||
1846 | failed_ss = ss; | ||
1847 | goto out; | ||
1848 | } | ||
1849 | } | ||
1774 | } | 1850 | } |
1775 | 1851 | ||
1776 | task_lock(tsk); | 1852 | retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, false); |
1777 | cg = tsk->cgroups; | 1853 | if (retval) |
1778 | get_css_set(cg); | ||
1779 | task_unlock(tsk); | ||
1780 | /* | ||
1781 | * Locate or allocate a new css_set for this task, | ||
1782 | * based on its final set of cgroups | ||
1783 | */ | ||
1784 | newcg = find_css_set(cg, cgrp); | ||
1785 | put_css_set(cg); | ||
1786 | if (!newcg) { | ||
1787 | retval = -ENOMEM; | ||
1788 | goto out; | ||
1789 | } | ||
1790 | |||
1791 | task_lock(tsk); | ||
1792 | if (tsk->flags & PF_EXITING) { | ||
1793 | task_unlock(tsk); | ||
1794 | put_css_set(newcg); | ||
1795 | retval = -ESRCH; | ||
1796 | goto out; | 1854 | goto out; |
1797 | } | ||
1798 | rcu_assign_pointer(tsk->cgroups, newcg); | ||
1799 | task_unlock(tsk); | ||
1800 | |||
1801 | /* Update the css_set linked lists if we're using them */ | ||
1802 | write_lock(&css_set_lock); | ||
1803 | if (!list_empty(&tsk->cg_list)) | ||
1804 | list_move(&tsk->cg_list, &newcg->tasks); | ||
1805 | write_unlock(&css_set_lock); | ||
1806 | 1855 | ||
1807 | for_each_subsys(root, ss) { | 1856 | for_each_subsys(root, ss) { |
1857 | if (ss->pre_attach) | ||
1858 | ss->pre_attach(cgrp); | ||
1859 | if (ss->attach_task) | ||
1860 | ss->attach_task(cgrp, tsk); | ||
1808 | if (ss->attach) | 1861 | if (ss->attach) |
1809 | ss->attach(ss, cgrp, oldcgrp, tsk, false); | 1862 | ss->attach(ss, cgrp, oldcgrp, tsk); |
1810 | } | 1863 | } |
1811 | set_bit(CGRP_RELEASABLE, &oldcgrp->flags); | 1864 | |
1812 | synchronize_rcu(); | 1865 | synchronize_rcu(); |
1813 | put_css_set(cg); | ||
1814 | 1866 | ||
1815 | /* | 1867 | /* |
1816 | * wake up rmdir() waiter. the rmdir should fail since the cgroup | 1868 | * wake up rmdir() waiter. the rmdir should fail since the cgroup |
@@ -1829,7 +1881,7 @@ out: | |||
1829 | */ | 1881 | */ |
1830 | break; | 1882 | break; |
1831 | if (ss->cancel_attach) | 1883 | if (ss->cancel_attach) |
1832 | ss->cancel_attach(ss, cgrp, tsk, false); | 1884 | ss->cancel_attach(ss, cgrp, tsk); |
1833 | } | 1885 | } |
1834 | } | 1886 | } |
1835 | return retval; | 1887 | return retval; |
@@ -1860,49 +1912,370 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk) | |||
1860 | EXPORT_SYMBOL_GPL(cgroup_attach_task_all); | 1912 | EXPORT_SYMBOL_GPL(cgroup_attach_task_all); |
1861 | 1913 | ||
1862 | /* | 1914 | /* |
1863 | * Attach task with pid 'pid' to cgroup 'cgrp'. Call with cgroup_mutex | 1915 | * cgroup_attach_proc works in two stages, the first of which prefetches all |
1864 | * held. May take task_lock of task | 1916 | * new css_sets needed (to make sure we have enough memory before committing |
1917 | * to the move) and stores them in a list of entries of the following type. | ||
1918 | * TODO: possible optimization: use css_set->rcu_head for chaining instead | ||
1919 | */ | ||
1920 | struct cg_list_entry { | ||
1921 | struct css_set *cg; | ||
1922 | struct list_head links; | ||
1923 | }; | ||
1924 | |||
1925 | static bool css_set_check_fetched(struct cgroup *cgrp, | ||
1926 | struct task_struct *tsk, struct css_set *cg, | ||
1927 | struct list_head *newcg_list) | ||
1928 | { | ||
1929 | struct css_set *newcg; | ||
1930 | struct cg_list_entry *cg_entry; | ||
1931 | struct cgroup_subsys_state *template[CGROUP_SUBSYS_COUNT]; | ||
1932 | |||
1933 | read_lock(&css_set_lock); | ||
1934 | newcg = find_existing_css_set(cg, cgrp, template); | ||
1935 | if (newcg) | ||
1936 | get_css_set(newcg); | ||
1937 | read_unlock(&css_set_lock); | ||
1938 | |||
1939 | /* doesn't exist at all? */ | ||
1940 | if (!newcg) | ||
1941 | return false; | ||
1942 | /* see if it's already in the list */ | ||
1943 | list_for_each_entry(cg_entry, newcg_list, links) { | ||
1944 | if (cg_entry->cg == newcg) { | ||
1945 | put_css_set(newcg); | ||
1946 | return true; | ||
1947 | } | ||
1948 | } | ||
1949 | |||
1950 | /* not found */ | ||
1951 | put_css_set(newcg); | ||
1952 | return false; | ||
1953 | } | ||
1954 | |||
1955 | /* | ||
1956 | * Find the new css_set and store it in the list in preparation for moving the | ||
1957 | * given task to the given cgroup. Returns 0 or -ENOMEM. | ||
1958 | */ | ||
1959 | static int css_set_prefetch(struct cgroup *cgrp, struct css_set *cg, | ||
1960 | struct list_head *newcg_list) | ||
1961 | { | ||
1962 | struct css_set *newcg; | ||
1963 | struct cg_list_entry *cg_entry; | ||
1964 | |||
1965 | /* ensure a new css_set will exist for this thread */ | ||
1966 | newcg = find_css_set(cg, cgrp); | ||
1967 | if (!newcg) | ||
1968 | return -ENOMEM; | ||
1969 | /* add it to the list */ | ||
1970 | cg_entry = kmalloc(sizeof(struct cg_list_entry), GFP_KERNEL); | ||
1971 | if (!cg_entry) { | ||
1972 | put_css_set(newcg); | ||
1973 | return -ENOMEM; | ||
1974 | } | ||
1975 | cg_entry->cg = newcg; | ||
1976 | list_add(&cg_entry->links, newcg_list); | ||
1977 | return 0; | ||
1978 | } | ||
1979 | |||
1980 | /** | ||
1981 | * cgroup_attach_proc - attach all threads in a threadgroup to a cgroup | ||
1982 | * @cgrp: the cgroup to attach to | ||
1983 | * @leader: the threadgroup leader task_struct of the group to be attached | ||
1984 | * | ||
1985 | * Call holding cgroup_mutex and the threadgroup_fork_lock of the leader. Will | ||
1986 | * take task_lock of each thread in leader's threadgroup individually in turn. | ||
1987 | */ | ||
1988 | int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader) | ||
1989 | { | ||
1990 | int retval, i, group_size; | ||
1991 | struct cgroup_subsys *ss, *failed_ss = NULL; | ||
1992 | bool cancel_failed_ss = false; | ||
1993 | /* guaranteed to be initialized later, but the compiler needs this */ | ||
1994 | struct cgroup *oldcgrp = NULL; | ||
1995 | struct css_set *oldcg; | ||
1996 | struct cgroupfs_root *root = cgrp->root; | ||
1997 | /* threadgroup list cursor and array */ | ||
1998 | struct task_struct *tsk; | ||
1999 | struct flex_array *group; | ||
2000 | /* | ||
2001 | * we need to make sure we have css_sets for all the tasks we're | ||
2002 | * going to move -before- we actually start moving them, so that in | ||
2003 | * case we get an ENOMEM we can bail out before making any changes. | ||
2004 | */ | ||
2005 | struct list_head newcg_list; | ||
2006 | struct cg_list_entry *cg_entry, *temp_nobe; | ||
2007 | |||
2008 | /* | ||
2009 | * step 0: in order to do expensive, possibly blocking operations for | ||
2010 | * every thread, we cannot iterate the thread group list, since it needs | ||
2011 | * rcu or tasklist locked. instead, build an array of all threads in the | ||
2012 | * group - threadgroup_fork_lock prevents new threads from appearing, | ||
2013 | * and if threads exit, this will just be an over-estimate. | ||
2014 | */ | ||
2015 | group_size = get_nr_threads(leader); | ||
2016 | /* flex_array supports very large thread-groups better than kmalloc. */ | ||
2017 | group = flex_array_alloc(sizeof(struct task_struct *), group_size, | ||
2018 | GFP_KERNEL); | ||
2019 | if (!group) | ||
2020 | return -ENOMEM; | ||
2021 | /* pre-allocate to guarantee space while iterating in rcu read-side. */ | ||
2022 | retval = flex_array_prealloc(group, 0, group_size - 1, GFP_KERNEL); | ||
2023 | if (retval) | ||
2024 | goto out_free_group_list; | ||
2025 | |||
2026 | /* prevent changes to the threadgroup list while we take a snapshot. */ | ||
2027 | rcu_read_lock(); | ||
2028 | if (!thread_group_leader(leader)) { | ||
2029 | /* | ||
2030 | * a race with de_thread from another thread's exec() may strip | ||
2031 | * us of our leadership, making while_each_thread unsafe to use | ||
2032 | * on this task. if this happens, there is no choice but to | ||
2033 | * throw this task away and try again (from cgroup_procs_write); | ||
2034 | * this is "double-double-toil-and-trouble-check locking". | ||
2035 | */ | ||
2036 | rcu_read_unlock(); | ||
2037 | retval = -EAGAIN; | ||
2038 | goto out_free_group_list; | ||
2039 | } | ||
2040 | /* take a reference on each task in the group to go in the array. */ | ||
2041 | tsk = leader; | ||
2042 | i = 0; | ||
2043 | do { | ||
2044 | /* as per above, nr_threads may decrease, but not increase. */ | ||
2045 | BUG_ON(i >= group_size); | ||
2046 | get_task_struct(tsk); | ||
2047 | /* | ||
2048 | * saying GFP_ATOMIC has no effect here because we did prealloc | ||
2049 | * earlier, but it's good form to communicate our expectations. | ||
2050 | */ | ||
2051 | retval = flex_array_put_ptr(group, i, tsk, GFP_ATOMIC); | ||
2052 | BUG_ON(retval != 0); | ||
2053 | i++; | ||
2054 | } while_each_thread(leader, tsk); | ||
2055 | /* remember the number of threads in the array for later. */ | ||
2056 | group_size = i; | ||
2057 | rcu_read_unlock(); | ||
2058 | |||
2059 | /* | ||
2060 | * step 1: check that we can legitimately attach to the cgroup. | ||
2061 | */ | ||
2062 | for_each_subsys(root, ss) { | ||
2063 | if (ss->can_attach) { | ||
2064 | retval = ss->can_attach(ss, cgrp, leader); | ||
2065 | if (retval) { | ||
2066 | failed_ss = ss; | ||
2067 | goto out_cancel_attach; | ||
2068 | } | ||
2069 | } | ||
2070 | /* a callback to be run on every thread in the threadgroup. */ | ||
2071 | if (ss->can_attach_task) { | ||
2072 | /* run on each task in the threadgroup. */ | ||
2073 | for (i = 0; i < group_size; i++) { | ||
2074 | tsk = flex_array_get_ptr(group, i); | ||
2075 | retval = ss->can_attach_task(cgrp, tsk); | ||
2076 | if (retval) { | ||
2077 | failed_ss = ss; | ||
2078 | cancel_failed_ss = true; | ||
2079 | goto out_cancel_attach; | ||
2080 | } | ||
2081 | } | ||
2082 | } | ||
2083 | } | ||
2084 | |||
2085 | /* | ||
2086 | * step 2: make sure css_sets exist for all threads to be migrated. | ||
2087 | * we use find_css_set, which allocates a new one if necessary. | ||
2088 | */ | ||
2089 | INIT_LIST_HEAD(&newcg_list); | ||
2090 | for (i = 0; i < group_size; i++) { | ||
2091 | tsk = flex_array_get_ptr(group, i); | ||
2092 | /* nothing to do if this task is already in the cgroup */ | ||
2093 | oldcgrp = task_cgroup_from_root(tsk, root); | ||
2094 | if (cgrp == oldcgrp) | ||
2095 | continue; | ||
2096 | /* get old css_set pointer */ | ||
2097 | task_lock(tsk); | ||
2098 | if (tsk->flags & PF_EXITING) { | ||
2099 | /* ignore this task if it's going away */ | ||
2100 | task_unlock(tsk); | ||
2101 | continue; | ||
2102 | } | ||
2103 | oldcg = tsk->cgroups; | ||
2104 | get_css_set(oldcg); | ||
2105 | task_unlock(tsk); | ||
2106 | /* see if the new css_set for us is already in the prefetched list */ | ||
2107 | if (css_set_check_fetched(cgrp, tsk, oldcg, &newcg_list)) { | ||
2108 | /* was already there, nothing to do. */ | ||
2109 | put_css_set(oldcg); | ||
2110 | } else { | ||
2111 | /* we don't already have it. get new one. */ | ||
2112 | retval = css_set_prefetch(cgrp, oldcg, &newcg_list); | ||
2113 | put_css_set(oldcg); | ||
2114 | if (retval) | ||
2115 | goto out_list_teardown; | ||
2116 | } | ||
2117 | } | ||
2118 | |||
2119 | /* | ||
2120 | * step 3: now that we're guaranteed success wrt the css_sets, proceed | ||
2121 | * to move all tasks to the new cgroup, calling ss->attach_task for each | ||
2122 | * one along the way. there are no failure cases after here, so this is | ||
2123 | * the commit point. | ||
2124 | */ | ||
2125 | for_each_subsys(root, ss) { | ||
2126 | if (ss->pre_attach) | ||
2127 | ss->pre_attach(cgrp); | ||
2128 | } | ||
2129 | for (i = 0; i < group_size; i++) { | ||
2130 | tsk = flex_array_get_ptr(group, i); | ||
2131 | /* leave current thread as it is if it's already there */ | ||
2132 | oldcgrp = task_cgroup_from_root(tsk, root); | ||
2133 | if (cgrp == oldcgrp) | ||
2134 | continue; | ||
2135 | /* attach each task to each subsystem */ | ||
2136 | for_each_subsys(root, ss) { | ||
2137 | if (ss->attach_task) | ||
2138 | ss->attach_task(cgrp, tsk); | ||
2139 | } | ||
2140 | /* if the thread is PF_EXITING, it can just get skipped. */ | ||
2141 | retval = cgroup_task_migrate(cgrp, oldcgrp, tsk, true); | ||
2142 | BUG_ON(retval != 0 && retval != -ESRCH); | ||
2143 | } | ||
2144 | /* nothing is sensitive to fork() after this point. */ | ||
2145 | |||
2146 | /* | ||
2147 | * step 4: do expensive, non-thread-specific subsystem callbacks. | ||
2148 | * TODO: if ever a subsystem needs to know the oldcgrp for each task | ||
2149 | * being moved, this call will need to be reworked to communicate that. | ||
2150 | */ | ||
2151 | for_each_subsys(root, ss) { | ||
2152 | if (ss->attach) | ||
2153 | ss->attach(ss, cgrp, oldcgrp, leader); | ||
2154 | } | ||
2155 | |||
2156 | /* | ||
2157 | * step 5: success! and cleanup | ||
2158 | */ | ||
2159 | synchronize_rcu(); | ||
2160 | cgroup_wakeup_rmdir_waiter(cgrp); | ||
2161 | retval = 0; | ||
2162 | out_list_teardown: | ||
2163 | /* clean up the list of prefetched css_sets. */ | ||
2164 | list_for_each_entry_safe(cg_entry, temp_nobe, &newcg_list, links) { | ||
2165 | list_del(&cg_entry->links); | ||
2166 | put_css_set(cg_entry->cg); | ||
2167 | kfree(cg_entry); | ||
2168 | } | ||
2169 | out_cancel_attach: | ||
2170 | /* same deal as in cgroup_attach_task */ | ||
2171 | if (retval) { | ||
2172 | for_each_subsys(root, ss) { | ||
2173 | if (ss == failed_ss) { | ||
2174 | if (cancel_failed_ss && ss->cancel_attach) | ||
2175 | ss->cancel_attach(ss, cgrp, leader); | ||
2176 | break; | ||
2177 | } | ||
2178 | if (ss->cancel_attach) | ||
2179 | ss->cancel_attach(ss, cgrp, leader); | ||
2180 | } | ||
2181 | } | ||
2182 | /* clean up the array of referenced threads in the group. */ | ||
2183 | for (i = 0; i < group_size; i++) { | ||
2184 | tsk = flex_array_get_ptr(group, i); | ||
2185 | put_task_struct(tsk); | ||
2186 | } | ||
2187 | out_free_group_list: | ||
2188 | flex_array_free(group); | ||
2189 | return retval; | ||
2190 | } | ||
2191 | |||
2192 | /* | ||
2193 | * Find the task_struct of the task to attach by vpid and pass it along to the | ||
2194 | * function to attach either it or all tasks in its threadgroup. Will take | ||
2195 | * cgroup_mutex; may take task_lock of task. | ||
1865 | */ | 2196 | */ |
1866 | static int attach_task_by_pid(struct cgroup *cgrp, u64 pid) | 2197 | static int attach_task_by_pid(struct cgroup *cgrp, u64 pid, bool threadgroup) |
1867 | { | 2198 | { |
1868 | struct task_struct *tsk; | 2199 | struct task_struct *tsk; |
1869 | const struct cred *cred = current_cred(), *tcred; | 2200 | const struct cred *cred = current_cred(), *tcred; |
1870 | int ret; | 2201 | int ret; |
1871 | 2202 | ||
2203 | if (!cgroup_lock_live_group(cgrp)) | ||
2204 | return -ENODEV; | ||
2205 | |||
1872 | if (pid) { | 2206 | if (pid) { |
1873 | rcu_read_lock(); | 2207 | rcu_read_lock(); |
1874 | tsk = find_task_by_vpid(pid); | 2208 | tsk = find_task_by_vpid(pid); |
1875 | if (!tsk || tsk->flags & PF_EXITING) { | 2209 | if (!tsk) { |
1876 | rcu_read_unlock(); | 2210 | rcu_read_unlock(); |
2211 | cgroup_unlock(); | ||
2212 | return -ESRCH; | ||
2213 | } | ||
2214 | if (threadgroup) { | ||
2215 | /* | ||
2216 | * RCU protects this access, since tsk was found in the | ||
2217 | * tid map. a race with de_thread may cause group_leader | ||
2218 | * to stop being the leader, but cgroup_attach_proc will | ||
2219 | * detect it later. | ||
2220 | */ | ||
2221 | tsk = tsk->group_leader; | ||
2222 | } else if (tsk->flags & PF_EXITING) { | ||
2223 | /* optimization for the single-task-only case */ | ||
2224 | rcu_read_unlock(); | ||
2225 | cgroup_unlock(); | ||
1877 | return -ESRCH; | 2226 | return -ESRCH; |
1878 | } | 2227 | } |
1879 | 2228 | ||
2229 | /* | ||
2230 | * even if we're attaching all tasks in the thread group, we | ||
2231 | * only need to check permissions on one of them. | ||
2232 | */ | ||
1880 | tcred = __task_cred(tsk); | 2233 | tcred = __task_cred(tsk); |
1881 | if (cred->euid && | 2234 | if (cred->euid && |
1882 | cred->euid != tcred->uid && | 2235 | cred->euid != tcred->uid && |
1883 | cred->euid != tcred->suid) { | 2236 | cred->euid != tcred->suid) { |
1884 | rcu_read_unlock(); | 2237 | rcu_read_unlock(); |
2238 | cgroup_unlock(); | ||
1885 | return -EACCES; | 2239 | return -EACCES; |
1886 | } | 2240 | } |
1887 | get_task_struct(tsk); | 2241 | get_task_struct(tsk); |
1888 | rcu_read_unlock(); | 2242 | rcu_read_unlock(); |
1889 | } else { | 2243 | } else { |
1890 | tsk = current; | 2244 | if (threadgroup) |
2245 | tsk = current->group_leader; | ||
2246 | else | ||
2247 | tsk = current; | ||
1891 | get_task_struct(tsk); | 2248 | get_task_struct(tsk); |
1892 | } | 2249 | } |
1893 | 2250 | ||
1894 | ret = cgroup_attach_task(cgrp, tsk); | 2251 | if (threadgroup) { |
2252 | threadgroup_fork_write_lock(tsk); | ||
2253 | ret = cgroup_attach_proc(cgrp, tsk); | ||
2254 | threadgroup_fork_write_unlock(tsk); | ||
2255 | } else { | ||
2256 | ret = cgroup_attach_task(cgrp, tsk); | ||
2257 | } | ||
1895 | put_task_struct(tsk); | 2258 | put_task_struct(tsk); |
2259 | cgroup_unlock(); | ||
1896 | return ret; | 2260 | return ret; |
1897 | } | 2261 | } |
1898 | 2262 | ||
1899 | static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid) | 2263 | static int cgroup_tasks_write(struct cgroup *cgrp, struct cftype *cft, u64 pid) |
1900 | { | 2264 | { |
2265 | return attach_task_by_pid(cgrp, pid, false); | ||
2266 | } | ||
2267 | |||
2268 | static int cgroup_procs_write(struct cgroup *cgrp, struct cftype *cft, u64 tgid) | ||
2269 | { | ||
1901 | int ret; | 2270 | int ret; |
1902 | if (!cgroup_lock_live_group(cgrp)) | 2271 | do { |
1903 | return -ENODEV; | 2272 | /* |
1904 | ret = attach_task_by_pid(cgrp, pid); | 2273 | * attach_proc fails with -EAGAIN if threadgroup leadership |
1905 | cgroup_unlock(); | 2274 | * changes in the middle of the operation, in which case we need |
2275 | * to find the task_struct for the new leader and start over. | ||
2276 | */ | ||
2277 | ret = attach_task_by_pid(cgrp, tgid, true); | ||
2278 | } while (ret == -EAGAIN); | ||
1906 | return ret; | 2279 | return ret; |
1907 | } | 2280 | } |
1908 | 2281 | ||
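Together with the cftype change in the next hunk, which makes the per-cgroup "procs" file writable, moving an entire thread group becomes a single write of the TGID; the -EAGAIN retry on a leadership change is handled inside cgroup_procs_write(), so userspace only writes once. A minimal sketch, assuming a hierarchy already mounted at /sys/fs/cgroup/cpu with an existing group "grp" (both paths are illustrative, not part of this patch):

    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        /* Illustrative mount point and group name. */
        const char *procs = "/sys/fs/cgroup/cpu/grp/cgroup.procs";
        pid_t tgid = (argc > 1) ? (pid_t)atoi(argv[1]) : getpid();
        FILE *f = fopen(procs, "w");

        if (!f) {
            perror(procs);
            return 1;
        }
        /* One write moves the leader and every thread in its group. */
        fprintf(f, "%d\n", (int)tgid);
        if (fclose(f)) {
            perror("cgroup.procs");
            return 1;
        }
        return 0;
    }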
@@ -3259,9 +3632,9 @@ static struct cftype files[] = { | |||
3259 | { | 3632 | { |
3260 | .name = CGROUP_FILE_GENERIC_PREFIX "procs", | 3633 | .name = CGROUP_FILE_GENERIC_PREFIX "procs", |
3261 | .open = cgroup_procs_open, | 3634 | .open = cgroup_procs_open, |
3262 | /* .write_u64 = cgroup_procs_write, TODO */ | 3635 | .write_u64 = cgroup_procs_write, |
3263 | .release = cgroup_pidlist_release, | 3636 | .release = cgroup_pidlist_release, |
3264 | .mode = S_IRUGO, | 3637 | .mode = S_IRUGO | S_IWUSR, |
3265 | }, | 3638 | }, |
3266 | { | 3639 | { |
3267 | .name = "notify_on_release", | 3640 | .name = "notify_on_release", |
@@ -4257,122 +4630,6 @@ void cgroup_exit(struct task_struct *tsk, int run_callbacks) | |||
4257 | } | 4630 | } |
4258 | 4631 | ||
4259 | /** | 4632 | /** |
4260 | * cgroup_clone - clone the cgroup the given subsystem is attached to | ||
4261 | * @tsk: the task to be moved | ||
4262 | * @subsys: the given subsystem | ||
4263 | * @nodename: the name for the new cgroup | ||
4264 | * | ||
4265 | * Duplicate the current cgroup in the hierarchy that the given | ||
4266 | * subsystem is attached to, and move this task into the new | ||
4267 | * child. | ||
4268 | */ | ||
4269 | int cgroup_clone(struct task_struct *tsk, struct cgroup_subsys *subsys, | ||
4270 | char *nodename) | ||
4271 | { | ||
4272 | struct dentry *dentry; | ||
4273 | int ret = 0; | ||
4274 | struct cgroup *parent, *child; | ||
4275 | struct inode *inode; | ||
4276 | struct css_set *cg; | ||
4277 | struct cgroupfs_root *root; | ||
4278 | struct cgroup_subsys *ss; | ||
4279 | |||
4280 | /* We shouldn't be called by an unregistered subsystem */ | ||
4281 | BUG_ON(!subsys->active); | ||
4282 | |||
4283 | /* First figure out what hierarchy and cgroup we're dealing | ||
4284 | * with, and pin them so we can drop cgroup_mutex */ | ||
4285 | mutex_lock(&cgroup_mutex); | ||
4286 | again: | ||
4287 | root = subsys->root; | ||
4288 | if (root == &rootnode) { | ||
4289 | mutex_unlock(&cgroup_mutex); | ||
4290 | return 0; | ||
4291 | } | ||
4292 | |||
4293 | /* Pin the hierarchy */ | ||
4294 | if (!atomic_inc_not_zero(&root->sb->s_active)) { | ||
4295 | /* We race with the final deactivate_super() */ | ||
4296 | mutex_unlock(&cgroup_mutex); | ||
4297 | return 0; | ||
4298 | } | ||
4299 | |||
4300 | /* Keep the cgroup alive */ | ||
4301 | task_lock(tsk); | ||
4302 | parent = task_cgroup(tsk, subsys->subsys_id); | ||
4303 | cg = tsk->cgroups; | ||
4304 | get_css_set(cg); | ||
4305 | task_unlock(tsk); | ||
4306 | |||
4307 | mutex_unlock(&cgroup_mutex); | ||
4308 | |||
4309 | /* Now do the VFS work to create a cgroup */ | ||
4310 | inode = parent->dentry->d_inode; | ||
4311 | |||
4312 | /* Hold the parent directory mutex across this operation to | ||
4313 | * stop anyone else deleting the new cgroup */ | ||
4314 | mutex_lock(&inode->i_mutex); | ||
4315 | dentry = lookup_one_len(nodename, parent->dentry, strlen(nodename)); | ||
4316 | if (IS_ERR(dentry)) { | ||
4317 | printk(KERN_INFO | ||
4318 | "cgroup: Couldn't allocate dentry for %s: %ld\n", nodename, | ||
4319 | PTR_ERR(dentry)); | ||
4320 | ret = PTR_ERR(dentry); | ||
4321 | goto out_release; | ||
4322 | } | ||
4323 | |||
4324 | /* Create the cgroup directory, which also creates the cgroup */ | ||
4325 | ret = vfs_mkdir(inode, dentry, 0755); | ||
4326 | child = __d_cgrp(dentry); | ||
4327 | dput(dentry); | ||
4328 | if (ret) { | ||
4329 | printk(KERN_INFO | ||
4330 | "Failed to create cgroup %s: %d\n", nodename, | ||
4331 | ret); | ||
4332 | goto out_release; | ||
4333 | } | ||
4334 | |||
4335 | /* The cgroup now exists. Retake cgroup_mutex and check | ||
4336 | * that we're still in the same state that we thought we | ||
4337 | * were. */ | ||
4338 | mutex_lock(&cgroup_mutex); | ||
4339 | if ((root != subsys->root) || | ||
4340 | (parent != task_cgroup(tsk, subsys->subsys_id))) { | ||
4341 | /* Aargh, we raced ... */ | ||
4342 | mutex_unlock(&inode->i_mutex); | ||
4343 | put_css_set(cg); | ||
4344 | |||
4345 | deactivate_super(root->sb); | ||
4346 | /* The cgroup is still accessible in the VFS, but | ||
4347 | * we're not going to try to rmdir() it at this | ||
4348 | * point. */ | ||
4349 | printk(KERN_INFO | ||
4350 | "Race in cgroup_clone() - leaking cgroup %s\n", | ||
4351 | nodename); | ||
4352 | goto again; | ||
4353 | } | ||
4354 | |||
4355 | /* do any required auto-setup */ | ||
4356 | for_each_subsys(root, ss) { | ||
4357 | if (ss->post_clone) | ||
4358 | ss->post_clone(ss, child); | ||
4359 | } | ||
4360 | |||
4361 | /* All seems fine. Finish by moving the task into the new cgroup */ | ||
4362 | ret = cgroup_attach_task(child, tsk); | ||
4363 | mutex_unlock(&cgroup_mutex); | ||
4364 | |||
4365 | out_release: | ||
4366 | mutex_unlock(&inode->i_mutex); | ||
4367 | |||
4368 | mutex_lock(&cgroup_mutex); | ||
4369 | put_css_set(cg); | ||
4370 | mutex_unlock(&cgroup_mutex); | ||
4371 | deactivate_super(root->sb); | ||
4372 | return ret; | ||
4373 | } | ||
4374 | |||
4375 | /** | ||
4376 | * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp | 4633 | * cgroup_is_descendant - see if @cgrp is a descendant of @task's cgrp |
4377 | * @cgrp: the cgroup in question | 4634 | * @cgrp: the cgroup in question |
4378 | * @task: the task in question | 4635 | * @task: the task in question |
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c index e7bebb7c6c38..e691818d7e45 100644 --- a/kernel/cgroup_freezer.c +++ b/kernel/cgroup_freezer.c | |||
@@ -160,7 +160,7 @@ static void freezer_destroy(struct cgroup_subsys *ss, | |||
160 | */ | 160 | */ |
161 | static int freezer_can_attach(struct cgroup_subsys *ss, | 161 | static int freezer_can_attach(struct cgroup_subsys *ss, |
162 | struct cgroup *new_cgroup, | 162 | struct cgroup *new_cgroup, |
163 | struct task_struct *task, bool threadgroup) | 163 | struct task_struct *task) |
164 | { | 164 | { |
165 | struct freezer *freezer; | 165 | struct freezer *freezer; |
166 | 166 | ||
@@ -172,26 +172,17 @@ static int freezer_can_attach(struct cgroup_subsys *ss, | |||
172 | if (freezer->state != CGROUP_THAWED) | 172 | if (freezer->state != CGROUP_THAWED) |
173 | return -EBUSY; | 173 | return -EBUSY; |
174 | 174 | ||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | static int freezer_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) | ||
179 | { | ||
175 | rcu_read_lock(); | 180 | rcu_read_lock(); |
176 | if (__cgroup_freezing_or_frozen(task)) { | 181 | if (__cgroup_freezing_or_frozen(tsk)) { |
177 | rcu_read_unlock(); | 182 | rcu_read_unlock(); |
178 | return -EBUSY; | 183 | return -EBUSY; |
179 | } | 184 | } |
180 | rcu_read_unlock(); | 185 | rcu_read_unlock(); |
181 | |||
182 | if (threadgroup) { | ||
183 | struct task_struct *c; | ||
184 | |||
185 | rcu_read_lock(); | ||
186 | list_for_each_entry_rcu(c, &task->thread_group, thread_group) { | ||
187 | if (__cgroup_freezing_or_frozen(c)) { | ||
188 | rcu_read_unlock(); | ||
189 | return -EBUSY; | ||
190 | } | ||
191 | } | ||
192 | rcu_read_unlock(); | ||
193 | } | ||
194 | |||
195 | return 0; | 186 | return 0; |
196 | } | 187 | } |
197 | 188 | ||
@@ -390,6 +381,9 @@ struct cgroup_subsys freezer_subsys = { | |||
390 | .populate = freezer_populate, | 381 | .populate = freezer_populate, |
391 | .subsys_id = freezer_subsys_id, | 382 | .subsys_id = freezer_subsys_id, |
392 | .can_attach = freezer_can_attach, | 383 | .can_attach = freezer_can_attach, |
384 | .can_attach_task = freezer_can_attach_task, | ||
385 | .pre_attach = NULL, | ||
386 | .attach_task = NULL, | ||
393 | .attach = NULL, | 387 | .attach = NULL, |
394 | .fork = freezer_fork, | 388 | .fork = freezer_fork, |
395 | .exit = NULL, | 389 | .exit = NULL, |
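The freezer conversion above shows the general shape of the split attach interface: the group-wide check stays in can_attach(), the per-thread check moves to can_attach_task(), and pre_attach()/attach_task()/attach() divide the attach side the same way. A hedged skeleton of a subsystem filling in all five hooks, using only the signatures visible in this diff; the "foo" name, the empty bodies and foo_subsys_id are illustrative, and lifecycle hooks such as create/destroy are omitted:

    static int foo_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                              struct task_struct *task)
    {
            /* One group-wide check per attach operation. */
            return 0;
    }

    static int foo_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
    {
            /* Called for each thread when a whole group is attached. */
            return 0;
    }

    static void foo_pre_attach(struct cgroup *cgrp)
    {
            /* One-time setup; no dynamic allocation allowed here. */
    }

    static void foo_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
    {
            /* Per-thread work while each task is moved. */
    }

    static void foo_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
                           struct cgroup *oldcgrp, struct task_struct *leader)
    {
            /* Expensive, possibly sleeping work done once for the group. */
    }

    struct cgroup_subsys foo_subsys = {
            .name            = "foo",
            .can_attach      = foo_can_attach,
            .can_attach_task = foo_can_attach_task,
            .pre_attach      = foo_pre_attach,
            .attach_task     = foo_attach_task,
            .attach          = foo_attach,
            .subsys_id       = foo_subsys_id,
    };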
diff --git a/kernel/cpuset.c b/kernel/cpuset.c index 2bb8c2e98fff..1ceeb049c827 100644 --- a/kernel/cpuset.c +++ b/kernel/cpuset.c | |||
@@ -1367,14 +1367,10 @@ static int fmeter_getrate(struct fmeter *fmp) | |||
1367 | return val; | 1367 | return val; |
1368 | } | 1368 | } |
1369 | 1369 | ||
1370 | /* Protected by cgroup_lock */ | ||
1371 | static cpumask_var_t cpus_attach; | ||
1372 | |||
1373 | /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */ | 1370 | /* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */ |
1374 | static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, | 1371 | static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, |
1375 | struct task_struct *tsk, bool threadgroup) | 1372 | struct task_struct *tsk) |
1376 | { | 1373 | { |
1377 | int ret; | ||
1378 | struct cpuset *cs = cgroup_cs(cont); | 1374 | struct cpuset *cs = cgroup_cs(cont); |
1379 | 1375 | ||
1380 | if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) | 1376 | if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed)) |
@@ -1391,29 +1387,42 @@ static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cont, | |||
1391 | if (tsk->flags & PF_THREAD_BOUND) | 1387 | if (tsk->flags & PF_THREAD_BOUND) |
1392 | return -EINVAL; | 1388 | return -EINVAL; |
1393 | 1389 | ||
1394 | ret = security_task_setscheduler(tsk); | ||
1395 | if (ret) | ||
1396 | return ret; | ||
1397 | if (threadgroup) { | ||
1398 | struct task_struct *c; | ||
1399 | |||
1400 | rcu_read_lock(); | ||
1401 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { | ||
1402 | ret = security_task_setscheduler(c); | ||
1403 | if (ret) { | ||
1404 | rcu_read_unlock(); | ||
1405 | return ret; | ||
1406 | } | ||
1407 | } | ||
1408 | rcu_read_unlock(); | ||
1409 | } | ||
1410 | return 0; | 1390 | return 0; |
1411 | } | 1391 | } |
1412 | 1392 | ||
1413 | static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to, | 1393 | static int cpuset_can_attach_task(struct cgroup *cgrp, struct task_struct *task) |
1414 | struct cpuset *cs) | 1394 | { |
1395 | return security_task_setscheduler(task); | ||
1396 | } | ||
1397 | |||
1398 | /* | ||
1399 | * Protected by cgroup_lock. The nodemasks must be stored globally because | ||
1400 | * dynamically allocating them is not allowed in pre_attach, and they must | ||
1401 | * persist among pre_attach, attach_task, and attach. | ||
1402 | */ | ||
1403 | static cpumask_var_t cpus_attach; | ||
1404 | static nodemask_t cpuset_attach_nodemask_from; | ||
1405 | static nodemask_t cpuset_attach_nodemask_to; | ||
1406 | |||
1407 | /* Set-up work for before attaching each task. */ | ||
1408 | static void cpuset_pre_attach(struct cgroup *cont) | ||
1409 | { | ||
1410 | struct cpuset *cs = cgroup_cs(cont); | ||
1411 | |||
1412 | if (cs == &top_cpuset) | ||
1413 | cpumask_copy(cpus_attach, cpu_possible_mask); | ||
1414 | else | ||
1415 | guarantee_online_cpus(cs, cpus_attach); | ||
1416 | |||
1417 | guarantee_online_mems(cs, &cpuset_attach_nodemask_to); | ||
1418 | } | ||
1419 | |||
1420 | /* Per-thread attachment work. */ | ||
1421 | static void cpuset_attach_task(struct cgroup *cont, struct task_struct *tsk) | ||
1415 | { | 1422 | { |
1416 | int err; | 1423 | int err; |
1424 | struct cpuset *cs = cgroup_cs(cont); | ||
1425 | |||
1417 | /* | 1426 | /* |
1418 | * can_attach beforehand should guarantee that this doesn't fail. | 1427 | * can_attach beforehand should guarantee that this doesn't fail. |
1419 | * TODO: have a better way to handle failure here | 1428 | * TODO: have a better way to handle failure here |
@@ -1421,45 +1430,29 @@ static void cpuset_attach_task(struct task_struct *tsk, nodemask_t *to, | |||
1421 | err = set_cpus_allowed_ptr(tsk, cpus_attach); | 1430 | err = set_cpus_allowed_ptr(tsk, cpus_attach); |
1422 | WARN_ON_ONCE(err); | 1431 | WARN_ON_ONCE(err); |
1423 | 1432 | ||
1424 | cpuset_change_task_nodemask(tsk, to); | 1433 | cpuset_change_task_nodemask(tsk, &cpuset_attach_nodemask_to); |
1425 | cpuset_update_task_spread_flag(cs, tsk); | 1434 | cpuset_update_task_spread_flag(cs, tsk); |
1426 | |||
1427 | } | 1435 | } |
1428 | 1436 | ||
1429 | static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont, | 1437 | static void cpuset_attach(struct cgroup_subsys *ss, struct cgroup *cont, |
1430 | struct cgroup *oldcont, struct task_struct *tsk, | 1438 | struct cgroup *oldcont, struct task_struct *tsk) |
1431 | bool threadgroup) | ||
1432 | { | 1439 | { |
1433 | struct mm_struct *mm; | 1440 | struct mm_struct *mm; |
1434 | struct cpuset *cs = cgroup_cs(cont); | 1441 | struct cpuset *cs = cgroup_cs(cont); |
1435 | struct cpuset *oldcs = cgroup_cs(oldcont); | 1442 | struct cpuset *oldcs = cgroup_cs(oldcont); |
1436 | static nodemask_t to; /* protected by cgroup_mutex */ | ||
1437 | 1443 | ||
1438 | if (cs == &top_cpuset) { | 1444 | /* |
1439 | cpumask_copy(cpus_attach, cpu_possible_mask); | 1445 | * Change mm, possibly for multiple threads in a threadgroup. This is |
1440 | } else { | 1446 | * expensive and may sleep. |
1441 | guarantee_online_cpus(cs, cpus_attach); | 1447 | */ |
1442 | } | 1448 | cpuset_attach_nodemask_from = oldcs->mems_allowed; |
1443 | guarantee_online_mems(cs, &to); | 1449 | cpuset_attach_nodemask_to = cs->mems_allowed; |
1444 | |||
1445 | /* do per-task migration stuff possibly for each in the threadgroup */ | ||
1446 | cpuset_attach_task(tsk, &to, cs); | ||
1447 | if (threadgroup) { | ||
1448 | struct task_struct *c; | ||
1449 | rcu_read_lock(); | ||
1450 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { | ||
1451 | cpuset_attach_task(c, &to, cs); | ||
1452 | } | ||
1453 | rcu_read_unlock(); | ||
1454 | } | ||
1455 | |||
1456 | /* change mm; only needs to be done once even if threadgroup */ | ||
1457 | to = cs->mems_allowed; | ||
1458 | mm = get_task_mm(tsk); | 1450 | mm = get_task_mm(tsk); |
1459 | if (mm) { | 1451 | if (mm) { |
1460 | mpol_rebind_mm(mm, &to); | 1452 | mpol_rebind_mm(mm, &cpuset_attach_nodemask_to); |
1461 | if (is_memory_migrate(cs)) | 1453 | if (is_memory_migrate(cs)) |
1462 | cpuset_migrate_mm(mm, &oldcs->mems_allowed, &to); | 1454 | cpuset_migrate_mm(mm, &cpuset_attach_nodemask_from, |
1455 | &cpuset_attach_nodemask_to); | ||
1463 | mmput(mm); | 1456 | mmput(mm); |
1464 | } | 1457 | } |
1465 | } | 1458 | } |
@@ -1809,10 +1802,9 @@ static int cpuset_populate(struct cgroup_subsys *ss, struct cgroup *cont) | |||
1809 | } | 1802 | } |
1810 | 1803 | ||
1811 | /* | 1804 | /* |
1812 | * post_clone() is called at the end of cgroup_clone(). | 1805 | * post_clone() is called during cgroup_create() when the |
1813 | * 'cgroup' was just created automatically as a result of | 1806 | * clone_children mount argument was specified. The cgroup |
1814 | * a cgroup_clone(), and the current task is about to | 1807 | * can not yet have any tasks. |
1815 | * be moved into 'cgroup'. | ||
1816 | * | 1808 | * |
1817 | * Currently we refuse to set up the cgroup - thereby | 1809 | * Currently we refuse to set up the cgroup - thereby |
1818 | * refusing the task to be entered, and as a result refusing | 1810 | * refusing the task to be entered, and as a result refusing |
@@ -1911,6 +1903,9 @@ struct cgroup_subsys cpuset_subsys = { | |||
1911 | .create = cpuset_create, | 1903 | .create = cpuset_create, |
1912 | .destroy = cpuset_destroy, | 1904 | .destroy = cpuset_destroy, |
1913 | .can_attach = cpuset_can_attach, | 1905 | .can_attach = cpuset_can_attach, |
1906 | .can_attach_task = cpuset_can_attach_task, | ||
1907 | .pre_attach = cpuset_pre_attach, | ||
1908 | .attach_task = cpuset_attach_task, | ||
1914 | .attach = cpuset_attach, | 1909 | .attach = cpuset_attach, |
1915 | .populate = cpuset_populate, | 1910 | .populate = cpuset_populate, |
1916 | .post_clone = cpuset_post_clone, | 1911 | .post_clone = cpuset_post_clone, |
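On the cpuset side the split works out as: pre_attach() computes the allowed CPU mask and target nodemask once, attach_task() applies them to every thread, and attach() rebinds (and optionally migrates) the mm a single time. The inputs to those steps come from the cpuset control files plus the now-writable cgroup.procs; a hedged userspace sketch, where the mount point, group name and TGID are all illustrative:

    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/types.h>

    #define GRP "/sys/fs/cgroup/cpuset/grp"      /* illustrative path */

    static int put(const char *file, const char *val)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), GRP "/%s", file);
        f = fopen(path, "w");
        if (!f)
            return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
    }

    int main(void)
    {
        mkdir(GRP, 0755);                        /* may already exist */
        if (put("cpuset.cpus", "0-1") ||         /* used by guarantee_online_cpus() */
            put("cpuset.mems", "0") ||           /* used by guarantee_online_mems() */
            put("cgroup.procs", "1234")) {       /* hypothetical TGID to move */
            perror("cpuset setup");
            return 1;
        }
        return 0;
    }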
diff --git a/kernel/cred.c b/kernel/cred.c index e12c8af793f8..174fa84eca30 100644 --- a/kernel/cred.c +++ b/kernel/cred.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* Task credentials management - see Documentation/credentials.txt | 1 | /* Task credentials management - see Documentation/security/credentials.txt |
2 | * | 2 | * |
3 | * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. | 3 | * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved. |
4 | * Written by David Howells (dhowells@redhat.com) | 4 | * Written by David Howells (dhowells@redhat.com) |
diff --git a/kernel/fork.c b/kernel/fork.c index 8e7e135d0817..ca406d916713 100644 --- a/kernel/fork.c +++ b/kernel/fork.c | |||
@@ -59,7 +59,6 @@ | |||
59 | #include <linux/taskstats_kern.h> | 59 | #include <linux/taskstats_kern.h> |
60 | #include <linux/random.h> | 60 | #include <linux/random.h> |
61 | #include <linux/tty.h> | 61 | #include <linux/tty.h> |
62 | #include <linux/proc_fs.h> | ||
63 | #include <linux/blkdev.h> | 62 | #include <linux/blkdev.h> |
64 | #include <linux/fs_struct.h> | 63 | #include <linux/fs_struct.h> |
65 | #include <linux/magic.h> | 64 | #include <linux/magic.h> |
@@ -597,6 +596,57 @@ void mmput(struct mm_struct *mm) | |||
597 | } | 596 | } |
598 | EXPORT_SYMBOL_GPL(mmput); | 597 | EXPORT_SYMBOL_GPL(mmput); |
599 | 598 | ||
599 | /* | ||
600 | * We added or removed a vma mapping the executable. The vmas are only mapped | ||
601 | * during exec and are not mapped with the mmap system call. | ||
602 | * Callers must hold down_write() on the mm's mmap_sem for these | ||
603 | */ | ||
604 | void added_exe_file_vma(struct mm_struct *mm) | ||
605 | { | ||
606 | mm->num_exe_file_vmas++; | ||
607 | } | ||
608 | |||
609 | void removed_exe_file_vma(struct mm_struct *mm) | ||
610 | { | ||
611 | mm->num_exe_file_vmas--; | ||
612 | if ((mm->num_exe_file_vmas == 0) && mm->exe_file){ | ||
613 | fput(mm->exe_file); | ||
614 | mm->exe_file = NULL; | ||
615 | } | ||
616 | |||
617 | } | ||
618 | |||
619 | void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file) | ||
620 | { | ||
621 | if (new_exe_file) | ||
622 | get_file(new_exe_file); | ||
623 | if (mm->exe_file) | ||
624 | fput(mm->exe_file); | ||
625 | mm->exe_file = new_exe_file; | ||
626 | mm->num_exe_file_vmas = 0; | ||
627 | } | ||
628 | |||
629 | struct file *get_mm_exe_file(struct mm_struct *mm) | ||
630 | { | ||
631 | struct file *exe_file; | ||
632 | |||
633 | /* We need mmap_sem to protect against races with removal of | ||
634 | * VM_EXECUTABLE vmas */ | ||
635 | down_read(&mm->mmap_sem); | ||
636 | exe_file = mm->exe_file; | ||
637 | if (exe_file) | ||
638 | get_file(exe_file); | ||
639 | up_read(&mm->mmap_sem); | ||
640 | return exe_file; | ||
641 | } | ||
642 | |||
643 | static void dup_mm_exe_file(struct mm_struct *oldmm, struct mm_struct *newmm) | ||
644 | { | ||
645 | /* It's safe to write the exe_file pointer without exe_file_lock because | ||
646 | * this is called during fork when the task is not yet in /proc */ | ||
647 | newmm->exe_file = get_mm_exe_file(oldmm); | ||
648 | } | ||
649 | |||
600 | /** | 650 | /** |
601 | * get_task_mm - acquire a reference to the task's mm | 651 | * get_task_mm - acquire a reference to the task's mm |
602 | * | 652 | * |
@@ -957,6 +1007,10 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk) | |||
957 | tty_audit_fork(sig); | 1007 | tty_audit_fork(sig); |
958 | sched_autogroup_fork(sig); | 1008 | sched_autogroup_fork(sig); |
959 | 1009 | ||
1010 | #ifdef CONFIG_CGROUPS | ||
1011 | init_rwsem(&sig->threadgroup_fork_lock); | ||
1012 | #endif | ||
1013 | |||
960 | sig->oom_adj = current->signal->oom_adj; | 1014 | sig->oom_adj = current->signal->oom_adj; |
961 | sig->oom_score_adj = current->signal->oom_score_adj; | 1015 | sig->oom_score_adj = current->signal->oom_score_adj; |
962 | sig->oom_score_adj_min = current->signal->oom_score_adj_min; | 1016 | sig->oom_score_adj_min = current->signal->oom_score_adj_min; |
@@ -1138,6 +1192,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1138 | monotonic_to_bootbased(&p->real_start_time); | 1192 | monotonic_to_bootbased(&p->real_start_time); |
1139 | p->io_context = NULL; | 1193 | p->io_context = NULL; |
1140 | p->audit_context = NULL; | 1194 | p->audit_context = NULL; |
1195 | if (clone_flags & CLONE_THREAD) | ||
1196 | threadgroup_fork_read_lock(current); | ||
1141 | cgroup_fork(p); | 1197 | cgroup_fork(p); |
1142 | #ifdef CONFIG_NUMA | 1198 | #ifdef CONFIG_NUMA |
1143 | p->mempolicy = mpol_dup(p->mempolicy); | 1199 | p->mempolicy = mpol_dup(p->mempolicy); |
@@ -1223,12 +1279,6 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1223 | if (clone_flags & CLONE_THREAD) | 1279 | if (clone_flags & CLONE_THREAD) |
1224 | p->tgid = current->tgid; | 1280 | p->tgid = current->tgid; |
1225 | 1281 | ||
1226 | if (current->nsproxy != p->nsproxy) { | ||
1227 | retval = ns_cgroup_clone(p, pid); | ||
1228 | if (retval) | ||
1229 | goto bad_fork_free_pid; | ||
1230 | } | ||
1231 | |||
1232 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; | 1282 | p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; |
1233 | /* | 1283 | /* |
1234 | * Clear TID on mm_release()? | 1284 | * Clear TID on mm_release()? |
@@ -1342,6 +1392,8 @@ static struct task_struct *copy_process(unsigned long clone_flags, | |||
1342 | write_unlock_irq(&tasklist_lock); | 1392 | write_unlock_irq(&tasklist_lock); |
1343 | proc_fork_connector(p); | 1393 | proc_fork_connector(p); |
1344 | cgroup_post_fork(p); | 1394 | cgroup_post_fork(p); |
1395 | if (clone_flags & CLONE_THREAD) | ||
1396 | threadgroup_fork_read_unlock(current); | ||
1345 | perf_event_fork(p); | 1397 | perf_event_fork(p); |
1346 | return p; | 1398 | return p; |
1347 | 1399 | ||
@@ -1380,6 +1432,8 @@ bad_fork_cleanup_policy: | |||
1380 | mpol_put(p->mempolicy); | 1432 | mpol_put(p->mempolicy); |
1381 | bad_fork_cleanup_cgroup: | 1433 | bad_fork_cleanup_cgroup: |
1382 | #endif | 1434 | #endif |
1435 | if (clone_flags & CLONE_THREAD) | ||
1436 | threadgroup_fork_read_unlock(current); | ||
1383 | cgroup_exit(p, cgroup_callbacks_done); | 1437 | cgroup_exit(p, cgroup_callbacks_done); |
1384 | delayacct_tsk_free(p); | 1438 | delayacct_tsk_free(p); |
1385 | module_put(task_thread_info(p)->exec_domain->module); | 1439 | module_put(task_thread_info(p)->exec_domain->module); |
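copy_process() now brackets the cgroup fork callbacks with threadgroup_fork_read_lock()/unlock() for CLONE_THREAD children, pairing with the write side taken in attach_task_by_pid() before cgroup_attach_proc(). The helpers themselves are defined in a header not shown in this hunk; a plausible shape, assuming they are thin wrappers around the signal_struct rwsem initialised in copy_signal() above:

    #ifdef CONFIG_CGROUPS
    static inline void threadgroup_fork_read_lock(struct task_struct *tsk)
    {
            down_read(&tsk->signal->threadgroup_fork_lock);
    }

    static inline void threadgroup_fork_read_unlock(struct task_struct *tsk)
    {
            up_read(&tsk->signal->threadgroup_fork_lock);
    }

    /* Held for writing across a whole-threadgroup attach, excluding forks. */
    static inline void threadgroup_fork_write_lock(struct task_struct *tsk)
    {
            down_write(&tsk->signal->threadgroup_fork_lock);
    }

    static inline void threadgroup_fork_write_unlock(struct task_struct *tsk)
    {
            up_write(&tsk->signal->threadgroup_fork_lock);
    }
    #else
    static inline void threadgroup_fork_read_lock(struct task_struct *tsk) {}
    static inline void threadgroup_fork_read_unlock(struct task_struct *tsk) {}
    static inline void threadgroup_fork_write_lock(struct task_struct *tsk) {}
    static inline void threadgroup_fork_write_unlock(struct task_struct *tsk) {}
    #endif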
diff --git a/kernel/irq/proc.c b/kernel/irq/proc.c index 64e3df6ab1ef..4bd4faa6323a 100644 --- a/kernel/irq/proc.c +++ b/kernel/irq/proc.c | |||
@@ -352,6 +352,7 @@ void unregister_irq_proc(unsigned int irq, struct irq_desc *desc) | |||
352 | #ifdef CONFIG_SMP | 352 | #ifdef CONFIG_SMP |
353 | remove_proc_entry("smp_affinity", desc->dir); | 353 | remove_proc_entry("smp_affinity", desc->dir); |
354 | remove_proc_entry("affinity_hint", desc->dir); | 354 | remove_proc_entry("affinity_hint", desc->dir); |
355 | remove_proc_entry("smp_affinity_list", desc->dir); | ||
355 | remove_proc_entry("node", desc->dir); | 356 | remove_proc_entry("node", desc->dir); |
356 | #endif | 357 | #endif |
357 | remove_proc_entry("spurious", desc->dir); | 358 | remove_proc_entry("spurious", desc->dir); |
diff --git a/kernel/ns_cgroup.c b/kernel/ns_cgroup.c deleted file mode 100644 index 2c98ad94ba0e..000000000000 --- a/kernel/ns_cgroup.c +++ /dev/null | |||
@@ -1,118 +0,0 @@ | |||
1 | /* | ||
2 | * ns_cgroup.c - namespace cgroup subsystem | ||
3 | * | ||
4 | * Copyright 2006, 2007 IBM Corp | ||
5 | */ | ||
6 | |||
7 | #include <linux/module.h> | ||
8 | #include <linux/cgroup.h> | ||
9 | #include <linux/fs.h> | ||
10 | #include <linux/proc_fs.h> | ||
11 | #include <linux/slab.h> | ||
12 | #include <linux/nsproxy.h> | ||
13 | |||
14 | struct ns_cgroup { | ||
15 | struct cgroup_subsys_state css; | ||
16 | }; | ||
17 | |||
18 | struct cgroup_subsys ns_subsys; | ||
19 | |||
20 | static inline struct ns_cgroup *cgroup_to_ns( | ||
21 | struct cgroup *cgroup) | ||
22 | { | ||
23 | return container_of(cgroup_subsys_state(cgroup, ns_subsys_id), | ||
24 | struct ns_cgroup, css); | ||
25 | } | ||
26 | |||
27 | int ns_cgroup_clone(struct task_struct *task, struct pid *pid) | ||
28 | { | ||
29 | char name[PROC_NUMBUF]; | ||
30 | |||
31 | snprintf(name, PROC_NUMBUF, "%d", pid_vnr(pid)); | ||
32 | return cgroup_clone(task, &ns_subsys, name); | ||
33 | } | ||
34 | |||
35 | /* | ||
36 | * Rules: | ||
37 | * 1. you can only enter a cgroup which is a descendant of your current | ||
38 | * cgroup | ||
39 | * 2. you can only place another process into a cgroup if | ||
40 | * a. you have CAP_SYS_ADMIN | ||
41 | * b. your cgroup is an ancestor of task's destination cgroup | ||
42 | * (hence either you are in the same cgroup as task, or in an | ||
43 | * ancestor cgroup thereof) | ||
44 | */ | ||
45 | static int ns_can_attach(struct cgroup_subsys *ss, struct cgroup *new_cgroup, | ||
46 | struct task_struct *task, bool threadgroup) | ||
47 | { | ||
48 | if (current != task) { | ||
49 | if (!capable(CAP_SYS_ADMIN)) | ||
50 | return -EPERM; | ||
51 | |||
52 | if (!cgroup_is_descendant(new_cgroup, current)) | ||
53 | return -EPERM; | ||
54 | } | ||
55 | |||
56 | if (!cgroup_is_descendant(new_cgroup, task)) | ||
57 | return -EPERM; | ||
58 | |||
59 | if (threadgroup) { | ||
60 | struct task_struct *c; | ||
61 | rcu_read_lock(); | ||
62 | list_for_each_entry_rcu(c, &task->thread_group, thread_group) { | ||
63 | if (!cgroup_is_descendant(new_cgroup, c)) { | ||
64 | rcu_read_unlock(); | ||
65 | return -EPERM; | ||
66 | } | ||
67 | } | ||
68 | rcu_read_unlock(); | ||
69 | } | ||
70 | |||
71 | return 0; | ||
72 | } | ||
73 | |||
74 | /* | ||
75 | * Rules: you can only create a cgroup if | ||
76 | * 1. you are capable(CAP_SYS_ADMIN) | ||
77 | * 2. the target cgroup is a descendant of your own cgroup | ||
78 | */ | ||
79 | static struct cgroup_subsys_state *ns_create(struct cgroup_subsys *ss, | ||
80 | struct cgroup *cgroup) | ||
81 | { | ||
82 | struct ns_cgroup *ns_cgroup; | ||
83 | |||
84 | if (!capable(CAP_SYS_ADMIN)) | ||
85 | return ERR_PTR(-EPERM); | ||
86 | if (!cgroup_is_descendant(cgroup, current)) | ||
87 | return ERR_PTR(-EPERM); | ||
88 | if (test_bit(CGRP_CLONE_CHILDREN, &cgroup->flags)) { | ||
89 | printk("ns_cgroup can't be created with parent " | ||
90 | "'clone_children' set.\n"); | ||
91 | return ERR_PTR(-EINVAL); | ||
92 | } | ||
93 | |||
94 | printk_once("ns_cgroup deprecated: consider using the " | ||
95 | "'clone_children' flag without the ns_cgroup.\n"); | ||
96 | |||
97 | ns_cgroup = kzalloc(sizeof(*ns_cgroup), GFP_KERNEL); | ||
98 | if (!ns_cgroup) | ||
99 | return ERR_PTR(-ENOMEM); | ||
100 | return &ns_cgroup->css; | ||
101 | } | ||
102 | |||
103 | static void ns_destroy(struct cgroup_subsys *ss, | ||
104 | struct cgroup *cgroup) | ||
105 | { | ||
106 | struct ns_cgroup *ns_cgroup; | ||
107 | |||
108 | ns_cgroup = cgroup_to_ns(cgroup); | ||
109 | kfree(ns_cgroup); | ||
110 | } | ||
111 | |||
112 | struct cgroup_subsys ns_subsys = { | ||
113 | .name = "ns", | ||
114 | .can_attach = ns_can_attach, | ||
115 | .create = ns_create, | ||
116 | .destroy = ns_destroy, | ||
117 | .subsys_id = ns_subsys_id, | ||
118 | }; | ||
diff --git a/kernel/nsproxy.c b/kernel/nsproxy.c index 5424e37673ed..d6a00f3de15d 100644 --- a/kernel/nsproxy.c +++ b/kernel/nsproxy.c | |||
@@ -201,10 +201,6 @@ int unshare_nsproxy_namespaces(unsigned long unshare_flags, | |||
201 | goto out; | 201 | goto out; |
202 | } | 202 | } |
203 | 203 | ||
204 | err = ns_cgroup_clone(current, task_pid(current)); | ||
205 | if (err) | ||
206 | put_nsproxy(*new_nsp); | ||
207 | |||
208 | out: | 204 | out: |
209 | return err; | 205 | return err; |
210 | } | 206 | } |
diff --git a/kernel/pm_qos_params.c b/kernel/pm_qos_params.c index beb184689af9..fd8d1e035df9 100644 --- a/kernel/pm_qos_params.c +++ b/kernel/pm_qos_params.c | |||
@@ -40,6 +40,7 @@ | |||
40 | #include <linux/string.h> | 40 | #include <linux/string.h> |
41 | #include <linux/platform_device.h> | 41 | #include <linux/platform_device.h> |
42 | #include <linux/init.h> | 42 | #include <linux/init.h> |
43 | #include <linux/kernel.h> | ||
43 | 44 | ||
44 | #include <linux/uaccess.h> | 45 | #include <linux/uaccess.h> |
45 | 46 | ||
@@ -404,24 +405,36 @@ static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, | |||
404 | size_t count, loff_t *f_pos) | 405 | size_t count, loff_t *f_pos) |
405 | { | 406 | { |
406 | s32 value; | 407 | s32 value; |
407 | int x; | ||
408 | char ascii_value[11]; | ||
409 | struct pm_qos_request_list *pm_qos_req; | 408 | struct pm_qos_request_list *pm_qos_req; |
410 | 409 | ||
411 | if (count == sizeof(s32)) { | 410 | if (count == sizeof(s32)) { |
412 | if (copy_from_user(&value, buf, sizeof(s32))) | 411 | if (copy_from_user(&value, buf, sizeof(s32))) |
413 | return -EFAULT; | 412 | return -EFAULT; |
414 | } else if (count == 11) { /* len('0x12345678/0') */ | 413 | } else if (count <= 11) { /* ASCII perhaps? */ |
415 | if (copy_from_user(ascii_value, buf, 11)) | 414 | char ascii_value[11]; |
415 | unsigned long int ulval; | ||
416 | int ret; | ||
417 | |||
418 | if (copy_from_user(ascii_value, buf, count)) | ||
416 | return -EFAULT; | 419 | return -EFAULT; |
417 | if (strlen(ascii_value) != 10) | 420 | |
418 | return -EINVAL; | 421 | if (count > 10) { |
419 | x = sscanf(ascii_value, "%x", &value); | 422 | if (ascii_value[10] == '\n') |
420 | if (x != 1) | 423 | ascii_value[10] = '\0'; |
424 | else | ||
425 | return -EINVAL; | ||
426 | } else { | ||
427 | ascii_value[count] = '\0'; | ||
428 | } | ||
429 | ret = strict_strtoul(ascii_value, 16, &ulval); | ||
430 | if (ret) { | ||
431 | pr_debug("%s, 0x%lx, 0x%x\n", ascii_value, ulval, ret); | ||
421 | return -EINVAL; | 432 | return -EINVAL; |
422 | pr_debug("%s, %d, 0x%x\n", ascii_value, x, value); | 433 | } |
423 | } else | 434 | value = (s32)lower_32_bits(ulval); |
435 | } else { | ||
424 | return -EINVAL; | 436 | return -EINVAL; |
437 | } | ||
425 | 438 | ||
426 | pm_qos_req = filp->private_data; | 439 | pm_qos_req = filp->private_data; |
427 | pm_qos_update_request(pm_qos_req, value); | 440 | pm_qos_update_request(pm_qos_req, value); |
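The rewritten write handler now accepts either a raw binary s32 (a write of exactly sizeof(s32) bytes) or an ASCII value of up to 11 characters, parsed with strict_strtoul() in base 16, so short hex strings and a trailing newline are tolerated. A minimal userspace sketch against the cpu_dma_latency constraint; the device name is the standard pm_qos misc device, and the request is assumed to be dropped when the descriptor is closed, as with the existing open/release handlers:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/cpu_dma_latency", O_RDWR);
        /*
         * Hex string, 3 bytes: a 4-byte write would take the binary s32
         * path instead, so keep ASCII values away from sizeof(s32).
         */
        const char req[] = "0x0";

        if (fd < 0) {
            perror("/dev/cpu_dma_latency");
            return 1;
        }
        if (write(fd, req, strlen(req)) != (ssize_t)strlen(req)) {
            perror("write");
            close(fd);
            return 1;
        }
        pause();        /* the constraint holds while the fd stays open */
        close(fd);
        return 0;
    }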
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c index f9bec56d8825..8f7b1db1ece1 100644 --- a/kernel/power/hibernate.c +++ b/kernel/power/hibernate.c | |||
@@ -25,7 +25,6 @@ | |||
25 | #include <linux/gfp.h> | 25 | #include <linux/gfp.h> |
26 | #include <linux/syscore_ops.h> | 26 | #include <linux/syscore_ops.h> |
27 | #include <scsi/scsi_scan.h> | 27 | #include <scsi/scsi_scan.h> |
28 | #include <asm/suspend.h> | ||
29 | 28 | ||
30 | #include "power.h" | 29 | #include "power.h" |
31 | 30 | ||
@@ -55,10 +54,9 @@ static int hibernation_mode = HIBERNATION_SHUTDOWN; | |||
55 | static const struct platform_hibernation_ops *hibernation_ops; | 54 | static const struct platform_hibernation_ops *hibernation_ops; |
56 | 55 | ||
57 | /** | 56 | /** |
58 | * hibernation_set_ops - set the global hibernate operations | 57 | * hibernation_set_ops - Set the global hibernate operations. |
59 | * @ops: the hibernation operations to use in subsequent hibernation transitions | 58 | * @ops: Hibernation operations to use in subsequent hibernation transitions. |
60 | */ | 59 | */ |
61 | |||
62 | void hibernation_set_ops(const struct platform_hibernation_ops *ops) | 60 | void hibernation_set_ops(const struct platform_hibernation_ops *ops) |
63 | { | 61 | { |
64 | if (ops && !(ops->begin && ops->end && ops->pre_snapshot | 62 | if (ops && !(ops->begin && ops->end && ops->pre_snapshot |
@@ -115,10 +113,9 @@ static int hibernation_test(int level) { return 0; } | |||
115 | #endif /* !CONFIG_PM_DEBUG */ | 113 | #endif /* !CONFIG_PM_DEBUG */ |
116 | 114 | ||
117 | /** | 115 | /** |
118 | * platform_begin - tell the platform driver that we're starting | 116 | * platform_begin - Call platform to start hibernation. |
119 | * hibernation | 117 | * @platform_mode: Whether or not to use the platform driver. |
120 | */ | 118 | */ |
121 | |||
122 | static int platform_begin(int platform_mode) | 119 | static int platform_begin(int platform_mode) |
123 | { | 120 | { |
124 | return (platform_mode && hibernation_ops) ? | 121 | return (platform_mode && hibernation_ops) ? |
@@ -126,10 +123,9 @@ static int platform_begin(int platform_mode) | |||
126 | } | 123 | } |
127 | 124 | ||
128 | /** | 125 | /** |
129 | * platform_end - tell the platform driver that we've entered the | 126 | * platform_end - Call platform to finish transition to the working state. |
130 | * working state | 127 | * @platform_mode: Whether or not to use the platform driver. |
131 | */ | 128 | */ |
132 | |||
133 | static void platform_end(int platform_mode) | 129 | static void platform_end(int platform_mode) |
134 | { | 130 | { |
135 | if (platform_mode && hibernation_ops) | 131 | if (platform_mode && hibernation_ops) |
@@ -137,8 +133,11 @@ static void platform_end(int platform_mode) | |||
137 | } | 133 | } |
138 | 134 | ||
139 | /** | 135 | /** |
140 | * platform_pre_snapshot - prepare the machine for hibernation using the | 136 | * platform_pre_snapshot - Call platform to prepare the machine for hibernation. |
141 | * platform driver if so configured and return an error code if it fails | 137 | * @platform_mode: Whether or not to use the platform driver. |
138 | * | ||
139 | * Use the platform driver to prepare the system for creating a hibernate image, | ||
140 | * if so configured, and return an error code if that fails. | ||
142 | */ | 141 | */ |
143 | 142 | ||
144 | static int platform_pre_snapshot(int platform_mode) | 143 | static int platform_pre_snapshot(int platform_mode) |
@@ -148,10 +147,14 @@ static int platform_pre_snapshot(int platform_mode) | |||
148 | } | 147 | } |
149 | 148 | ||
150 | /** | 149 | /** |
151 | * platform_leave - prepare the machine for switching to the normal mode | 150 | * platform_leave - Call platform to prepare a transition to the working state. |
152 | * of operation using the platform driver (called with interrupts disabled) | 151 | * @platform_mode: Whether or not to use the platform driver. |
152 | * | ||
153 | * Use the platform driver to prepare the machine for switching to the | ||
154 | * normal mode of operation. | ||
155 | * | ||
156 | * This routine is called on one CPU with interrupts disabled. | ||
153 | */ | 157 | */ |
154 | |||
155 | static void platform_leave(int platform_mode) | 158 | static void platform_leave(int platform_mode) |
156 | { | 159 | { |
157 | if (platform_mode && hibernation_ops) | 160 | if (platform_mode && hibernation_ops) |
@@ -159,10 +162,14 @@ static void platform_leave(int platform_mode) | |||
159 | } | 162 | } |
160 | 163 | ||
161 | /** | 164 | /** |
162 | * platform_finish - switch the machine to the normal mode of operation | 165 | * platform_finish - Call platform to switch the system to the working state. |
163 | * using the platform driver (must be called after platform_prepare()) | 166 | * @platform_mode: Whether or not to use the platform driver. |
167 | * | ||
168 | * Use the platform driver to switch the machine to the normal mode of | ||
169 | * operation. | ||
170 | * | ||
171 | * This routine must be called after platform_prepare(). | ||
164 | */ | 172 | */ |
165 | |||
166 | static void platform_finish(int platform_mode) | 173 | static void platform_finish(int platform_mode) |
167 | { | 174 | { |
168 | if (platform_mode && hibernation_ops) | 175 | if (platform_mode && hibernation_ops) |
@@ -170,11 +177,15 @@ static void platform_finish(int platform_mode) | |||
170 | } | 177 | } |
171 | 178 | ||
172 | /** | 179 | /** |
173 | * platform_pre_restore - prepare the platform for the restoration from a | 180 | * platform_pre_restore - Prepare for hibernate image restoration. |
174 | * hibernation image. If the restore fails after this function has been | 181 | * @platform_mode: Whether or not to use the platform driver. |
175 | * called, platform_restore_cleanup() must be called. | 182 | * |
183 | * Use the platform driver to prepare the system for resume from a hibernation | ||
184 | * image. | ||
185 | * | ||
186 | * If the restore fails after this function has been called, | ||
187 | * platform_restore_cleanup() must be called. | ||
176 | */ | 188 | */ |
177 | |||
178 | static int platform_pre_restore(int platform_mode) | 189 | static int platform_pre_restore(int platform_mode) |
179 | { | 190 | { |
180 | return (platform_mode && hibernation_ops) ? | 191 | return (platform_mode && hibernation_ops) ? |
@@ -182,12 +193,16 @@ static int platform_pre_restore(int platform_mode) | |||
182 | } | 193 | } |
183 | 194 | ||
184 | /** | 195 | /** |
185 | * platform_restore_cleanup - switch the platform to the normal mode of | 196 | * platform_restore_cleanup - Switch to the working state after failing restore. |
186 | * operation after a failing restore. If platform_pre_restore() has been | 197 | * @platform_mode: Whether or not to use the platform driver. |
187 | * called before the failing restore, this function must be called too, | 198 | * |
188 | * regardless of the result of platform_pre_restore(). | 199 | * Use the platform driver to switch the system to the normal mode of operation |
200 | * after a failing restore. | ||
201 | * | ||
202 | * If platform_pre_restore() has been called before the failing restore, this | ||
203 | * function must be called too, regardless of the result of | ||
204 | * platform_pre_restore(). | ||
189 | */ | 205 | */ |
190 | |||
191 | static void platform_restore_cleanup(int platform_mode) | 206 | static void platform_restore_cleanup(int platform_mode) |
192 | { | 207 | { |
193 | if (platform_mode && hibernation_ops) | 208 | if (platform_mode && hibernation_ops) |
@@ -195,10 +210,9 @@ static void platform_restore_cleanup(int platform_mode) | |||
195 | } | 210 | } |
196 | 211 | ||
197 | /** | 212 | /** |
198 | * platform_recover - recover the platform from a failure to suspend | 213 | * platform_recover - Recover from a failure to suspend devices. |
199 | * devices. | 214 | * @platform_mode: Whether or not to use the platform driver. |
200 | */ | 215 | */ |
201 | |||
202 | static void platform_recover(int platform_mode) | 216 | static void platform_recover(int platform_mode) |
203 | { | 217 | { |
204 | if (platform_mode && hibernation_ops && hibernation_ops->recover) | 218 | if (platform_mode && hibernation_ops && hibernation_ops->recover) |
@@ -206,13 +220,12 @@ static void platform_recover(int platform_mode) | |||
206 | } | 220 | } |
207 | 221 | ||
208 | /** | 222 | /** |
209 | * swsusp_show_speed - print the time elapsed between two events. | 223 | * swsusp_show_speed - Print time elapsed between two events during hibernation. |
210 | * @start: Starting event. | 224 | * @start: Starting event. |
211 | * @stop: Final event. | 225 | * @stop: Final event. |
212 | * @nr_pages - number of pages processed between @start and @stop | 226 | * @nr_pages: Number of memory pages processed between @start and @stop. |
213 | * @msg - introductory message to print | 227 | * @msg: Additional diagnostic message to print. |
214 | */ | 228 | */ |
215 | |||
216 | void swsusp_show_speed(struct timeval *start, struct timeval *stop, | 229 | void swsusp_show_speed(struct timeval *start, struct timeval *stop, |
217 | unsigned nr_pages, char *msg) | 230 | unsigned nr_pages, char *msg) |
218 | { | 231 | { |
@@ -235,25 +248,18 @@ void swsusp_show_speed(struct timeval *start, struct timeval *stop, | |||
235 | } | 248 | } |
236 | 249 | ||
237 | /** | 250 | /** |
238 | * create_image - freeze devices that need to be frozen with interrupts | 251 | * create_image - Create a hibernation image. |
239 | * off, create the hibernation image and thaw those devices. Control | 252 | * @platform_mode: Whether or not to use the platform driver. |
240 | * reappears in this routine after a restore. | 253 | * |
254 | * Execute device drivers' .freeze_noirq() callbacks, create a hibernation image | ||
255 | * and execute the drivers' .thaw_noirq() callbacks. | ||
256 | * | ||
257 | * Control reappears in this routine after the subsequent restore. | ||
241 | */ | 258 | */ |
242 | |||
243 | static int create_image(int platform_mode) | 259 | static int create_image(int platform_mode) |
244 | { | 260 | { |
245 | int error; | 261 | int error; |
246 | 262 | ||
247 | error = arch_prepare_suspend(); | ||
248 | if (error) | ||
249 | return error; | ||
250 | |||
251 | /* At this point, dpm_suspend_start() has been called, but *not* | ||
252 | * dpm_suspend_noirq(). We *must* call dpm_suspend_noirq() now. | ||
253 | * Otherwise, drivers for some devices (e.g. interrupt controllers) | ||
254 | * become desynchronized with the actual state of the hardware | ||
255 | * at resume time, and evil weirdness ensues. | ||
256 | */ | ||
257 | error = dpm_suspend_noirq(PMSG_FREEZE); | 263 | error = dpm_suspend_noirq(PMSG_FREEZE); |
258 | if (error) { | 264 | if (error) { |
259 | printk(KERN_ERR "PM: Some devices failed to power down, " | 265 | printk(KERN_ERR "PM: Some devices failed to power down, " |
@@ -297,9 +303,6 @@ static int create_image(int platform_mode) | |||
297 | 303 | ||
298 | Power_up: | 304 | Power_up: |
299 | syscore_resume(); | 305 | syscore_resume(); |
300 | /* NOTE: dpm_resume_noirq() is just a resume() for devices | ||
301 | * that suspended with irqs off ... no overall powerup. | ||
302 | */ | ||
303 | 306 | ||
304 | Enable_irqs: | 307 | Enable_irqs: |
305 | local_irq_enable(); | 308 | local_irq_enable(); |
@@ -317,14 +320,11 @@ static int create_image(int platform_mode) | |||
317 | } | 320 | } |
318 | 321 | ||
319 | /** | 322 | /** |
320 | * hibernation_snapshot - quiesce devices and create the hibernation | 323 | * hibernation_snapshot - Quiesce devices and create a hibernation image. |
321 | * snapshot image. | 324 | * @platform_mode: If set, use platform driver to prepare for the transition. |
322 | * @platform_mode - if set, use the platform driver, if available, to | ||
323 | * prepare the platform firmware for the power transition. | ||
324 | * | 325 | * |
325 | * Must be called with pm_mutex held | 326 | * This routine must be called with pm_mutex held. |
326 | */ | 327 | */ |
327 | |||
328 | int hibernation_snapshot(int platform_mode) | 328 | int hibernation_snapshot(int platform_mode) |
329 | { | 329 | { |
330 | pm_message_t msg = PMSG_RECOVER; | 330 | pm_message_t msg = PMSG_RECOVER; |
@@ -384,13 +384,14 @@ int hibernation_snapshot(int platform_mode) | |||
384 | } | 384 | } |
385 | 385 | ||
386 | /** | 386 | /** |
387 | * resume_target_kernel - prepare devices that need to be suspended with | 387 | * resume_target_kernel - Restore system state from a hibernation image. |
388 | * interrupts off, restore the contents of highmem that have not been | 388 | * @platform_mode: Whether or not to use the platform driver. |
389 | * restored yet from the image and run the low level code that will restore | 389 | * |
390 | * the remaining contents of memory and switch to the just restored target | 390 | * Execute device drivers' .freeze_noirq() callbacks, restore the contents of |
391 | * kernel. | 391 | * highmem that have not been restored yet from the image and run the low-level |
392 | * code that will restore the remaining contents of memory and switch to the | ||
393 | * just restored target kernel. | ||
392 | */ | 394 | */ |
393 | |||
394 | static int resume_target_kernel(bool platform_mode) | 395 | static int resume_target_kernel(bool platform_mode) |
395 | { | 396 | { |
396 | int error; | 397 | int error; |
@@ -416,24 +417,26 @@ static int resume_target_kernel(bool platform_mode) | |||
416 | if (error) | 417 | if (error) |
417 | goto Enable_irqs; | 418 | goto Enable_irqs; |
418 | 419 | ||
419 | /* We'll ignore saved state, but this gets preempt count (etc) right */ | ||
420 | save_processor_state(); | 420 | save_processor_state(); |
421 | error = restore_highmem(); | 421 | error = restore_highmem(); |
422 | if (!error) { | 422 | if (!error) { |
423 | error = swsusp_arch_resume(); | 423 | error = swsusp_arch_resume(); |
424 | /* | 424 | /* |
425 | * The code below is only ever reached in case of a failure. | 425 | * The code below is only ever reached in case of a failure. |
426 | * Otherwise execution continues at place where | 426 | * Otherwise, execution continues at the place where |
427 | * swsusp_arch_suspend() was called | 427 | * swsusp_arch_suspend() was called. |
428 | */ | 428 | */ |
429 | BUG_ON(!error); | 429 | BUG_ON(!error); |
430 | /* This call to restore_highmem() undos the previous one */ | 430 | /* |
431 | * This call to restore_highmem() reverts the changes made by | ||
432 | * the previous one. | ||
433 | */ | ||
431 | restore_highmem(); | 434 | restore_highmem(); |
432 | } | 435 | } |
433 | /* | 436 | /* |
434 | * The only reason why swsusp_arch_resume() can fail is memory being | 437 | * The only reason why swsusp_arch_resume() can fail is memory being |
435 | * very tight, so we have to free it as soon as we can to avoid | 438 | * very tight, so we have to free it as soon as we can to avoid |
436 | * subsequent failures | 439 | * subsequent failures. |
437 | */ | 440 | */ |
438 | swsusp_free(); | 441 | swsusp_free(); |
439 | restore_processor_state(); | 442 | restore_processor_state(); |
@@ -456,14 +459,12 @@ static int resume_target_kernel(bool platform_mode) | |||
456 | } | 459 | } |
457 | 460 | ||
458 | /** | 461 | /** |
459 | * hibernation_restore - quiesce devices and restore the hibernation | 462 | * hibernation_restore - Quiesce devices and restore from a hibernation image. |
460 | * snapshot image. If successful, control returns in hibernation_snaphot() | 463 | * @platform_mode: If set, use platform driver to prepare for the transition. |
461 | * @platform_mode - if set, use the platform driver, if available, to | ||
462 | * prepare the platform firmware for the transition. | ||
463 | * | 464 | * |
464 | * Must be called with pm_mutex held | 465 | * This routine must be called with pm_mutex held. If it is successful, control |
466 | * reappears in the restored target kernel in hibernation_snaphot(). | ||
465 | */ | 467 | */ |
466 | |||
467 | int hibernation_restore(int platform_mode) | 468 | int hibernation_restore(int platform_mode) |
468 | { | 469 | { |
469 | int error; | 470 | int error; |
@@ -483,10 +484,8 @@ int hibernation_restore(int platform_mode) | |||
483 | } | 484 | } |
484 | 485 | ||
485 | /** | 486 | /** |
486 | * hibernation_platform_enter - enter the hibernation state using the | 487 | * hibernation_platform_enter - Power off the system using the platform driver. |
487 | * platform driver (if available) | ||
488 | */ | 488 | */ |
489 | |||
490 | int hibernation_platform_enter(void) | 489 | int hibernation_platform_enter(void) |
491 | { | 490 | { |
492 | int error; | 491 | int error; |
@@ -557,12 +556,12 @@ int hibernation_platform_enter(void) | |||
557 | } | 556 | } |
558 | 557 | ||
559 | /** | 558 | /** |
560 | * power_down - Shut the machine down for hibernation. | 559 | * power_down - Shut the machine down for hibernation. |
561 | * | 560 | * |
562 | * Use the platform driver, if configured so; otherwise try | 561 | * Use the platform driver, if configured, to put the system into the sleep |
563 | * to power off or reboot. | 562 | * state corresponding to hibernation, or try to power it off or reboot, |
563 | * depending on the value of hibernation_mode. | ||
564 | */ | 564 | */ |
565 | |||
566 | static void power_down(void) | 565 | static void power_down(void) |
567 | { | 566 | { |
568 | switch (hibernation_mode) { | 567 | switch (hibernation_mode) { |
@@ -599,9 +598,8 @@ static int prepare_processes(void) | |||
599 | } | 598 | } |
600 | 599 | ||
601 | /** | 600 | /** |
602 | * hibernate - The granpappy of the built-in hibernation management | 601 | * hibernate - Carry out system hibernation, including saving the image. |
603 | */ | 602 | */ |
604 | |||
605 | int hibernate(void) | 603 | int hibernate(void) |
606 | { | 604 | { |
607 | int error; | 605 | int error; |
@@ -679,17 +677,20 @@ int hibernate(void) | |||
679 | 677 | ||
680 | 678 | ||
681 | /** | 679 | /** |
682 | * software_resume - Resume from a saved image. | 680 | * software_resume - Resume from a saved hibernation image. |
683 | * | 681 | * |
684 | * Called as a late_initcall (so all devices are discovered and | 682 | * This routine is called as a late initcall, when all devices have been |
685 | * initialized), we call swsusp to see if we have a saved image or not. | 683 | * discovered and initialized already. |
686 | * If so, we quiesce devices, the restore the saved image. We will | ||
687 | * return above (in hibernate() ) if everything goes well. | ||
688 | * Otherwise, we fail gracefully and return to the normally | ||
689 | * scheduled program. | ||
690 | * | 684 | * |
685 | * The image reading code is called to see if there is a hibernation image | ||
686 | * available for reading. If that is the case, devices are quiesced and the | ||
687 | * contents of memory is restored from the saved image. | ||
688 | * | ||
689 | * If this is successful, control reappears in the restored target kernel in | ||
690 | * hibernation_snapshot(), which returns to hibernate(). Otherwise, the routine | ||
691 | * attempts to recover gracefully and make the kernel return to the normal mode | ||
692 | * of operation. | ||
691 | */ | 693 | */ |
692 | |||
693 | static int software_resume(void) | 694 | static int software_resume(void) |
694 | { | 695 | { |
695 | int error; | 696 | int error; |
@@ -819,21 +820,17 @@ static const char * const hibernation_modes[] = { | |||
819 | [HIBERNATION_TESTPROC] = "testproc", | 820 | [HIBERNATION_TESTPROC] = "testproc", |
820 | }; | 821 | }; |
821 | 822 | ||
822 | /** | 823 | /* |
823 | * disk - Control hibernation mode | 824 | * /sys/power/disk - Control hibernation mode. |
824 | * | ||
825 | * Suspend-to-disk can be handled in several ways. We have a few options | ||
826 | * for putting the system to sleep - using the platform driver (e.g. ACPI | ||
827 | * or other hibernation_ops), powering off the system or rebooting the | ||
828 | * system (for testing) as well as the two test modes. | ||
829 | * | 825 | * |
830 | * The system can support 'platform', and that is known a priori (and | 826 | * Hibernation can be handled in several ways. There are a few different ways |
831 | * encoded by the presence of hibernation_ops). However, the user may | 827 | * to put the system into the sleep state: using the platform driver (e.g. ACPI |
832 | * choose 'shutdown' or 'reboot' as alternatives, as well as one fo the | 828 | * or other hibernation_ops), powering it off or rebooting it (for testing |
833 | * test modes, 'test' or 'testproc'. | 829 | * mostly), or using one of the two available test modes. |
834 | * | 830 | * |
835 | * show() will display what the mode is currently set to. | 831 | * The sysfs file /sys/power/disk provides an interface for selecting the |
836 | * store() will accept one of | 832 | * hibernation mode to use. Reading from this file causes the available modes |
833 | * to be printed. There are 5 modes that can be supported: | ||
837 | * | 834 | * |
838 | * 'platform' | 835 | * 'platform' |
839 | * 'shutdown' | 836 | * 'shutdown' |
@@ -841,8 +838,14 @@ static const char * const hibernation_modes[] = { | |||
841 | * 'test' | 838 | * 'test' |
842 | * 'testproc' | 839 | * 'testproc' |
843 | * | 840 | * |
844 | * It will only change to 'platform' if the system | 841 | * If a platform hibernation driver is in use, 'platform' will be supported |
845 | * supports it (as determined by having hibernation_ops). | 842 | * and will be used by default. Otherwise, 'shutdown' will be used by default. |
843 | * The selected option (i.e. the one corresponding to the current value of | ||
844 | * hibernation_mode) is enclosed by a square bracket. | ||
845 | * | ||
846 | * To select a given hibernation mode it is necessary to write the mode's | ||
847 | * string representation (as returned by reading from /sys/power/disk) back | ||
848 | * into /sys/power/disk. | ||
846 | */ | 849 | */ |
847 | 850 | ||
848 | static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr, | 851 | static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr, |
@@ -875,7 +878,6 @@ static ssize_t disk_show(struct kobject *kobj, struct kobj_attribute *attr, | |||
875 | return buf-start; | 878 | return buf-start; |
876 | } | 879 | } |
877 | 880 | ||
878 | |||
879 | static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr, | 881 | static ssize_t disk_store(struct kobject *kobj, struct kobj_attribute *attr, |
880 | const char *buf, size_t n) | 882 | const char *buf, size_t n) |
881 | { | 883 | { |
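The reworked comment block above documents /sys/power/disk as the hibernation mode selector: reading it lists the supported modes with the current one in square brackets, and writing one of the listed strings selects it. Hibernation itself is then started through the separate /sys/power/state file; a small sketch (using /sys/power/state with "disk" is existing behaviour, not something introduced here, and both writes require root):

    #include <stdio.h>

    static int write_str(const char *path, const char *val)
    {
        FILE *f = fopen(path, "w");

        if (!f)
            return -1;
        fprintf(f, "%s\n", val);
        return fclose(f);
    }

    int main(void)
    {
        char modes[128];
        FILE *f = fopen("/sys/power/disk", "r");

        /* List the supported modes; the selected one is in brackets. */
        if (f) {
            if (fgets(modes, sizeof(modes), f))
                printf("available: %s", modes);
            fclose(f);
        }
        /* Power off after the image is written, then hibernate. */
        if (write_str("/sys/power/disk", "shutdown") ||
            write_str("/sys/power/state", "disk")) {
            perror("hibernate");
            return 1;
        }
        return 0;
    }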
diff --git a/kernel/profile.c b/kernel/profile.c index 14c9f87b9fc9..961b389fe52f 100644 --- a/kernel/profile.c +++ b/kernel/profile.c | |||
@@ -303,14 +303,12 @@ static void profile_discard_flip_buffers(void) | |||
303 | mutex_unlock(&profile_flip_mutex); | 303 | mutex_unlock(&profile_flip_mutex); |
304 | } | 304 | } |
305 | 305 | ||
306 | void profile_hits(int type, void *__pc, unsigned int nr_hits) | 306 | static void do_profile_hits(int type, void *__pc, unsigned int nr_hits) |
307 | { | 307 | { |
308 | unsigned long primary, secondary, flags, pc = (unsigned long)__pc; | 308 | unsigned long primary, secondary, flags, pc = (unsigned long)__pc; |
309 | int i, j, cpu; | 309 | int i, j, cpu; |
310 | struct profile_hit *hits; | 310 | struct profile_hit *hits; |
311 | 311 | ||
312 | if (prof_on != type || !prof_buffer) | ||
313 | return; | ||
314 | pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1); | 312 | pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1); |
315 | i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT; | 313 | i = primary = (pc & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT; |
316 | secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT; | 314 | secondary = (~(pc << 1) & (NR_PROFILE_GRP - 1)) << PROFILE_GRPSHIFT; |
@@ -417,16 +415,20 @@ out_free: | |||
417 | #define profile_discard_flip_buffers() do { } while (0) | 415 | #define profile_discard_flip_buffers() do { } while (0) |
418 | #define profile_cpu_callback NULL | 416 | #define profile_cpu_callback NULL |
419 | 417 | ||
420 | void profile_hits(int type, void *__pc, unsigned int nr_hits) | 418 | static void do_profile_hits(int type, void *__pc, unsigned int nr_hits) |
421 | { | 419 | { |
422 | unsigned long pc; | 420 | unsigned long pc; |
423 | |||
424 | if (prof_on != type || !prof_buffer) | ||
425 | return; | ||
426 | pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift; | 421 | pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift; |
427 | atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); | 422 | atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]); |
428 | } | 423 | } |
429 | #endif /* !CONFIG_SMP */ | 424 | #endif /* !CONFIG_SMP */ |
425 | |||
426 | void profile_hits(int type, void *__pc, unsigned int nr_hits) | ||
427 | { | ||
428 | if (prof_on != type || !prof_buffer) | ||
429 | return; | ||
430 | do_profile_hits(type, __pc, nr_hits); | ||
431 | } | ||
430 | EXPORT_SYMBOL_GPL(profile_hits); | 432 | EXPORT_SYMBOL_GPL(profile_hits); |
431 | 433 | ||
432 | void profile_tick(int type) | 434 | void profile_tick(int type) |
diff --git a/kernel/sched.c b/kernel/sched.c index 2d12893b8b0f..5e43e9dc65d1 100644 --- a/kernel/sched.c +++ b/kernel/sched.c | |||
@@ -8764,42 +8764,10 @@ cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk) | |||
8764 | return 0; | 8764 | return 0; |
8765 | } | 8765 | } |
8766 | 8766 | ||
8767 | static int | ||
8768 | cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, | ||
8769 | struct task_struct *tsk, bool threadgroup) | ||
8770 | { | ||
8771 | int retval = cpu_cgroup_can_attach_task(cgrp, tsk); | ||
8772 | if (retval) | ||
8773 | return retval; | ||
8774 | if (threadgroup) { | ||
8775 | struct task_struct *c; | ||
8776 | rcu_read_lock(); | ||
8777 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { | ||
8778 | retval = cpu_cgroup_can_attach_task(cgrp, c); | ||
8779 | if (retval) { | ||
8780 | rcu_read_unlock(); | ||
8781 | return retval; | ||
8782 | } | ||
8783 | } | ||
8784 | rcu_read_unlock(); | ||
8785 | } | ||
8786 | return 0; | ||
8787 | } | ||
8788 | |||
8789 | static void | 8767 | static void |
8790 | cpu_cgroup_attach(struct cgroup_subsys *ss, struct cgroup *cgrp, | 8768 | cpu_cgroup_attach_task(struct cgroup *cgrp, struct task_struct *tsk) |
8791 | struct cgroup *old_cont, struct task_struct *tsk, | ||
8792 | bool threadgroup) | ||
8793 | { | 8769 | { |
8794 | sched_move_task(tsk); | 8770 | sched_move_task(tsk); |
8795 | if (threadgroup) { | ||
8796 | struct task_struct *c; | ||
8797 | rcu_read_lock(); | ||
8798 | list_for_each_entry_rcu(c, &tsk->thread_group, thread_group) { | ||
8799 | sched_move_task(c); | ||
8800 | } | ||
8801 | rcu_read_unlock(); | ||
8802 | } | ||
8803 | } | 8771 | } |
8804 | 8772 | ||
8805 | static void | 8773 | static void |
@@ -8887,8 +8855,8 @@ struct cgroup_subsys cpu_cgroup_subsys = { | |||
8887 | .name = "cpu", | 8855 | .name = "cpu", |
8888 | .create = cpu_cgroup_create, | 8856 | .create = cpu_cgroup_create, |
8889 | .destroy = cpu_cgroup_destroy, | 8857 | .destroy = cpu_cgroup_destroy, |
8890 | .can_attach = cpu_cgroup_can_attach, | 8858 | .can_attach_task = cpu_cgroup_can_attach_task, |
8891 | .attach = cpu_cgroup_attach, | 8859 | .attach_task = cpu_cgroup_attach_task, |
8892 | .exit = cpu_cgroup_exit, | 8860 | .exit = cpu_cgroup_exit, |
8893 | .populate = cpu_cgroup_populate, | 8861 | .populate = cpu_cgroup_populate, |
8894 | .subsys_id = cpu_cgroup_subsys_id, | 8862 | .subsys_id = cpu_cgroup_subsys_id, |
diff --git a/lib/Kconfig b/lib/Kconfig index 9c10e38fc609..830181cc7a83 100644 --- a/lib/Kconfig +++ b/lib/Kconfig | |||
@@ -19,16 +19,6 @@ config RATIONAL | |||
19 | config GENERIC_FIND_FIRST_BIT | 19 | config GENERIC_FIND_FIRST_BIT |
20 | bool | 20 | bool |
21 | 21 | ||
22 | config GENERIC_FIND_NEXT_BIT | ||
23 | bool | ||
24 | |||
25 | config GENERIC_FIND_BIT_LE | ||
26 | bool | ||
27 | |||
28 | config GENERIC_FIND_LAST_BIT | ||
29 | bool | ||
30 | default y | ||
31 | |||
32 | config CRC_CCITT | 22 | config CRC_CCITT |
33 | tristate "CRC-CCITT functions" | 23 | tristate "CRC-CCITT functions" |
34 | help | 24 | help |
diff --git a/lib/Makefile b/lib/Makefile index 4b49a249064b..6b597fdb1898 100644 --- a/lib/Makefile +++ b/lib/Makefile | |||
@@ -12,7 +12,7 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \ | |||
12 | idr.o int_sqrt.o extable.o prio_tree.o \ | 12 | idr.o int_sqrt.o extable.o prio_tree.o \ |
13 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ | 13 | sha1.o irq_regs.o reciprocal_div.o argv_split.o \ |
14 | proportions.o prio_heap.o ratelimit.o show_mem.o \ | 14 | proportions.o prio_heap.o ratelimit.o show_mem.o \ |
15 | is_single_threaded.o plist.o decompress.o | 15 | is_single_threaded.o plist.o decompress.o find_next_bit.o |
16 | 16 | ||
17 | lib-$(CONFIG_MMU) += ioremap.o | 17 | lib-$(CONFIG_MMU) += ioremap.o |
18 | lib-$(CONFIG_SMP) += cpumask.o | 18 | lib-$(CONFIG_SMP) += cpumask.o |
@@ -22,7 +22,7 @@ lib-y += kobject.o kref.o klist.o | |||
22 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ | 22 | obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \ |
23 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ | 23 | bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ |
24 | string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ | 24 | string_helpers.o gcd.o lcm.o list_sort.o uuid.o flex_array.o \ |
25 | bsearch.o | 25 | bsearch.o find_last_bit.o |
26 | obj-y += kstrtox.o | 26 | obj-y += kstrtox.o |
27 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o | 27 | obj-$(CONFIG_TEST_KSTRTOX) += test-kstrtox.o |
28 | 28 | ||
@@ -39,10 +39,6 @@ obj-$(CONFIG_DEBUG_LOCKING_API_SELFTESTS) += locking-selftest.o | |||
39 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o | 39 | obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o |
40 | lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o | 40 | lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o |
41 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o | 41 | lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o |
42 | lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o | ||
43 | lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o | ||
44 | lib-$(CONFIG_GENERIC_FIND_BIT_LE) += find_next_bit.o | ||
45 | obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o | ||
46 | 42 | ||
47 | CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) | 43 | CFLAGS_hweight.o = $(subst $(quote),,$(CONFIG_ARCH_HWEIGHT_CFLAGS)) |
48 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o | 44 | obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o |
diff --git a/lib/find_last_bit.c b/lib/find_last_bit.c index 5d202e36bdd8..d903959ad695 100644 --- a/lib/find_last_bit.c +++ b/lib/find_last_bit.c | |||
@@ -15,6 +15,8 @@ | |||
15 | #include <asm/types.h> | 15 | #include <asm/types.h> |
16 | #include <asm/byteorder.h> | 16 | #include <asm/byteorder.h> |
17 | 17 | ||
18 | #ifndef find_last_bit | ||
19 | |||
18 | unsigned long find_last_bit(const unsigned long *addr, unsigned long size) | 20 | unsigned long find_last_bit(const unsigned long *addr, unsigned long size) |
19 | { | 21 | { |
20 | unsigned long words; | 22 | unsigned long words; |
@@ -43,3 +45,5 @@ found: | |||
43 | return size; | 45 | return size; |
44 | } | 46 | } |
45 | EXPORT_SYMBOL(find_last_bit); | 47 | EXPORT_SYMBOL(find_last_bit); |
48 | |||
49 | #endif | ||
diff --git a/lib/find_next_bit.c b/lib/find_next_bit.c index b0a8767282bf..4bd75a73ba00 100644 --- a/lib/find_next_bit.c +++ b/lib/find_next_bit.c | |||
@@ -16,7 +16,7 @@ | |||
16 | 16 | ||
17 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) | 17 | #define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) |
18 | 18 | ||
19 | #ifdef CONFIG_GENERIC_FIND_NEXT_BIT | 19 | #ifndef find_next_bit |
20 | /* | 20 | /* |
21 | * Find the next set bit in a memory region. | 21 | * Find the next set bit in a memory region. |
22 | */ | 22 | */ |
@@ -59,7 +59,9 @@ found_middle: | |||
59 | return result + __ffs(tmp); | 59 | return result + __ffs(tmp); |
60 | } | 60 | } |
61 | EXPORT_SYMBOL(find_next_bit); | 61 | EXPORT_SYMBOL(find_next_bit); |
62 | #endif | ||
62 | 63 | ||
64 | #ifndef find_next_zero_bit | ||
63 | /* | 65 | /* |
64 | * This implementation of find_{first,next}_zero_bit was stolen from | 66 | * This implementation of find_{first,next}_zero_bit was stolen from |
65 | * Linus' asm-alpha/bitops.h. | 67 | * Linus' asm-alpha/bitops.h. |
@@ -103,9 +105,9 @@ found_middle: | |||
103 | return result + ffz(tmp); | 105 | return result + ffz(tmp); |
104 | } | 106 | } |
105 | EXPORT_SYMBOL(find_next_zero_bit); | 107 | EXPORT_SYMBOL(find_next_zero_bit); |
106 | #endif /* CONFIG_GENERIC_FIND_NEXT_BIT */ | 108 | #endif |
107 | 109 | ||
108 | #ifdef CONFIG_GENERIC_FIND_FIRST_BIT | 110 | #ifndef find_first_bit |
109 | /* | 111 | /* |
110 | * Find the first set bit in a memory region. | 112 | * Find the first set bit in a memory region. |
111 | */ | 113 | */ |
@@ -131,7 +133,9 @@ found: | |||
131 | return result + __ffs(tmp); | 133 | return result + __ffs(tmp); |
132 | } | 134 | } |
133 | EXPORT_SYMBOL(find_first_bit); | 135 | EXPORT_SYMBOL(find_first_bit); |
136 | #endif | ||
134 | 137 | ||
138 | #ifndef find_first_zero_bit | ||
135 | /* | 139 | /* |
136 | * Find the first cleared bit in a memory region. | 140 | * Find the first cleared bit in a memory region. |
137 | */ | 141 | */ |
@@ -157,10 +161,9 @@ found: | |||
157 | return result + ffz(tmp); | 161 | return result + ffz(tmp); |
158 | } | 162 | } |
159 | EXPORT_SYMBOL(find_first_zero_bit); | 163 | EXPORT_SYMBOL(find_first_zero_bit); |
160 | #endif /* CONFIG_GENERIC_FIND_FIRST_BIT */ | 164 | #endif |
161 | 165 | ||
162 | #ifdef __BIG_ENDIAN | 166 | #ifdef __BIG_ENDIAN |
163 | #ifdef CONFIG_GENERIC_FIND_BIT_LE | ||
164 | 167 | ||
165 | /* include/linux/byteorder does not support "unsigned long" type */ | 168 | /* include/linux/byteorder does not support "unsigned long" type */ |
166 | static inline unsigned long ext2_swabp(const unsigned long * x) | 169 | static inline unsigned long ext2_swabp(const unsigned long * x) |
@@ -186,6 +189,7 @@ static inline unsigned long ext2_swab(const unsigned long y) | |||
186 | #endif | 189 | #endif |
187 | } | 190 | } |
188 | 191 | ||
192 | #ifndef find_next_zero_bit_le | ||
189 | unsigned long find_next_zero_bit_le(const void *addr, unsigned | 193 | unsigned long find_next_zero_bit_le(const void *addr, unsigned |
190 | long size, unsigned long offset) | 194 | long size, unsigned long offset) |
191 | { | 195 | { |
@@ -229,7 +233,9 @@ found_middle_swap: | |||
229 | return result + ffz(ext2_swab(tmp)); | 233 | return result + ffz(ext2_swab(tmp)); |
230 | } | 234 | } |
231 | EXPORT_SYMBOL(find_next_zero_bit_le); | 235 | EXPORT_SYMBOL(find_next_zero_bit_le); |
236 | #endif | ||
232 | 237 | ||
238 | #ifndef find_next_bit_le | ||
233 | unsigned long find_next_bit_le(const void *addr, unsigned | 239 | unsigned long find_next_bit_le(const void *addr, unsigned |
234 | long size, unsigned long offset) | 240 | long size, unsigned long offset) |
235 | { | 241 | { |
@@ -274,6 +280,6 @@ found_middle_swap: | |||
274 | return result + __ffs(ext2_swab(tmp)); | 280 | return result + __ffs(ext2_swab(tmp)); |
275 | } | 281 | } |
276 | EXPORT_SYMBOL(find_next_bit_le); | 282 | EXPORT_SYMBOL(find_next_bit_le); |
283 | #endif | ||
277 | 284 | ||
278 | #endif /* CONFIG_GENERIC_FIND_BIT_LE */ | ||
279 | #endif /* __BIG_ENDIAN */ | 285 | #endif /* __BIG_ENDIAN */ |
diff --git a/lib/flex_array.c b/lib/flex_array.c index cab7621f98aa..9b8b89458c4c 100644 --- a/lib/flex_array.c +++ b/lib/flex_array.c | |||
@@ -24,6 +24,7 @@ | |||
24 | #include <linux/slab.h> | 24 | #include <linux/slab.h> |
25 | #include <linux/stddef.h> | 25 | #include <linux/stddef.h> |
26 | #include <linux/module.h> | 26 | #include <linux/module.h> |
27 | #include <linux/reciprocal_div.h> | ||
27 | 28 | ||
28 | struct flex_array_part { | 29 | struct flex_array_part { |
29 | char elements[FLEX_ARRAY_PART_SIZE]; | 30 | char elements[FLEX_ARRAY_PART_SIZE]; |
@@ -70,15 +71,15 @@ static inline int elements_fit_in_base(struct flex_array *fa) | |||
70 | * Element size | Objects | Objects | | 71 | * Element size | Objects | Objects | |
71 | * PAGE_SIZE=4k | 32-bit | 64-bit | | 72 | * PAGE_SIZE=4k | 32-bit | 64-bit | |
72 | * ---------------------------------| | 73 | * ---------------------------------| |
73 | * 1 bytes | 4186112 | 2093056 | | 74 | * 1 bytes | 4177920 | 2088960 | |
74 | * 2 bytes | 2093056 | 1046528 | | 75 | * 2 bytes | 2088960 | 1044480 | |
75 | * 3 bytes | 1395030 | 697515 | | 76 | * 3 bytes | 1392300 | 696150 | |
76 | * 4 bytes | 1046528 | 523264 | | 77 | * 4 bytes | 1044480 | 522240 | |
77 | * 32 bytes | 130816 | 65408 | | 78 | * 32 bytes | 130560 | 65408 | |
78 | * 33 bytes | 126728 | 63364 | | 79 | * 33 bytes | 126480 | 63240 | |
79 | * 2048 bytes | 2044 | 1022 | | 80 | * 2048 bytes | 2040 | 1020 | |
80 | * 2049 bytes | 1022 | 511 | | 81 | * 2049 bytes | 1020 | 510 | |
81 | * void * | 1046528 | 261632 | | 82 | * void * | 1044480 | 261120 | |
82 | * | 83 | * |
83 | * Since 64-bit pointers are twice the size, we lose half the | 84 | * Since 64-bit pointers are twice the size, we lose half the |
84 | * capacity in the base structure. Also note that no effort is made | 85 | * capacity in the base structure. Also note that no effort is made |
@@ -88,11 +89,15 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total, | |||
88 | gfp_t flags) | 89 | gfp_t flags) |
89 | { | 90 | { |
90 | struct flex_array *ret; | 91 | struct flex_array *ret; |
92 | int elems_per_part = 0; | ||
93 | int reciprocal_elems = 0; | ||
91 | int max_size = 0; | 94 | int max_size = 0; |
92 | 95 | ||
93 | if (element_size) | 96 | if (element_size) { |
94 | max_size = FLEX_ARRAY_NR_BASE_PTRS * | 97 | elems_per_part = FLEX_ARRAY_ELEMENTS_PER_PART(element_size); |
95 | FLEX_ARRAY_ELEMENTS_PER_PART(element_size); | 98 | reciprocal_elems = reciprocal_value(elems_per_part); |
99 | max_size = FLEX_ARRAY_NR_BASE_PTRS * elems_per_part; | ||
100 | } | ||
96 | 101 | ||
97 | /* max_size will end up 0 if element_size > PAGE_SIZE */ | 102 | /* max_size will end up 0 if element_size > PAGE_SIZE */ |
98 | if (total > max_size) | 103 | if (total > max_size) |
@@ -102,6 +107,8 @@ struct flex_array *flex_array_alloc(int element_size, unsigned int total, | |||
102 | return NULL; | 107 | return NULL; |
103 | ret->element_size = element_size; | 108 | ret->element_size = element_size; |
104 | ret->total_nr_elements = total; | 109 | ret->total_nr_elements = total; |
110 | ret->elems_per_part = elems_per_part; | ||
111 | ret->reciprocal_elems = reciprocal_elems; | ||
105 | if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) | 112 | if (elements_fit_in_base(ret) && !(flags & __GFP_ZERO)) |
106 | memset(&ret->parts[0], FLEX_ARRAY_FREE, | 113 | memset(&ret->parts[0], FLEX_ARRAY_FREE, |
107 | FLEX_ARRAY_BASE_BYTES_LEFT); | 114 | FLEX_ARRAY_BASE_BYTES_LEFT); |
@@ -112,7 +119,7 @@ EXPORT_SYMBOL(flex_array_alloc); | |||
112 | static int fa_element_to_part_nr(struct flex_array *fa, | 119 | static int fa_element_to_part_nr(struct flex_array *fa, |
113 | unsigned int element_nr) | 120 | unsigned int element_nr) |
114 | { | 121 | { |
115 | return element_nr / FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size); | 122 | return reciprocal_divide(element_nr, fa->reciprocal_elems); |
116 | } | 123 | } |
117 | 124 | ||
118 | /** | 125 | /** |
@@ -141,12 +148,12 @@ void flex_array_free(struct flex_array *fa) | |||
141 | EXPORT_SYMBOL(flex_array_free); | 148 | EXPORT_SYMBOL(flex_array_free); |
142 | 149 | ||
143 | static unsigned int index_inside_part(struct flex_array *fa, | 150 | static unsigned int index_inside_part(struct flex_array *fa, |
144 | unsigned int element_nr) | 151 | unsigned int element_nr, |
152 | unsigned int part_nr) | ||
145 | { | 153 | { |
146 | unsigned int part_offset; | 154 | unsigned int part_offset; |
147 | 155 | ||
148 | part_offset = element_nr % | 156 | part_offset = element_nr - part_nr * fa->elems_per_part; |
149 | FLEX_ARRAY_ELEMENTS_PER_PART(fa->element_size); | ||
150 | return part_offset * fa->element_size; | 157 | return part_offset * fa->element_size; |
151 | } | 158 | } |
152 | 159 | ||
@@ -186,7 +193,7 @@ __fa_get_part(struct flex_array *fa, int part_nr, gfp_t flags) | |||
186 | int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, | 193 | int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, |
187 | gfp_t flags) | 194 | gfp_t flags) |
188 | { | 195 | { |
189 | int part_nr; | 196 | int part_nr = 0; |
190 | struct flex_array_part *part; | 197 | struct flex_array_part *part; |
191 | void *dst; | 198 | void *dst; |
192 | 199 | ||
@@ -202,7 +209,7 @@ int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src, | |||
202 | if (!part) | 209 | if (!part) |
203 | return -ENOMEM; | 210 | return -ENOMEM; |
204 | } | 211 | } |
205 | dst = &part->elements[index_inside_part(fa, element_nr)]; | 212 | dst = &part->elements[index_inside_part(fa, element_nr, part_nr)]; |
206 | memcpy(dst, src, fa->element_size); | 213 | memcpy(dst, src, fa->element_size); |
207 | return 0; | 214 | return 0; |
208 | } | 215 | } |
@@ -217,7 +224,7 @@ EXPORT_SYMBOL(flex_array_put); | |||
217 | */ | 224 | */ |
218 | int flex_array_clear(struct flex_array *fa, unsigned int element_nr) | 225 | int flex_array_clear(struct flex_array *fa, unsigned int element_nr) |
219 | { | 226 | { |
220 | int part_nr; | 227 | int part_nr = 0; |
221 | struct flex_array_part *part; | 228 | struct flex_array_part *part; |
222 | void *dst; | 229 | void *dst; |
223 | 230 | ||
@@ -233,7 +240,7 @@ int flex_array_clear(struct flex_array *fa, unsigned int element_nr) | |||
233 | if (!part) | 240 | if (!part) |
234 | return -EINVAL; | 241 | return -EINVAL; |
235 | } | 242 | } |
236 | dst = &part->elements[index_inside_part(fa, element_nr)]; | 243 | dst = &part->elements[index_inside_part(fa, element_nr, part_nr)]; |
237 | memset(dst, FLEX_ARRAY_FREE, fa->element_size); | 244 | memset(dst, FLEX_ARRAY_FREE, fa->element_size); |
238 | return 0; | 245 | return 0; |
239 | } | 246 | } |
@@ -302,7 +309,7 @@ EXPORT_SYMBOL(flex_array_prealloc); | |||
302 | */ | 309 | */ |
303 | void *flex_array_get(struct flex_array *fa, unsigned int element_nr) | 310 | void *flex_array_get(struct flex_array *fa, unsigned int element_nr) |
304 | { | 311 | { |
305 | int part_nr; | 312 | int part_nr = 0; |
306 | struct flex_array_part *part; | 313 | struct flex_array_part *part; |
307 | 314 | ||
308 | if (!fa->element_size) | 315 | if (!fa->element_size) |
@@ -317,7 +324,7 @@ void *flex_array_get(struct flex_array *fa, unsigned int element_nr) | |||
317 | if (!part) | 324 | if (!part) |
318 | return NULL; | 325 | return NULL; |
319 | } | 326 | } |
320 | return &part->elements[index_inside_part(fa, element_nr)]; | 327 | return &part->elements[index_inside_part(fa, element_nr, part_nr)]; |
321 | } | 328 | } |
322 | EXPORT_SYMBOL(flex_array_get); | 329 | EXPORT_SYMBOL(flex_array_get); |
323 | 330 | ||
diff --git a/mm/Kconfig b/mm/Kconfig index e9c0c61f2ddd..8ca47a5ee9c8 100644 --- a/mm/Kconfig +++ b/mm/Kconfig | |||
@@ -347,3 +347,26 @@ config NEED_PER_CPU_KM | |||
347 | depends on !SMP | 347 | depends on !SMP |
348 | bool | 348 | bool |
349 | default y | 349 | default y |
350 | |||
351 | config CLEANCACHE | ||
352 | bool "Enable cleancache driver to cache clean pages if tmem is present" | ||
353 | default n | ||
354 | help | ||
355 | Cleancache can be thought of as a page-granularity victim cache | ||
356 | for clean pages that the kernel's pageframe replacement algorithm | ||
357 | (PFRA) would like to keep around, but can't since there isn't enough | ||
358 | memory. So when the PFRA "evicts" a page, it first attempts to use | ||
359 | cleancacne code to put the data contained in that page into | ||
360 | "transcendent memory", memory that is not directly accessible or | ||
361 | addressable by the kernel and is of unknown and possibly | ||
362 | time-varying size. And when a cleancache-enabled | ||
363 | filesystem wishes to access a page in a file on disk, it first | ||
364 | checks cleancache to see if it already contains it; if it does, | ||
365 | the page is copied into the kernel and a disk access is avoided. | ||
366 | When a transcendent memory driver is available (such as zcache or | ||
367 | Xen transcendent memory), a significant I/O reduction | ||
368 | may be achieved. When none is available, all cleancache calls | ||
369 | are reduced to a single pointer-compare-against-NULL resulting | ||
370 | in a negligible performance hit. | ||
371 | |||
372 | If unsure, say Y to enable cleancache | ||
diff --git a/mm/Makefile b/mm/Makefile index 42a8326c3e3d..836e4163c1bf 100644 --- a/mm/Makefile +++ b/mm/Makefile | |||
@@ -49,3 +49,4 @@ obj-$(CONFIG_MEMORY_FAILURE) += memory-failure.o | |||
49 | obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o | 49 | obj-$(CONFIG_HWPOISON_INJECT) += hwpoison-inject.o |
50 | obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o | 50 | obj-$(CONFIG_DEBUG_KMEMLEAK) += kmemleak.o |
51 | obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o | 51 | obj-$(CONFIG_DEBUG_KMEMLEAK_TEST) += kmemleak-test.o |
52 | obj-$(CONFIG_CLEANCACHE) += cleancache.o | ||
diff --git a/mm/cleancache.c b/mm/cleancache.c new file mode 100644 index 000000000000..bcaae4c2a770 --- /dev/null +++ b/mm/cleancache.c | |||
@@ -0,0 +1,244 @@ | |||
1 | /* | ||
2 | * Cleancache frontend | ||
3 | * | ||
4 | * This code provides the generic "frontend" layer to call a matching | ||
5 | * "backend" driver implementation of cleancache. See | ||
6 | * Documentation/vm/cleancache.txt for more information. | ||
7 | * | ||
8 | * Copyright (C) 2009-2010 Oracle Corp. All rights reserved. | ||
9 | * Author: Dan Magenheimer | ||
10 | * | ||
11 | * This work is licensed under the terms of the GNU GPL, version 2. | ||
12 | */ | ||
13 | |||
14 | #include <linux/module.h> | ||
15 | #include <linux/fs.h> | ||
16 | #include <linux/exportfs.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/cleancache.h> | ||
19 | |||
20 | /* | ||
21 | * This global enablement flag may be read thousands of times per second | ||
22 | * by cleancache_get/put/flush even on systems where cleancache_ops | ||
23 | * is not claimed (e.g. cleancache is config'ed on but remains | ||
24 | * disabled), so is preferred to the slower alternative: a function | ||
25 | * call that checks a non-global. | ||
26 | */ | ||
27 | int cleancache_enabled; | ||
28 | EXPORT_SYMBOL(cleancache_enabled); | ||
29 | |||
30 | /* | ||
31 | * cleancache_ops is set by cleancache_ops_register to contain the pointers | ||
32 | * to the cleancache "backend" implementation functions. | ||
33 | */ | ||
34 | static struct cleancache_ops cleancache_ops; | ||
35 | |||
36 | /* useful stats available in /sys/kernel/mm/cleancache */ | ||
37 | static unsigned long cleancache_succ_gets; | ||
38 | static unsigned long cleancache_failed_gets; | ||
39 | static unsigned long cleancache_puts; | ||
40 | static unsigned long cleancache_flushes; | ||
41 | |||
42 | /* | ||
43 | * register operations for cleancache, returning previous thus allowing | ||
44 | * detection of multiple backends and possible nesting | ||
45 | */ | ||
46 | struct cleancache_ops cleancache_register_ops(struct cleancache_ops *ops) | ||
47 | { | ||
48 | struct cleancache_ops old = cleancache_ops; | ||
49 | |||
50 | cleancache_ops = *ops; | ||
51 | cleancache_enabled = 1; | ||
52 | return old; | ||
53 | } | ||
54 | EXPORT_SYMBOL(cleancache_register_ops); | ||
55 | |||
56 | /* Called by a cleancache-enabled filesystem at time of mount */ | ||
57 | void __cleancache_init_fs(struct super_block *sb) | ||
58 | { | ||
59 | sb->cleancache_poolid = (*cleancache_ops.init_fs)(PAGE_SIZE); | ||
60 | } | ||
61 | EXPORT_SYMBOL(__cleancache_init_fs); | ||
62 | |||
63 | /* Called by a cleancache-enabled clustered filesystem at time of mount */ | ||
64 | void __cleancache_init_shared_fs(char *uuid, struct super_block *sb) | ||
65 | { | ||
66 | sb->cleancache_poolid = | ||
67 | (*cleancache_ops.init_shared_fs)(uuid, PAGE_SIZE); | ||
68 | } | ||
69 | EXPORT_SYMBOL(__cleancache_init_shared_fs); | ||
70 | |||
71 | /* | ||
72 | * If the filesystem uses exportable filehandles, use the filehandle as | ||
73 | * the key, else use the inode number. | ||
74 | */ | ||
75 | static int cleancache_get_key(struct inode *inode, | ||
76 | struct cleancache_filekey *key) | ||
77 | { | ||
78 | int (*fhfn)(struct dentry *, __u32 *fh, int *, int); | ||
79 | int len = 0, maxlen = CLEANCACHE_KEY_MAX; | ||
80 | struct super_block *sb = inode->i_sb; | ||
81 | |||
82 | key->u.ino = inode->i_ino; | ||
83 | if (sb->s_export_op != NULL) { | ||
84 | fhfn = sb->s_export_op->encode_fh; | ||
85 | if (fhfn) { | ||
86 | struct dentry d; | ||
87 | d.d_inode = inode; | ||
88 | len = (*fhfn)(&d, &key->u.fh[0], &maxlen, 0); | ||
89 | if (len <= 0 || len == 255) | ||
90 | return -1; | ||
91 | if (maxlen > CLEANCACHE_KEY_MAX) | ||
92 | return -1; | ||
93 | } | ||
94 | } | ||
95 | return 0; | ||
96 | } | ||
97 | |||
98 | /* | ||
99 | * "Get" data from cleancache associated with the poolid/inode/index | ||
100 | * that were specified when the data was put to cleanache and, if | ||
101 | * successful, use it to fill the specified page with data and return 0. | ||
102 | * The pageframe is unchanged and returns -1 if the get fails. | ||
103 | * Page must be locked by caller. | ||
104 | */ | ||
105 | int __cleancache_get_page(struct page *page) | ||
106 | { | ||
107 | int ret = -1; | ||
108 | int pool_id; | ||
109 | struct cleancache_filekey key = { .u.key = { 0 } }; | ||
110 | |||
111 | VM_BUG_ON(!PageLocked(page)); | ||
112 | pool_id = page->mapping->host->i_sb->cleancache_poolid; | ||
113 | if (pool_id < 0) | ||
114 | goto out; | ||
115 | |||
116 | if (cleancache_get_key(page->mapping->host, &key) < 0) | ||
117 | goto out; | ||
118 | |||
119 | ret = (*cleancache_ops.get_page)(pool_id, key, page->index, page); | ||
120 | if (ret == 0) | ||
121 | cleancache_succ_gets++; | ||
122 | else | ||
123 | cleancache_failed_gets++; | ||
124 | out: | ||
125 | return ret; | ||
126 | } | ||
127 | EXPORT_SYMBOL(__cleancache_get_page); | ||
128 | |||
129 | /* | ||
130 | * "Put" data from a page to cleancache and associate it with the | ||
131 | * (previously-obtained per-filesystem) poolid and the page's, | ||
132 | * inode and page index. Page must be locked. Note that a put_page | ||
133 | * always "succeeds", though a subsequent get_page may succeed or fail. | ||
134 | */ | ||
135 | void __cleancache_put_page(struct page *page) | ||
136 | { | ||
137 | int pool_id; | ||
138 | struct cleancache_filekey key = { .u.key = { 0 } }; | ||
139 | |||
140 | VM_BUG_ON(!PageLocked(page)); | ||
141 | pool_id = page->mapping->host->i_sb->cleancache_poolid; | ||
142 | if (pool_id >= 0 && | ||
143 | cleancache_get_key(page->mapping->host, &key) >= 0) { | ||
144 | (*cleancache_ops.put_page)(pool_id, key, page->index, page); | ||
145 | cleancache_puts++; | ||
146 | } | ||
147 | } | ||
148 | EXPORT_SYMBOL(__cleancache_put_page); | ||
149 | |||
150 | /* | ||
151 | * Flush any data from cleancache associated with the poolid and the | ||
152 | * page's inode and page index so that a subsequent "get" will fail. | ||
153 | */ | ||
154 | void __cleancache_flush_page(struct address_space *mapping, struct page *page) | ||
155 | { | ||
156 | /* careful... page->mapping is NULL sometimes when this is called */ | ||
157 | int pool_id = mapping->host->i_sb->cleancache_poolid; | ||
158 | struct cleancache_filekey key = { .u.key = { 0 } }; | ||
159 | |||
160 | if (pool_id >= 0) { | ||
161 | VM_BUG_ON(!PageLocked(page)); | ||
162 | if (cleancache_get_key(mapping->host, &key) >= 0) { | ||
163 | (*cleancache_ops.flush_page)(pool_id, key, page->index); | ||
164 | cleancache_flushes++; | ||
165 | } | ||
166 | } | ||
167 | } | ||
168 | EXPORT_SYMBOL(__cleancache_flush_page); | ||
169 | |||
170 | /* | ||
171 | * Flush all data from cleancache associated with the poolid and the | ||
172 | * mappings's inode so that all subsequent gets to this poolid/inode | ||
173 | * will fail. | ||
174 | */ | ||
175 | void __cleancache_flush_inode(struct address_space *mapping) | ||
176 | { | ||
177 | int pool_id = mapping->host->i_sb->cleancache_poolid; | ||
178 | struct cleancache_filekey key = { .u.key = { 0 } }; | ||
179 | |||
180 | if (pool_id >= 0 && cleancache_get_key(mapping->host, &key) >= 0) | ||
181 | (*cleancache_ops.flush_inode)(pool_id, key); | ||
182 | } | ||
183 | EXPORT_SYMBOL(__cleancache_flush_inode); | ||
184 | |||
185 | /* | ||
186 | * Called by any cleancache-enabled filesystem at time of unmount; | ||
187 | * note that pool_id is surrendered and may be reutrned by a subsequent | ||
188 | * cleancache_init_fs or cleancache_init_shared_fs | ||
189 | */ | ||
190 | void __cleancache_flush_fs(struct super_block *sb) | ||
191 | { | ||
192 | if (sb->cleancache_poolid >= 0) { | ||
193 | int old_poolid = sb->cleancache_poolid; | ||
194 | sb->cleancache_poolid = -1; | ||
195 | (*cleancache_ops.flush_fs)(old_poolid); | ||
196 | } | ||
197 | } | ||
198 | EXPORT_SYMBOL(__cleancache_flush_fs); | ||
199 | |||
200 | #ifdef CONFIG_SYSFS | ||
201 | |||
202 | /* see Documentation/ABI/xxx/sysfs-kernel-mm-cleancache */ | ||
203 | |||
204 | #define CLEANCACHE_SYSFS_RO(_name) \ | ||
205 | static ssize_t cleancache_##_name##_show(struct kobject *kobj, \ | ||
206 | struct kobj_attribute *attr, char *buf) \ | ||
207 | { \ | ||
208 | return sprintf(buf, "%lu\n", cleancache_##_name); \ | ||
209 | } \ | ||
210 | static struct kobj_attribute cleancache_##_name##_attr = { \ | ||
211 | .attr = { .name = __stringify(_name), .mode = 0444 }, \ | ||
212 | .show = cleancache_##_name##_show, \ | ||
213 | } | ||
214 | |||
215 | CLEANCACHE_SYSFS_RO(succ_gets); | ||
216 | CLEANCACHE_SYSFS_RO(failed_gets); | ||
217 | CLEANCACHE_SYSFS_RO(puts); | ||
218 | CLEANCACHE_SYSFS_RO(flushes); | ||
219 | |||
220 | static struct attribute *cleancache_attrs[] = { | ||
221 | &cleancache_succ_gets_attr.attr, | ||
222 | &cleancache_failed_gets_attr.attr, | ||
223 | &cleancache_puts_attr.attr, | ||
224 | &cleancache_flushes_attr.attr, | ||
225 | NULL, | ||
226 | }; | ||
227 | |||
228 | static struct attribute_group cleancache_attr_group = { | ||
229 | .attrs = cleancache_attrs, | ||
230 | .name = "cleancache", | ||
231 | }; | ||
232 | |||
233 | #endif /* CONFIG_SYSFS */ | ||
234 | |||
235 | static int __init init_cleancache(void) | ||
236 | { | ||
237 | #ifdef CONFIG_SYSFS | ||
238 | int err; | ||
239 | |||
240 | err = sysfs_create_group(mm_kobj, &cleancache_attr_group); | ||
241 | #endif /* CONFIG_SYSFS */ | ||
242 | return 0; | ||
243 | } | ||
244 | module_init(init_cleancache) | ||
diff --git a/mm/filemap.c b/mm/filemap.c index 68e782b3d3de..bcdc393b6580 100644 --- a/mm/filemap.c +++ b/mm/filemap.c | |||
@@ -34,6 +34,7 @@ | |||
34 | #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */ | 34 | #include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */ |
35 | #include <linux/memcontrol.h> | 35 | #include <linux/memcontrol.h> |
36 | #include <linux/mm_inline.h> /* for page_is_file_cache() */ | 36 | #include <linux/mm_inline.h> /* for page_is_file_cache() */ |
37 | #include <linux/cleancache.h> | ||
37 | #include "internal.h" | 38 | #include "internal.h" |
38 | 39 | ||
39 | /* | 40 | /* |
@@ -118,6 +119,16 @@ void __delete_from_page_cache(struct page *page) | |||
118 | { | 119 | { |
119 | struct address_space *mapping = page->mapping; | 120 | struct address_space *mapping = page->mapping; |
120 | 121 | ||
122 | /* | ||
123 | * if we're uptodate, flush out into the cleancache, otherwise | ||
124 | * invalidate any existing cleancache entries. We can't leave | ||
125 | * stale data around in the cleancache once our page is gone | ||
126 | */ | ||
127 | if (PageUptodate(page) && PageMappedToDisk(page)) | ||
128 | cleancache_put_page(page); | ||
129 | else | ||
130 | cleancache_flush_page(mapping, page); | ||
131 | |||
121 | radix_tree_delete(&mapping->page_tree, page->index); | 132 | radix_tree_delete(&mapping->page_tree, page->index); |
122 | page->mapping = NULL; | 133 | page->mapping = NULL; |
123 | mapping->nrpages--; | 134 | mapping->nrpages--; |
@@ -1650,6 +1661,7 @@ int filemap_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1650 | /* No page in the page cache at all */ | 1661 | /* No page in the page cache at all */ |
1651 | do_sync_mmap_readahead(vma, ra, file, offset); | 1662 | do_sync_mmap_readahead(vma, ra, file, offset); |
1652 | count_vm_event(PGMAJFAULT); | 1663 | count_vm_event(PGMAJFAULT); |
1664 | mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); | ||
1653 | ret = VM_FAULT_MAJOR; | 1665 | ret = VM_FAULT_MAJOR; |
1654 | retry_find: | 1666 | retry_find: |
1655 | page = find_get_page(mapping, offset); | 1667 | page = find_get_page(mapping, offset); |
diff --git a/mm/fremap.c b/mm/fremap.c index 7f4123056e06..b8e0e2d468af 100644 --- a/mm/fremap.c +++ b/mm/fremap.c | |||
@@ -224,7 +224,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size, | |||
224 | /* | 224 | /* |
225 | * drop PG_Mlocked flag for over-mapped range | 225 | * drop PG_Mlocked flag for over-mapped range |
226 | */ | 226 | */ |
227 | unsigned int saved_flags = vma->vm_flags; | 227 | vm_flags_t saved_flags = vma->vm_flags; |
228 | munlock_vma_pages_range(vma, start, start + size); | 228 | munlock_vma_pages_range(vma, start, start + size); |
229 | vma->vm_flags = saved_flags; | 229 | vma->vm_flags = saved_flags; |
230 | } | 230 | } |
diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 5fd68b95c671..f33bb319b73f 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c | |||
@@ -2833,7 +2833,7 @@ void hugetlb_change_protection(struct vm_area_struct *vma, | |||
2833 | int hugetlb_reserve_pages(struct inode *inode, | 2833 | int hugetlb_reserve_pages(struct inode *inode, |
2834 | long from, long to, | 2834 | long from, long to, |
2835 | struct vm_area_struct *vma, | 2835 | struct vm_area_struct *vma, |
2836 | int acctflag) | 2836 | vm_flags_t vm_flags) |
2837 | { | 2837 | { |
2838 | long ret, chg; | 2838 | long ret, chg; |
2839 | struct hstate *h = hstate_inode(inode); | 2839 | struct hstate *h = hstate_inode(inode); |
@@ -2843,7 +2843,7 @@ int hugetlb_reserve_pages(struct inode *inode, | |||
2843 | * attempt will be made for VM_NORESERVE to allocate a page | 2843 | * attempt will be made for VM_NORESERVE to allocate a page |
2844 | * and filesystem quota without using reserves | 2844 | * and filesystem quota without using reserves |
2845 | */ | 2845 | */ |
2846 | if (acctflag & VM_NORESERVE) | 2846 | if (vm_flags & VM_NORESERVE) |
2847 | return 0; | 2847 | return 0; |
2848 | 2848 | ||
2849 | /* | 2849 | /* |
diff --git a/mm/memcontrol.c b/mm/memcontrol.c index d5fd3dcd3f2e..bd9052a5d3ad 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c | |||
@@ -94,6 +94,8 @@ enum mem_cgroup_events_index { | |||
94 | MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */ | 94 | MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */ |
95 | MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */ | 95 | MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */ |
96 | MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */ | 96 | MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */ |
97 | MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */ | ||
98 | MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */ | ||
97 | MEM_CGROUP_EVENTS_NSTATS, | 99 | MEM_CGROUP_EVENTS_NSTATS, |
98 | }; | 100 | }; |
99 | /* | 101 | /* |
@@ -231,6 +233,11 @@ struct mem_cgroup { | |||
231 | * reclaimed from. | 233 | * reclaimed from. |
232 | */ | 234 | */ |
233 | int last_scanned_child; | 235 | int last_scanned_child; |
236 | int last_scanned_node; | ||
237 | #if MAX_NUMNODES > 1 | ||
238 | nodemask_t scan_nodes; | ||
239 | unsigned long next_scan_node_update; | ||
240 | #endif | ||
234 | /* | 241 | /* |
235 | * Should the accounting and control be hierarchical, per subtree? | 242 | * Should the accounting and control be hierarchical, per subtree? |
236 | */ | 243 | */ |
@@ -585,6 +592,16 @@ static void mem_cgroup_swap_statistics(struct mem_cgroup *mem, | |||
585 | this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val); | 592 | this_cpu_add(mem->stat->count[MEM_CGROUP_STAT_SWAPOUT], val); |
586 | } | 593 | } |
587 | 594 | ||
595 | void mem_cgroup_pgfault(struct mem_cgroup *mem, int val) | ||
596 | { | ||
597 | this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGFAULT], val); | ||
598 | } | ||
599 | |||
600 | void mem_cgroup_pgmajfault(struct mem_cgroup *mem, int val) | ||
601 | { | ||
602 | this_cpu_add(mem->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT], val); | ||
603 | } | ||
604 | |||
588 | static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem, | 605 | static unsigned long mem_cgroup_read_events(struct mem_cgroup *mem, |
589 | enum mem_cgroup_events_index idx) | 606 | enum mem_cgroup_events_index idx) |
590 | { | 607 | { |
@@ -624,18 +641,27 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *mem, | |||
624 | preempt_enable(); | 641 | preempt_enable(); |
625 | } | 642 | } |
626 | 643 | ||
644 | static unsigned long | ||
645 | mem_cgroup_get_zonestat_node(struct mem_cgroup *mem, int nid, enum lru_list idx) | ||
646 | { | ||
647 | struct mem_cgroup_per_zone *mz; | ||
648 | u64 total = 0; | ||
649 | int zid; | ||
650 | |||
651 | for (zid = 0; zid < MAX_NR_ZONES; zid++) { | ||
652 | mz = mem_cgroup_zoneinfo(mem, nid, zid); | ||
653 | total += MEM_CGROUP_ZSTAT(mz, idx); | ||
654 | } | ||
655 | return total; | ||
656 | } | ||
627 | static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem, | 657 | static unsigned long mem_cgroup_get_local_zonestat(struct mem_cgroup *mem, |
628 | enum lru_list idx) | 658 | enum lru_list idx) |
629 | { | 659 | { |
630 | int nid, zid; | 660 | int nid; |
631 | struct mem_cgroup_per_zone *mz; | ||
632 | u64 total = 0; | 661 | u64 total = 0; |
633 | 662 | ||
634 | for_each_online_node(nid) | 663 | for_each_online_node(nid) |
635 | for (zid = 0; zid < MAX_NR_ZONES; zid++) { | 664 | total += mem_cgroup_get_zonestat_node(mem, nid, idx); |
636 | mz = mem_cgroup_zoneinfo(mem, nid, zid); | ||
637 | total += MEM_CGROUP_ZSTAT(mz, idx); | ||
638 | } | ||
639 | return total; | 665 | return total; |
640 | } | 666 | } |
641 | 667 | ||
@@ -813,6 +839,33 @@ static inline bool mem_cgroup_is_root(struct mem_cgroup *mem) | |||
813 | return (mem == root_mem_cgroup); | 839 | return (mem == root_mem_cgroup); |
814 | } | 840 | } |
815 | 841 | ||
842 | void mem_cgroup_count_vm_event(struct mm_struct *mm, enum vm_event_item idx) | ||
843 | { | ||
844 | struct mem_cgroup *mem; | ||
845 | |||
846 | if (!mm) | ||
847 | return; | ||
848 | |||
849 | rcu_read_lock(); | ||
850 | mem = mem_cgroup_from_task(rcu_dereference(mm->owner)); | ||
851 | if (unlikely(!mem)) | ||
852 | goto out; | ||
853 | |||
854 | switch (idx) { | ||
855 | case PGMAJFAULT: | ||
856 | mem_cgroup_pgmajfault(mem, 1); | ||
857 | break; | ||
858 | case PGFAULT: | ||
859 | mem_cgroup_pgfault(mem, 1); | ||
860 | break; | ||
861 | default: | ||
862 | BUG(); | ||
863 | } | ||
864 | out: | ||
865 | rcu_read_unlock(); | ||
866 | } | ||
867 | EXPORT_SYMBOL(mem_cgroup_count_vm_event); | ||
868 | |||
816 | /* | 869 | /* |
817 | * Following LRU functions are allowed to be used without PCG_LOCK. | 870 | * Following LRU functions are allowed to be used without PCG_LOCK. |
818 | * Operations are called by routine of global LRU independently from memcg. | 871 | * Operations are called by routine of global LRU independently from memcg. |
@@ -1064,9 +1117,9 @@ int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg) | |||
1064 | return (active > inactive); | 1117 | return (active > inactive); |
1065 | } | 1118 | } |
1066 | 1119 | ||
1067 | unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, | 1120 | unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, |
1068 | struct zone *zone, | 1121 | struct zone *zone, |
1069 | enum lru_list lru) | 1122 | enum lru_list lru) |
1070 | { | 1123 | { |
1071 | int nid = zone_to_nid(zone); | 1124 | int nid = zone_to_nid(zone); |
1072 | int zid = zone_idx(zone); | 1125 | int zid = zone_idx(zone); |
@@ -1075,6 +1128,93 @@ unsigned long mem_cgroup_zone_nr_pages(struct mem_cgroup *memcg, | |||
1075 | return MEM_CGROUP_ZSTAT(mz, lru); | 1128 | return MEM_CGROUP_ZSTAT(mz, lru); |
1076 | } | 1129 | } |
1077 | 1130 | ||
1131 | #ifdef CONFIG_NUMA | ||
1132 | static unsigned long mem_cgroup_node_nr_file_lru_pages(struct mem_cgroup *memcg, | ||
1133 | int nid) | ||
1134 | { | ||
1135 | unsigned long ret; | ||
1136 | |||
1137 | ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_FILE) + | ||
1138 | mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_FILE); | ||
1139 | |||
1140 | return ret; | ||
1141 | } | ||
1142 | |||
1143 | static unsigned long mem_cgroup_nr_file_lru_pages(struct mem_cgroup *memcg) | ||
1144 | { | ||
1145 | u64 total = 0; | ||
1146 | int nid; | ||
1147 | |||
1148 | for_each_node_state(nid, N_HIGH_MEMORY) | ||
1149 | total += mem_cgroup_node_nr_file_lru_pages(memcg, nid); | ||
1150 | |||
1151 | return total; | ||
1152 | } | ||
1153 | |||
1154 | static unsigned long mem_cgroup_node_nr_anon_lru_pages(struct mem_cgroup *memcg, | ||
1155 | int nid) | ||
1156 | { | ||
1157 | unsigned long ret; | ||
1158 | |||
1159 | ret = mem_cgroup_get_zonestat_node(memcg, nid, LRU_INACTIVE_ANON) + | ||
1160 | mem_cgroup_get_zonestat_node(memcg, nid, LRU_ACTIVE_ANON); | ||
1161 | |||
1162 | return ret; | ||
1163 | } | ||
1164 | |||
1165 | static unsigned long mem_cgroup_nr_anon_lru_pages(struct mem_cgroup *memcg) | ||
1166 | { | ||
1167 | u64 total = 0; | ||
1168 | int nid; | ||
1169 | |||
1170 | for_each_node_state(nid, N_HIGH_MEMORY) | ||
1171 | total += mem_cgroup_node_nr_anon_lru_pages(memcg, nid); | ||
1172 | |||
1173 | return total; | ||
1174 | } | ||
1175 | |||
1176 | static unsigned long | ||
1177 | mem_cgroup_node_nr_unevictable_lru_pages(struct mem_cgroup *memcg, int nid) | ||
1178 | { | ||
1179 | return mem_cgroup_get_zonestat_node(memcg, nid, LRU_UNEVICTABLE); | ||
1180 | } | ||
1181 | |||
1182 | static unsigned long | ||
1183 | mem_cgroup_nr_unevictable_lru_pages(struct mem_cgroup *memcg) | ||
1184 | { | ||
1185 | u64 total = 0; | ||
1186 | int nid; | ||
1187 | |||
1188 | for_each_node_state(nid, N_HIGH_MEMORY) | ||
1189 | total += mem_cgroup_node_nr_unevictable_lru_pages(memcg, nid); | ||
1190 | |||
1191 | return total; | ||
1192 | } | ||
1193 | |||
1194 | static unsigned long mem_cgroup_node_nr_lru_pages(struct mem_cgroup *memcg, | ||
1195 | int nid) | ||
1196 | { | ||
1197 | enum lru_list l; | ||
1198 | u64 total = 0; | ||
1199 | |||
1200 | for_each_lru(l) | ||
1201 | total += mem_cgroup_get_zonestat_node(memcg, nid, l); | ||
1202 | |||
1203 | return total; | ||
1204 | } | ||
1205 | |||
1206 | static unsigned long mem_cgroup_nr_lru_pages(struct mem_cgroup *memcg) | ||
1207 | { | ||
1208 | u64 total = 0; | ||
1209 | int nid; | ||
1210 | |||
1211 | for_each_node_state(nid, N_HIGH_MEMORY) | ||
1212 | total += mem_cgroup_node_nr_lru_pages(memcg, nid); | ||
1213 | |||
1214 | return total; | ||
1215 | } | ||
1216 | #endif /* CONFIG_NUMA */ | ||
1217 | |||
1078 | struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, | 1218 | struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, |
1079 | struct zone *zone) | 1219 | struct zone *zone) |
1080 | { | 1220 | { |
@@ -1418,6 +1558,81 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem) | |||
1418 | return ret; | 1558 | return ret; |
1419 | } | 1559 | } |
1420 | 1560 | ||
1561 | #if MAX_NUMNODES > 1 | ||
1562 | |||
1563 | /* | ||
1564 | * Always updating the nodemask is not very good - even if we have an empty | ||
1565 | * list or the wrong list here, we can start from some node and traverse all | ||
1566 | * nodes based on the zonelist. So update the list loosely once per 10 secs. | ||
1567 | * | ||
1568 | */ | ||
1569 | static void mem_cgroup_may_update_nodemask(struct mem_cgroup *mem) | ||
1570 | { | ||
1571 | int nid; | ||
1572 | |||
1573 | if (time_after(mem->next_scan_node_update, jiffies)) | ||
1574 | return; | ||
1575 | |||
1576 | mem->next_scan_node_update = jiffies + 10*HZ; | ||
1577 | /* make a nodemask where this memcg uses memory from */ | ||
1578 | mem->scan_nodes = node_states[N_HIGH_MEMORY]; | ||
1579 | |||
1580 | for_each_node_mask(nid, node_states[N_HIGH_MEMORY]) { | ||
1581 | |||
1582 | if (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_FILE) || | ||
1583 | mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_FILE)) | ||
1584 | continue; | ||
1585 | |||
1586 | if (total_swap_pages && | ||
1587 | (mem_cgroup_get_zonestat_node(mem, nid, LRU_INACTIVE_ANON) || | ||
1588 | mem_cgroup_get_zonestat_node(mem, nid, LRU_ACTIVE_ANON))) | ||
1589 | continue; | ||
1590 | node_clear(nid, mem->scan_nodes); | ||
1591 | } | ||
1592 | } | ||
1593 | |||
1594 | /* | ||
1595 | * Selecting a node where we start reclaim from. Because what we need is just | ||
1596 | * reducing usage counter, start from anywhere is O,K. Considering | ||
1597 | * memory reclaim from current node, there are pros. and cons. | ||
1598 | * | ||
1599 | * Freeing memory from current node means freeing memory from a node which | ||
1600 | * we'll use or we've used. So, it may make LRU bad. And if several threads | ||
1601 | * hit limits, it will see a contention on a node. But freeing from remote | ||
1602 | * node means more costs for memory reclaim because of memory latency. | ||
1603 | * | ||
1604 | * Now, we use round-robin. Better algorithm is welcomed. | ||
1605 | */ | ||
1606 | int mem_cgroup_select_victim_node(struct mem_cgroup *mem) | ||
1607 | { | ||
1608 | int node; | ||
1609 | |||
1610 | mem_cgroup_may_update_nodemask(mem); | ||
1611 | node = mem->last_scanned_node; | ||
1612 | |||
1613 | node = next_node(node, mem->scan_nodes); | ||
1614 | if (node == MAX_NUMNODES) | ||
1615 | node = first_node(mem->scan_nodes); | ||
1616 | /* | ||
1617 | * We call this when we hit limit, not when pages are added to LRU. | ||
1618 | * No LRU may hold pages because all pages are UNEVICTABLE or | ||
1619 | * memcg is too small and all pages are not on LRU. In that case, | ||
1620 | * we use curret node. | ||
1621 | */ | ||
1622 | if (unlikely(node == MAX_NUMNODES)) | ||
1623 | node = numa_node_id(); | ||
1624 | |||
1625 | mem->last_scanned_node = node; | ||
1626 | return node; | ||
1627 | } | ||
1628 | |||
1629 | #else | ||
1630 | int mem_cgroup_select_victim_node(struct mem_cgroup *mem) | ||
1631 | { | ||
1632 | return 0; | ||
1633 | } | ||
1634 | #endif | ||
1635 | |||
1421 | /* | 1636 | /* |
1422 | * Scan the hierarchy if needed to reclaim memory. We remember the last child | 1637 | * Scan the hierarchy if needed to reclaim memory. We remember the last child |
1423 | * we reclaimed from, so that we don't end up penalizing one child extensively | 1638 | * we reclaimed from, so that we don't end up penalizing one child extensively |
@@ -1433,7 +1648,8 @@ mem_cgroup_select_victim(struct mem_cgroup *root_mem) | |||
1433 | static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, | 1648 | static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, |
1434 | struct zone *zone, | 1649 | struct zone *zone, |
1435 | gfp_t gfp_mask, | 1650 | gfp_t gfp_mask, |
1436 | unsigned long reclaim_options) | 1651 | unsigned long reclaim_options, |
1652 | unsigned long *total_scanned) | ||
1437 | { | 1653 | { |
1438 | struct mem_cgroup *victim; | 1654 | struct mem_cgroup *victim; |
1439 | int ret, total = 0; | 1655 | int ret, total = 0; |
@@ -1442,6 +1658,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, | |||
1442 | bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; | 1658 | bool shrink = reclaim_options & MEM_CGROUP_RECLAIM_SHRINK; |
1443 | bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; | 1659 | bool check_soft = reclaim_options & MEM_CGROUP_RECLAIM_SOFT; |
1444 | unsigned long excess; | 1660 | unsigned long excess; |
1661 | unsigned long nr_scanned; | ||
1445 | 1662 | ||
1446 | excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; | 1663 | excess = res_counter_soft_limit_excess(&root_mem->res) >> PAGE_SHIFT; |
1447 | 1664 | ||
@@ -1484,10 +1701,12 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, | |||
1484 | continue; | 1701 | continue; |
1485 | } | 1702 | } |
1486 | /* we use swappiness of local cgroup */ | 1703 | /* we use swappiness of local cgroup */ |
1487 | if (check_soft) | 1704 | if (check_soft) { |
1488 | ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, | 1705 | ret = mem_cgroup_shrink_node_zone(victim, gfp_mask, |
1489 | noswap, get_swappiness(victim), zone); | 1706 | noswap, get_swappiness(victim), zone, |
1490 | else | 1707 | &nr_scanned); |
1708 | *total_scanned += nr_scanned; | ||
1709 | } else | ||
1491 | ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, | 1710 | ret = try_to_free_mem_cgroup_pages(victim, gfp_mask, |
1492 | noswap, get_swappiness(victim)); | 1711 | noswap, get_swappiness(victim)); |
1493 | css_put(&victim->css); | 1712 | css_put(&victim->css); |
@@ -1503,7 +1722,7 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem, | |||
1503 | if (!res_counter_soft_limit_excess(&root_mem->res)) | 1722 | if (!res_counter_soft_limit_excess(&root_mem->res)) |
1504 | return total; | 1723 | return total; |
1505 | } else if (mem_cgroup_margin(root_mem)) | 1724 | } else if (mem_cgroup_margin(root_mem)) |
1506 | return 1 + total; | 1725 | return total; |
1507 | } | 1726 | } |
1508 | return total; | 1727 | return total; |
1509 | } | 1728 | } |
@@ -1928,7 +2147,7 @@ static int mem_cgroup_do_charge(struct mem_cgroup *mem, gfp_t gfp_mask, | |||
1928 | return CHARGE_WOULDBLOCK; | 2147 | return CHARGE_WOULDBLOCK; |
1929 | 2148 | ||
1930 | ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL, | 2149 | ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, NULL, |
1931 | gfp_mask, flags); | 2150 | gfp_mask, flags, NULL); |
1932 | if (mem_cgroup_margin(mem_over_limit) >= nr_pages) | 2151 | if (mem_cgroup_margin(mem_over_limit) >= nr_pages) |
1933 | return CHARGE_RETRY; | 2152 | return CHARGE_RETRY; |
1934 | /* | 2153 | /* |
@@ -3211,7 +3430,8 @@ static int mem_cgroup_resize_limit(struct mem_cgroup *memcg, | |||
3211 | break; | 3430 | break; |
3212 | 3431 | ||
3213 | mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, | 3432 | mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, |
3214 | MEM_CGROUP_RECLAIM_SHRINK); | 3433 | MEM_CGROUP_RECLAIM_SHRINK, |
3434 | NULL); | ||
3215 | curusage = res_counter_read_u64(&memcg->res, RES_USAGE); | 3435 | curusage = res_counter_read_u64(&memcg->res, RES_USAGE); |
3216 | /* Usage is reduced ? */ | 3436 | /* Usage is reduced ? */ |
3217 | if (curusage >= oldusage) | 3437 | if (curusage >= oldusage) |
@@ -3271,7 +3491,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, | |||
3271 | 3491 | ||
3272 | mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, | 3492 | mem_cgroup_hierarchical_reclaim(memcg, NULL, GFP_KERNEL, |
3273 | MEM_CGROUP_RECLAIM_NOSWAP | | 3493 | MEM_CGROUP_RECLAIM_NOSWAP | |
3274 | MEM_CGROUP_RECLAIM_SHRINK); | 3494 | MEM_CGROUP_RECLAIM_SHRINK, |
3495 | NULL); | ||
3275 | curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); | 3496 | curusage = res_counter_read_u64(&memcg->memsw, RES_USAGE); |
3276 | /* Usage is reduced ? */ | 3497 | /* Usage is reduced ? */ |
3277 | if (curusage >= oldusage) | 3498 | if (curusage >= oldusage) |
@@ -3285,7 +3506,8 @@ static int mem_cgroup_resize_memsw_limit(struct mem_cgroup *memcg, | |||
3285 | } | 3506 | } |
3286 | 3507 | ||
3287 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | 3508 | unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, |
3288 | gfp_t gfp_mask) | 3509 | gfp_t gfp_mask, |
3510 | unsigned long *total_scanned) | ||
3289 | { | 3511 | { |
3290 | unsigned long nr_reclaimed = 0; | 3512 | unsigned long nr_reclaimed = 0; |
3291 | struct mem_cgroup_per_zone *mz, *next_mz = NULL; | 3513 | struct mem_cgroup_per_zone *mz, *next_mz = NULL; |
@@ -3293,6 +3515,7 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | |||
3293 | int loop = 0; | 3515 | int loop = 0; |
3294 | struct mem_cgroup_tree_per_zone *mctz; | 3516 | struct mem_cgroup_tree_per_zone *mctz; |
3295 | unsigned long long excess; | 3517 | unsigned long long excess; |
3518 | unsigned long nr_scanned; | ||
3296 | 3519 | ||
3297 | if (order > 0) | 3520 | if (order > 0) |
3298 | return 0; | 3521 | return 0; |
@@ -3311,10 +3534,13 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | |||
3311 | if (!mz) | 3534 | if (!mz) |
3312 | break; | 3535 | break; |
3313 | 3536 | ||
3537 | nr_scanned = 0; | ||
3314 | reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone, | 3538 | reclaimed = mem_cgroup_hierarchical_reclaim(mz->mem, zone, |
3315 | gfp_mask, | 3539 | gfp_mask, |
3316 | MEM_CGROUP_RECLAIM_SOFT); | 3540 | MEM_CGROUP_RECLAIM_SOFT, |
3541 | &nr_scanned); | ||
3317 | nr_reclaimed += reclaimed; | 3542 | nr_reclaimed += reclaimed; |
3543 | *total_scanned += nr_scanned; | ||
3318 | spin_lock(&mctz->lock); | 3544 | spin_lock(&mctz->lock); |
3319 | 3545 | ||
3320 | /* | 3546 | /* |
@@ -3337,10 +3563,9 @@ unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, | |||
3337 | */ | 3563 | */ |
3338 | next_mz = | 3564 | next_mz = |
3339 | __mem_cgroup_largest_soft_limit_node(mctz); | 3565 | __mem_cgroup_largest_soft_limit_node(mctz); |
3340 | if (next_mz == mz) { | 3566 | if (next_mz == mz) |
3341 | css_put(&next_mz->mem->css); | 3567 | css_put(&next_mz->mem->css); |
3342 | next_mz = NULL; | 3568 | else /* next_mz == NULL or other memcg */ |
3343 | } else /* next_mz == NULL or other memcg */ | ||
3344 | break; | 3569 | break; |
3345 | } while (1); | 3570 | } while (1); |
3346 | } | 3571 | } |
@@ -3772,6 +3997,8 @@ enum { | |||
3772 | MCS_PGPGIN, | 3997 | MCS_PGPGIN, |
3773 | MCS_PGPGOUT, | 3998 | MCS_PGPGOUT, |
3774 | MCS_SWAP, | 3999 | MCS_SWAP, |
4000 | MCS_PGFAULT, | ||
4001 | MCS_PGMAJFAULT, | ||
3775 | MCS_INACTIVE_ANON, | 4002 | MCS_INACTIVE_ANON, |
3776 | MCS_ACTIVE_ANON, | 4003 | MCS_ACTIVE_ANON, |
3777 | MCS_INACTIVE_FILE, | 4004 | MCS_INACTIVE_FILE, |
@@ -3794,6 +4021,8 @@ struct { | |||
3794 | {"pgpgin", "total_pgpgin"}, | 4021 | {"pgpgin", "total_pgpgin"}, |
3795 | {"pgpgout", "total_pgpgout"}, | 4022 | {"pgpgout", "total_pgpgout"}, |
3796 | {"swap", "total_swap"}, | 4023 | {"swap", "total_swap"}, |
4024 | {"pgfault", "total_pgfault"}, | ||
4025 | {"pgmajfault", "total_pgmajfault"}, | ||
3797 | {"inactive_anon", "total_inactive_anon"}, | 4026 | {"inactive_anon", "total_inactive_anon"}, |
3798 | {"active_anon", "total_active_anon"}, | 4027 | {"active_anon", "total_active_anon"}, |
3799 | {"inactive_file", "total_inactive_file"}, | 4028 | {"inactive_file", "total_inactive_file"}, |
@@ -3822,6 +4051,10 @@ mem_cgroup_get_local_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) | |||
3822 | val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT); | 4051 | val = mem_cgroup_read_stat(mem, MEM_CGROUP_STAT_SWAPOUT); |
3823 | s->stat[MCS_SWAP] += val * PAGE_SIZE; | 4052 | s->stat[MCS_SWAP] += val * PAGE_SIZE; |
3824 | } | 4053 | } |
4054 | val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGFAULT); | ||
4055 | s->stat[MCS_PGFAULT] += val; | ||
4056 | val = mem_cgroup_read_events(mem, MEM_CGROUP_EVENTS_PGMAJFAULT); | ||
4057 | s->stat[MCS_PGMAJFAULT] += val; | ||
3825 | 4058 | ||
3826 | /* per zone stat */ | 4059 | /* per zone stat */ |
3827 | val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON); | 4060 | val = mem_cgroup_get_local_zonestat(mem, LRU_INACTIVE_ANON); |
@@ -3845,6 +4078,51 @@ mem_cgroup_get_total_stat(struct mem_cgroup *mem, struct mcs_total_stat *s) | |||
3845 | mem_cgroup_get_local_stat(iter, s); | 4078 | mem_cgroup_get_local_stat(iter, s); |
3846 | } | 4079 | } |
3847 | 4080 | ||
4081 | #ifdef CONFIG_NUMA | ||
4082 | static int mem_control_numa_stat_show(struct seq_file *m, void *arg) | ||
4083 | { | ||
4084 | int nid; | ||
4085 | unsigned long total_nr, file_nr, anon_nr, unevictable_nr; | ||
4086 | unsigned long node_nr; | ||
4087 | struct cgroup *cont = m->private; | ||
4088 | struct mem_cgroup *mem_cont = mem_cgroup_from_cont(cont); | ||
4089 | |||
4090 | total_nr = mem_cgroup_nr_lru_pages(mem_cont); | ||
4091 | seq_printf(m, "total=%lu", total_nr); | ||
4092 | for_each_node_state(nid, N_HIGH_MEMORY) { | ||
4093 | node_nr = mem_cgroup_node_nr_lru_pages(mem_cont, nid); | ||
4094 | seq_printf(m, " N%d=%lu", nid, node_nr); | ||
4095 | } | ||
4096 | seq_putc(m, '\n'); | ||
4097 | |||
4098 | file_nr = mem_cgroup_nr_file_lru_pages(mem_cont); | ||
4099 | seq_printf(m, "file=%lu", file_nr); | ||
4100 | for_each_node_state(nid, N_HIGH_MEMORY) { | ||
4101 | node_nr = mem_cgroup_node_nr_file_lru_pages(mem_cont, nid); | ||
4102 | seq_printf(m, " N%d=%lu", nid, node_nr); | ||
4103 | } | ||
4104 | seq_putc(m, '\n'); | ||
4105 | |||
4106 | anon_nr = mem_cgroup_nr_anon_lru_pages(mem_cont); | ||
4107 | seq_printf(m, "anon=%lu", anon_nr); | ||
4108 | for_each_node_state(nid, N_HIGH_MEMORY) { | ||
4109 | node_nr = mem_cgroup_node_nr_anon_lru_pages(mem_cont, nid); | ||
4110 | seq_printf(m, " N%d=%lu", nid, node_nr); | ||
4111 | } | ||
4112 | seq_putc(m, '\n'); | ||
4113 | |||
4114 | unevictable_nr = mem_cgroup_nr_unevictable_lru_pages(mem_cont); | ||
4115 | seq_printf(m, "unevictable=%lu", unevictable_nr); | ||
4116 | for_each_node_state(nid, N_HIGH_MEMORY) { | ||
4117 | node_nr = mem_cgroup_node_nr_unevictable_lru_pages(mem_cont, | ||
4118 | nid); | ||
4119 | seq_printf(m, " N%d=%lu", nid, node_nr); | ||
4120 | } | ||
4121 | seq_putc(m, '\n'); | ||
4122 | return 0; | ||
4123 | } | ||
4124 | #endif /* CONFIG_NUMA */ | ||
4125 | |||
3848 | static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, | 4126 | static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, |
3849 | struct cgroup_map_cb *cb) | 4127 | struct cgroup_map_cb *cb) |
3850 | { | 4128 | { |
@@ -3855,6 +4133,7 @@ static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, | |||
3855 | memset(&mystat, 0, sizeof(mystat)); | 4133 | memset(&mystat, 0, sizeof(mystat)); |
3856 | mem_cgroup_get_local_stat(mem_cont, &mystat); | 4134 | mem_cgroup_get_local_stat(mem_cont, &mystat); |
3857 | 4135 | ||
4136 | |||
3858 | for (i = 0; i < NR_MCS_STAT; i++) { | 4137 | for (i = 0; i < NR_MCS_STAT; i++) { |
3859 | if (i == MCS_SWAP && !do_swap_account) | 4138 | if (i == MCS_SWAP && !do_swap_account) |
3860 | continue; | 4139 | continue; |
@@ -4278,6 +4557,22 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp, | |||
4278 | return 0; | 4557 | return 0; |
4279 | } | 4558 | } |
4280 | 4559 | ||
4560 | #ifdef CONFIG_NUMA | ||
4561 | static const struct file_operations mem_control_numa_stat_file_operations = { | ||
4562 | .read = seq_read, | ||
4563 | .llseek = seq_lseek, | ||
4564 | .release = single_release, | ||
4565 | }; | ||
4566 | |||
4567 | static int mem_control_numa_stat_open(struct inode *unused, struct file *file) | ||
4568 | { | ||
4569 | struct cgroup *cont = file->f_dentry->d_parent->d_fsdata; | ||
4570 | |||
4571 | file->f_op = &mem_control_numa_stat_file_operations; | ||
4572 | return single_open(file, mem_control_numa_stat_show, cont); | ||
4573 | } | ||
4574 | #endif /* CONFIG_NUMA */ | ||
4575 | |||
4281 | static struct cftype mem_cgroup_files[] = { | 4576 | static struct cftype mem_cgroup_files[] = { |
4282 | { | 4577 | { |
4283 | .name = "usage_in_bytes", | 4578 | .name = "usage_in_bytes", |
@@ -4341,6 +4636,12 @@ static struct cftype mem_cgroup_files[] = { | |||
4341 | .unregister_event = mem_cgroup_oom_unregister_event, | 4636 | .unregister_event = mem_cgroup_oom_unregister_event, |
4342 | .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), | 4637 | .private = MEMFILE_PRIVATE(_OOM_TYPE, OOM_CONTROL), |
4343 | }, | 4638 | }, |
4639 | #ifdef CONFIG_NUMA | ||
4640 | { | ||
4641 | .name = "numa_stat", | ||
4642 | .open = mem_control_numa_stat_open, | ||
4643 | }, | ||
4644 | #endif | ||
4344 | }; | 4645 | }; |
4345 | 4646 | ||
4346 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP | 4647 | #ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP |
@@ -4596,6 +4897,7 @@ mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont) | |||
4596 | res_counter_init(&mem->memsw, NULL); | 4897 | res_counter_init(&mem->memsw, NULL); |
4597 | } | 4898 | } |
4598 | mem->last_scanned_child = 0; | 4899 | mem->last_scanned_child = 0; |
4900 | mem->last_scanned_node = MAX_NUMNODES; | ||
4599 | INIT_LIST_HEAD(&mem->oom_notify); | 4901 | INIT_LIST_HEAD(&mem->oom_notify); |
4600 | 4902 | ||
4601 | if (parent) | 4903 | if (parent) |
@@ -4953,8 +5255,7 @@ static void mem_cgroup_clear_mc(void) | |||
4953 | 5255 | ||
4954 | static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | 5256 | static int mem_cgroup_can_attach(struct cgroup_subsys *ss, |
4955 | struct cgroup *cgroup, | 5257 | struct cgroup *cgroup, |
4956 | struct task_struct *p, | 5258 | struct task_struct *p) |
4957 | bool threadgroup) | ||
4958 | { | 5259 | { |
4959 | int ret = 0; | 5260 | int ret = 0; |
4960 | struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup); | 5261 | struct mem_cgroup *mem = mem_cgroup_from_cont(cgroup); |
@@ -4993,8 +5294,7 @@ static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | |||
4993 | 5294 | ||
4994 | static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, | 5295 | static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, |
4995 | struct cgroup *cgroup, | 5296 | struct cgroup *cgroup, |
4996 | struct task_struct *p, | 5297 | struct task_struct *p) |
4997 | bool threadgroup) | ||
4998 | { | 5298 | { |
4999 | mem_cgroup_clear_mc(); | 5299 | mem_cgroup_clear_mc(); |
5000 | } | 5300 | } |
@@ -5112,8 +5412,7 @@ retry: | |||
5112 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, | 5412 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, |
5113 | struct cgroup *cont, | 5413 | struct cgroup *cont, |
5114 | struct cgroup *old_cont, | 5414 | struct cgroup *old_cont, |
5115 | struct task_struct *p, | 5415 | struct task_struct *p) |
5116 | bool threadgroup) | ||
5117 | { | 5416 | { |
5118 | struct mm_struct *mm; | 5417 | struct mm_struct *mm; |
5119 | 5418 | ||
@@ -5131,22 +5430,19 @@ static void mem_cgroup_move_task(struct cgroup_subsys *ss, | |||
5131 | #else /* !CONFIG_MMU */ | 5430 | #else /* !CONFIG_MMU */ |
5132 | static int mem_cgroup_can_attach(struct cgroup_subsys *ss, | 5431 | static int mem_cgroup_can_attach(struct cgroup_subsys *ss, |
5133 | struct cgroup *cgroup, | 5432 | struct cgroup *cgroup, |
5134 | struct task_struct *p, | 5433 | struct task_struct *p) |
5135 | bool threadgroup) | ||
5136 | { | 5434 | { |
5137 | return 0; | 5435 | return 0; |
5138 | } | 5436 | } |
5139 | static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, | 5437 | static void mem_cgroup_cancel_attach(struct cgroup_subsys *ss, |
5140 | struct cgroup *cgroup, | 5438 | struct cgroup *cgroup, |
5141 | struct task_struct *p, | 5439 | struct task_struct *p) |
5142 | bool threadgroup) | ||
5143 | { | 5440 | { |
5144 | } | 5441 | } |
5145 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, | 5442 | static void mem_cgroup_move_task(struct cgroup_subsys *ss, |
5146 | struct cgroup *cont, | 5443 | struct cgroup *cont, |
5147 | struct cgroup *old_cont, | 5444 | struct cgroup *old_cont, |
5148 | struct task_struct *p, | 5445 | struct task_struct *p) |
5149 | bool threadgroup) | ||
5150 | { | 5446 | { |
5151 | } | 5447 | } |
5152 | #endif | 5448 | #endif |
diff --git a/mm/memory.c b/mm/memory.c index b73f677f0bb1..6953d3926e01 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -730,7 +730,7 @@ static void print_bad_pte(struct vm_area_struct *vma, unsigned long addr, | |||
730 | add_taint(TAINT_BAD_PAGE); | 730 | add_taint(TAINT_BAD_PAGE); |
731 | } | 731 | } |
732 | 732 | ||
733 | static inline int is_cow_mapping(unsigned int flags) | 733 | static inline int is_cow_mapping(vm_flags_t flags) |
734 | { | 734 | { |
735 | return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; | 735 | return (flags & (VM_SHARED | VM_MAYWRITE)) == VM_MAYWRITE; |
736 | } | 736 | } |
@@ -2874,6 +2874,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma, | |||
2874 | /* Had to read the page from swap area: Major fault */ | 2874 | /* Had to read the page from swap area: Major fault */ |
2875 | ret = VM_FAULT_MAJOR; | 2875 | ret = VM_FAULT_MAJOR; |
2876 | count_vm_event(PGMAJFAULT); | 2876 | count_vm_event(PGMAJFAULT); |
2877 | mem_cgroup_count_vm_event(mm, PGMAJFAULT); | ||
2877 | } else if (PageHWPoison(page)) { | 2878 | } else if (PageHWPoison(page)) { |
2878 | /* | 2879 | /* |
2879 | * hwpoisoned dirty swapcache pages are kept for killing | 2880 | * hwpoisoned dirty swapcache pages are kept for killing |
@@ -3413,6 +3414,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma, | |||
3413 | __set_current_state(TASK_RUNNING); | 3414 | __set_current_state(TASK_RUNNING); |
3414 | 3415 | ||
3415 | count_vm_event(PGFAULT); | 3416 | count_vm_event(PGFAULT); |
3417 | mem_cgroup_count_vm_event(mm, PGFAULT); | ||
3416 | 3418 | ||
3417 | /* do counter updates before entering really critical section. */ | 3419 | /* do counter updates before entering really critical section. */ |
3418 | check_sync_rss_stat(current); | 3420 | check_sync_rss_stat(current); |
diff --git a/mm/mlock.c b/mm/mlock.c index 516b2c2ddd5a..048260c4e02e 100644 --- a/mm/mlock.c +++ b/mm/mlock.c | |||
@@ -307,13 +307,13 @@ void munlock_vma_pages_range(struct vm_area_struct *vma, | |||
307 | * For vmas that pass the filters, merge/split as appropriate. | 307 | * For vmas that pass the filters, merge/split as appropriate. |
308 | */ | 308 | */ |
309 | static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, | 309 | static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev, |
310 | unsigned long start, unsigned long end, unsigned int newflags) | 310 | unsigned long start, unsigned long end, vm_flags_t newflags) |
311 | { | 311 | { |
312 | struct mm_struct *mm = vma->vm_mm; | 312 | struct mm_struct *mm = vma->vm_mm; |
313 | pgoff_t pgoff; | 313 | pgoff_t pgoff; |
314 | int nr_pages; | 314 | int nr_pages; |
315 | int ret = 0; | 315 | int ret = 0; |
316 | int lock = newflags & VM_LOCKED; | 316 | int lock = !!(newflags & VM_LOCKED); |
317 | 317 | ||
318 | if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || | 318 | if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) || |
319 | is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) | 319 | is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm)) |
@@ -385,7 +385,7 @@ static int do_mlock(unsigned long start, size_t len, int on) | |||
385 | prev = vma; | 385 | prev = vma; |
386 | 386 | ||
387 | for (nstart = start ; ; ) { | 387 | for (nstart = start ; ; ) { |
388 | unsigned int newflags; | 388 | vm_flags_t newflags; |
389 | 389 | ||
390 | /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ | 390 | /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ |
391 | 391 | ||
@@ -524,7 +524,7 @@ static int do_mlockall(int flags) | |||
524 | goto out; | 524 | goto out; |
525 | 525 | ||
526 | for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { | 526 | for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { |
527 | unsigned int newflags; | 527 | vm_flags_t newflags; |
528 | 528 | ||
529 | newflags = vma->vm_flags | VM_LOCKED; | 529 | newflags = vma->vm_flags | VM_LOCKED; |
530 | if (!(flags & MCL_CURRENT)) | 530 | if (!(flags & MCL_CURRENT)) |
diff --git a/mm/mmap.c b/mm/mmap.c --- a/mm/mmap.c +++ b/mm/mmap.c | |||
@@ -960,7 +960,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, | |||
960 | { | 960 | { |
961 | struct mm_struct * mm = current->mm; | 961 | struct mm_struct * mm = current->mm; |
962 | struct inode *inode; | 962 | struct inode *inode; |
963 | unsigned int vm_flags; | 963 | vm_flags_t vm_flags; |
964 | int error; | 964 | int error; |
965 | unsigned long reqprot = prot; | 965 | unsigned long reqprot = prot; |
966 | 966 | ||
@@ -1165,7 +1165,7 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg) | |||
1165 | */ | 1165 | */ |
1166 | int vma_wants_writenotify(struct vm_area_struct *vma) | 1166 | int vma_wants_writenotify(struct vm_area_struct *vma) |
1167 | { | 1167 | { |
1168 | unsigned int vm_flags = vma->vm_flags; | 1168 | vm_flags_t vm_flags = vma->vm_flags; |
1169 | 1169 | ||
1170 | /* If it was private or non-writable, the write bit is already clear */ | 1170 | /* If it was private or non-writable, the write bit is already clear */ |
1171 | if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) | 1171 | if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) |
@@ -1193,7 +1193,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma) | |||
1193 | * We account for memory if it's a private writeable mapping, | 1193 | * We account for memory if it's a private writeable mapping, |
1194 | * not hugepages and VM_NORESERVE wasn't set. | 1194 | * not hugepages and VM_NORESERVE wasn't set. |
1195 | */ | 1195 | */ |
1196 | static inline int accountable_mapping(struct file *file, unsigned int vm_flags) | 1196 | static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags) |
1197 | { | 1197 | { |
1198 | /* | 1198 | /* |
1199 | * hugetlb has its own accounting separate from the core VM | 1199 | * hugetlb has its own accounting separate from the core VM |
@@ -1207,7 +1207,7 @@ static inline int accountable_mapping(struct file *file, unsigned int vm_flags) | |||
1207 | 1207 | ||
1208 | unsigned long mmap_region(struct file *file, unsigned long addr, | 1208 | unsigned long mmap_region(struct file *file, unsigned long addr, |
1209 | unsigned long len, unsigned long flags, | 1209 | unsigned long len, unsigned long flags, |
1210 | unsigned int vm_flags, unsigned long pgoff) | 1210 | vm_flags_t vm_flags, unsigned long pgoff) |
1211 | { | 1211 | { |
1212 | struct mm_struct *mm = current->mm; | 1212 | struct mm_struct *mm = current->mm; |
1213 | struct vm_area_struct *vma, *prev; | 1213 | struct vm_area_struct *vma, *prev; |
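The mlock.c and mmap.c hunks above move local flag storage from unsigned int to vm_flags_t, and mlock_fixup() now computes "int lock = !!(newflags & VM_LOCKED)", normalizing the test to 0/1 rather than relying on the masked bit fitting in an int, presumably to stay correct if flag bits ever move above bit 31. A small standalone illustration of the truncation hazard a wide flags type avoids on 64-bit builds; the flag name and bit position are invented for the example:

#include <stdio.h>

typedef unsigned long vm_flags_t;       /* wide enough for high flag bits */

#define DEMO_VM_HIGHFLAG (1UL << 32)    /* hypothetical flag above bit 31 */

int main(void)
{
    vm_flags_t wide = DEMO_VM_HIGHFLAG;
    unsigned int narrow = DEMO_VM_HIGHFLAG;  /* truncates to 0; compilers typically warn here */

    printf("wide keeps the bit:   %d\n", (wide & DEMO_VM_HIGHFLAG) != 0);
    printf("narrow loses the bit: %d\n", (narrow & DEMO_VM_HIGHFLAG) != 0);
    return 0;
}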
diff --git a/mm/page_alloc.c b/mm/page_alloc.c index 2a00f17c3bf4..a4e1db3f1981 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c | |||
@@ -4323,10 +4323,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat, | |||
4323 | zone->zone_pgdat = pgdat; | 4323 | zone->zone_pgdat = pgdat; |
4324 | 4324 | ||
4325 | zone_pcp_init(zone); | 4325 | zone_pcp_init(zone); |
4326 | for_each_lru(l) { | 4326 | for_each_lru(l) |
4327 | INIT_LIST_HEAD(&zone->lru[l].list); | 4327 | INIT_LIST_HEAD(&zone->lru[l].list); |
4328 | zone->reclaim_stat.nr_saved_scan[l] = 0; | ||
4329 | } | ||
4330 | zone->reclaim_stat.recent_rotated[0] = 0; | 4328 | zone->reclaim_stat.recent_rotated[0] = 0; |
4331 | zone->reclaim_stat.recent_rotated[1] = 0; | 4329 | zone->reclaim_stat.recent_rotated[1] = 0; |
4332 | zone->reclaim_stat.recent_scanned[0] = 0; | 4330 | zone->reclaim_stat.recent_scanned[0] = 0; |
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c index 2daadc322ba6..74ccff61d1be 100644 --- a/mm/page_cgroup.c +++ b/mm/page_cgroup.c | |||
@@ -130,7 +130,7 @@ struct page *lookup_cgroup_page(struct page_cgroup *pc) | |||
130 | return page; | 130 | return page; |
131 | } | 131 | } |
132 | 132 | ||
133 | static void *__init_refok alloc_page_cgroup(size_t size, int nid) | 133 | static void *__meminit alloc_page_cgroup(size_t size, int nid) |
134 | { | 134 | { |
135 | void *addr = NULL; | 135 | void *addr = NULL; |
136 | 136 | ||
@@ -162,7 +162,7 @@ static void free_page_cgroup(void *addr) | |||
162 | } | 162 | } |
163 | #endif | 163 | #endif |
164 | 164 | ||
165 | static int __init_refok init_section_page_cgroup(unsigned long pfn) | 165 | static int __meminit init_section_page_cgroup(unsigned long pfn) |
166 | { | 166 | { |
167 | struct page_cgroup *base, *pc; | 167 | struct page_cgroup *base, *pc; |
168 | struct mem_section *section; | 168 | struct mem_section *section; |
@@ -475,7 +475,7 @@ int swap_cgroup_swapon(int type, unsigned long max_pages) | |||
475 | if (!do_swap_account) | 475 | if (!do_swap_account) |
476 | return 0; | 476 | return 0; |
477 | 477 | ||
478 | length = ((max_pages/SC_PER_PAGE) + 1); | 478 | length = DIV_ROUND_UP(max_pages, SC_PER_PAGE); |
479 | array_size = length * sizeof(void *); | 479 | array_size = length * sizeof(void *); |
480 | 480 | ||
481 | array = vmalloc(array_size); | 481 | array = vmalloc(array_size); |
@@ -492,8 +492,8 @@ int swap_cgroup_swapon(int type, unsigned long max_pages) | |||
492 | /* memory shortage */ | 492 | /* memory shortage */ |
493 | ctrl->map = NULL; | 493 | ctrl->map = NULL; |
494 | ctrl->length = 0; | 494 | ctrl->length = 0; |
495 | vfree(array); | ||
496 | mutex_unlock(&swap_cgroup_mutex); | 495 | mutex_unlock(&swap_cgroup_mutex); |
496 | vfree(array); | ||
497 | goto nomem; | 497 | goto nomem; |
498 | } | 498 | } |
499 | mutex_unlock(&swap_cgroup_mutex); | 499 | mutex_unlock(&swap_cgroup_mutex); |
@@ -508,7 +508,8 @@ nomem: | |||
508 | 508 | ||
509 | void swap_cgroup_swapoff(int type) | 509 | void swap_cgroup_swapoff(int type) |
510 | { | 510 | { |
511 | int i; | 511 | struct page **map; |
512 | unsigned long i, length; | ||
512 | struct swap_cgroup_ctrl *ctrl; | 513 | struct swap_cgroup_ctrl *ctrl; |
513 | 514 | ||
514 | if (!do_swap_account) | 515 | if (!do_swap_account) |
@@ -516,17 +517,20 @@ void swap_cgroup_swapoff(int type) | |||
516 | 517 | ||
517 | mutex_lock(&swap_cgroup_mutex); | 518 | mutex_lock(&swap_cgroup_mutex); |
518 | ctrl = &swap_cgroup_ctrl[type]; | 519 | ctrl = &swap_cgroup_ctrl[type]; |
519 | if (ctrl->map) { | 520 | map = ctrl->map; |
520 | for (i = 0; i < ctrl->length; i++) { | 521 | length = ctrl->length; |
521 | struct page *page = ctrl->map[i]; | 522 | ctrl->map = NULL; |
523 | ctrl->length = 0; | ||
524 | mutex_unlock(&swap_cgroup_mutex); | ||
525 | |||
526 | if (map) { | ||
527 | for (i = 0; i < length; i++) { | ||
528 | struct page *page = map[i]; | ||
522 | if (page) | 529 | if (page) |
523 | __free_page(page); | 530 | __free_page(page); |
524 | } | 531 | } |
525 | vfree(ctrl->map); | 532 | vfree(map); |
526 | ctrl->map = NULL; | ||
527 | ctrl->length = 0; | ||
528 | } | 533 | } |
529 | mutex_unlock(&swap_cgroup_mutex); | ||
530 | } | 534 | } |
531 | 535 | ||
532 | #endif | 536 | #endif |
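The swap_cgroup_swapoff() rework above detaches map and length from the ctrl structure while swap_cgroup_mutex is held and only then walks and frees the pages, so the long free loop and vfree() run outside the mutex; the swapon error path gets the same vfree-after-unlock treatment. A hedged userspace sketch of that detach-then-free pattern, with illustrative types and a pthread mutex standing in for the kernel mutex:

#include <pthread.h>
#include <stdlib.h>

struct ctrl {
    pthread_mutex_t lock;
    void **map;
    unsigned long length;
};

void ctrl_teardown(struct ctrl *c)
{
    void **map;
    unsigned long i, length;

    pthread_mutex_lock(&c->lock);
    map = c->map;                   /* detach under the lock */
    length = c->length;
    c->map = NULL;
    c->length = 0;
    pthread_mutex_unlock(&c->lock);

    if (!map)
        return;
    for (i = 0; i < length; i++)    /* slow work outside the lock */
        free(map[i]);
    free(map);
}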
diff --git a/mm/shmem.c b/mm/shmem.c index 69edb45a9f28..1acfb2687bfa 100644 --- a/mm/shmem.c +++ b/mm/shmem.c | |||
@@ -1305,12 +1305,10 @@ repeat: | |||
1305 | swappage = lookup_swap_cache(swap); | 1305 | swappage = lookup_swap_cache(swap); |
1306 | if (!swappage) { | 1306 | if (!swappage) { |
1307 | shmem_swp_unmap(entry); | 1307 | shmem_swp_unmap(entry); |
1308 | spin_unlock(&info->lock); | ||
1308 | /* here we actually do the io */ | 1309 | /* here we actually do the io */ |
1309 | if (type && !(*type & VM_FAULT_MAJOR)) { | 1310 | if (type) |
1310 | __count_vm_event(PGMAJFAULT); | ||
1311 | *type |= VM_FAULT_MAJOR; | 1311 | *type |= VM_FAULT_MAJOR; |
1312 | } | ||
1313 | spin_unlock(&info->lock); | ||
1314 | swappage = shmem_swapin(swap, gfp, info, idx); | 1312 | swappage = shmem_swapin(swap, gfp, info, idx); |
1315 | if (!swappage) { | 1313 | if (!swappage) { |
1316 | spin_lock(&info->lock); | 1314 | spin_lock(&info->lock); |
@@ -1549,7 +1547,10 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf) | |||
1549 | error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); | 1547 | error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); |
1550 | if (error) | 1548 | if (error) |
1551 | return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); | 1549 | return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); |
1552 | 1550 | if (ret & VM_FAULT_MAJOR) { | |
1551 | count_vm_event(PGMAJFAULT); | ||
1552 | mem_cgroup_count_vm_event(vma->vm_mm, PGMAJFAULT); | ||
1553 | } | ||
1553 | return ret | VM_FAULT_LOCKED; | 1554 | return ret | VM_FAULT_LOCKED; |
1554 | } | 1555 | } |
1555 | 1556 | ||
diff --git a/mm/truncate.c b/mm/truncate.c index a95667529135..3a29a6180212 100644 --- a/mm/truncate.c +++ b/mm/truncate.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/task_io_accounting_ops.h> | 19 | #include <linux/task_io_accounting_ops.h> |
20 | #include <linux/buffer_head.h> /* grr. try_to_release_page, | 20 | #include <linux/buffer_head.h> /* grr. try_to_release_page, |
21 | do_invalidatepage */ | 21 | do_invalidatepage */ |
22 | #include <linux/cleancache.h> | ||
22 | #include "internal.h" | 23 | #include "internal.h" |
23 | 24 | ||
24 | 25 | ||
@@ -51,6 +52,7 @@ void do_invalidatepage(struct page *page, unsigned long offset) | |||
51 | static inline void truncate_partial_page(struct page *page, unsigned partial) | 52 | static inline void truncate_partial_page(struct page *page, unsigned partial) |
52 | { | 53 | { |
53 | zero_user_segment(page, partial, PAGE_CACHE_SIZE); | 54 | zero_user_segment(page, partial, PAGE_CACHE_SIZE); |
55 | cleancache_flush_page(page->mapping, page); | ||
54 | if (page_has_private(page)) | 56 | if (page_has_private(page)) |
55 | do_invalidatepage(page, partial); | 57 | do_invalidatepage(page, partial); |
56 | } | 58 | } |
@@ -214,6 +216,7 @@ void truncate_inode_pages_range(struct address_space *mapping, | |||
214 | pgoff_t next; | 216 | pgoff_t next; |
215 | int i; | 217 | int i; |
216 | 218 | ||
219 | cleancache_flush_inode(mapping); | ||
217 | if (mapping->nrpages == 0) | 220 | if (mapping->nrpages == 0) |
218 | return; | 221 | return; |
219 | 222 | ||
@@ -291,6 +294,7 @@ void truncate_inode_pages_range(struct address_space *mapping, | |||
291 | pagevec_release(&pvec); | 294 | pagevec_release(&pvec); |
292 | mem_cgroup_uncharge_end(); | 295 | mem_cgroup_uncharge_end(); |
293 | } | 296 | } |
297 | cleancache_flush_inode(mapping); | ||
294 | } | 298 | } |
295 | EXPORT_SYMBOL(truncate_inode_pages_range); | 299 | EXPORT_SYMBOL(truncate_inode_pages_range); |
296 | 300 | ||
@@ -440,6 +444,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping, | |||
440 | int did_range_unmap = 0; | 444 | int did_range_unmap = 0; |
441 | int wrapped = 0; | 445 | int wrapped = 0; |
442 | 446 | ||
447 | cleancache_flush_inode(mapping); | ||
443 | pagevec_init(&pvec, 0); | 448 | pagevec_init(&pvec, 0); |
444 | next = start; | 449 | next = start; |
445 | while (next <= end && !wrapped && | 450 | while (next <= end && !wrapped && |
@@ -498,6 +503,7 @@ int invalidate_inode_pages2_range(struct address_space *mapping, | |||
498 | mem_cgroup_uncharge_end(); | 503 | mem_cgroup_uncharge_end(); |
499 | cond_resched(); | 504 | cond_resched(); |
500 | } | 505 | } |
506 | cleancache_flush_inode(mapping); | ||
501 | return ret; | 507 | return ret; |
502 | } | 508 | } |
503 | EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range); | 509 | EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range); |
diff --git a/mm/vmalloc.c b/mm/vmalloc.c index b5ccf3158d82..1d34d75366a7 100644 --- a/mm/vmalloc.c +++ b/mm/vmalloc.c | |||
@@ -2153,10 +2153,6 @@ struct vm_struct *alloc_vm_area(size_t size) | |||
2153 | return NULL; | 2153 | return NULL; |
2154 | } | 2154 | } |
2155 | 2155 | ||
2156 | /* Make sure the pagetables are constructed in process kernel | ||
2157 | mappings */ | ||
2158 | vmalloc_sync_all(); | ||
2159 | |||
2160 | return area; | 2156 | return area; |
2161 | } | 2157 | } |
2162 | EXPORT_SYMBOL_GPL(alloc_vm_area); | 2158 | EXPORT_SYMBOL_GPL(alloc_vm_area); |
diff --git a/mm/vmscan.c b/mm/vmscan.c index 7e0116150dc7..faa0a088f9cc 100644 --- a/mm/vmscan.c +++ b/mm/vmscan.c | |||
@@ -173,7 +173,7 @@ static unsigned long zone_nr_lru_pages(struct zone *zone, | |||
173 | struct scan_control *sc, enum lru_list lru) | 173 | struct scan_control *sc, enum lru_list lru) |
174 | { | 174 | { |
175 | if (!scanning_global_lru(sc)) | 175 | if (!scanning_global_lru(sc)) |
176 | return mem_cgroup_zone_nr_pages(sc->mem_cgroup, zone, lru); | 176 | return mem_cgroup_zone_nr_lru_pages(sc->mem_cgroup, zone, lru); |
177 | 177 | ||
178 | return zone_page_state(zone, NR_LRU_BASE + lru); | 178 | return zone_page_state(zone, NR_LRU_BASE + lru); |
179 | } | 179 | } |
@@ -1718,26 +1718,6 @@ static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, | |||
1718 | } | 1718 | } |
1719 | 1719 | ||
1720 | /* | 1720 | /* |
1721 | * Smallish @nr_to_scan's are deposited in @nr_saved_scan, | ||
1722 | * until we collected @swap_cluster_max pages to scan. | ||
1723 | */ | ||
1724 | static unsigned long nr_scan_try_batch(unsigned long nr_to_scan, | ||
1725 | unsigned long *nr_saved_scan) | ||
1726 | { | ||
1727 | unsigned long nr; | ||
1728 | |||
1729 | *nr_saved_scan += nr_to_scan; | ||
1730 | nr = *nr_saved_scan; | ||
1731 | |||
1732 | if (nr >= SWAP_CLUSTER_MAX) | ||
1733 | *nr_saved_scan = 0; | ||
1734 | else | ||
1735 | nr = 0; | ||
1736 | |||
1737 | return nr; | ||
1738 | } | ||
1739 | |||
1740 | /* | ||
1741 | * Determine how aggressively the anon and file LRU lists should be | 1721 | * Determine how aggressively the anon and file LRU lists should be |
1742 | * scanned. The relative value of each set of LRU lists is determined | 1722 | * scanned. The relative value of each set of LRU lists is determined |
1743 | * by looking at the fraction of the pages scanned we did rotate back | 1723 | * by looking at the fraction of the pages scanned we did rotate back |
@@ -1755,6 +1735,22 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc, | |||
1755 | u64 fraction[2], denominator; | 1735 | u64 fraction[2], denominator; |
1756 | enum lru_list l; | 1736 | enum lru_list l; |
1757 | int noswap = 0; | 1737 | int noswap = 0; |
1738 | int force_scan = 0; | ||
1739 | |||
1740 | |||
1741 | anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + | ||
1742 | zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); | ||
1743 | file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + | ||
1744 | zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); | ||
1745 | |||
1746 | if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) { | ||
1747 | /* kswapd does zone balancing and need to scan this zone */ | ||
1748 | if (scanning_global_lru(sc) && current_is_kswapd()) | ||
1749 | force_scan = 1; | ||
1750 | /* memcg may have small limit and need to avoid priority drop */ | ||
1751 | if (!scanning_global_lru(sc)) | ||
1752 | force_scan = 1; | ||
1753 | } | ||
1758 | 1754 | ||
1759 | /* If we have no swap space, do not bother scanning anon pages. */ | 1755 | /* If we have no swap space, do not bother scanning anon pages. */ |
1760 | if (!sc->may_swap || (nr_swap_pages <= 0)) { | 1756 | if (!sc->may_swap || (nr_swap_pages <= 0)) { |
@@ -1765,11 +1761,6 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc, | |||
1765 | goto out; | 1761 | goto out; |
1766 | } | 1762 | } |
1767 | 1763 | ||
1768 | anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) + | ||
1769 | zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON); | ||
1770 | file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) + | ||
1771 | zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE); | ||
1772 | |||
1773 | if (scanning_global_lru(sc)) { | 1764 | if (scanning_global_lru(sc)) { |
1774 | free = zone_page_state(zone, NR_FREE_PAGES); | 1765 | free = zone_page_state(zone, NR_FREE_PAGES); |
1775 | /* If we have very few page cache pages, | 1766 | /* If we have very few page cache pages, |
@@ -1836,8 +1827,23 @@ out: | |||
1836 | scan >>= priority; | 1827 | scan >>= priority; |
1837 | scan = div64_u64(scan * fraction[file], denominator); | 1828 | scan = div64_u64(scan * fraction[file], denominator); |
1838 | } | 1829 | } |
1839 | nr[l] = nr_scan_try_batch(scan, | 1830 | |
1840 | &reclaim_stat->nr_saved_scan[l]); | 1831 | /* |
1832 | * If zone is small or memcg is small, nr[l] can be 0. | ||
1833 | * This results no-scan on this priority and priority drop down. | ||
1834 | * For global direct reclaim, it can visit next zone and tend | ||
1835 | * not to have problems. For global kswapd, it's for zone | ||
1836 | * balancing and it need to scan a small amounts. When using | ||
1837 | * memcg, priority drop can cause big latency. So, it's better | ||
1838 | * to scan small amount. See may_noscan above. | ||
1839 | */ | ||
1840 | if (!scan && force_scan) { | ||
1841 | if (file) | ||
1842 | scan = SWAP_CLUSTER_MAX; | ||
1843 | else if (!noswap) | ||
1844 | scan = SWAP_CLUSTER_MAX; | ||
1845 | } | ||
1846 | nr[l] = scan; | ||
1841 | } | 1847 | } |
1842 | } | 1848 | } |
1843 | 1849 | ||
@@ -1977,11 +1983,14 @@ restart: | |||
1977 | * If a zone is deemed to be full of pinned pages then just give it a light | 1983 | * If a zone is deemed to be full of pinned pages then just give it a light |
1978 | * scan then give up on it. | 1984 | * scan then give up on it. |
1979 | */ | 1985 | */ |
1980 | static void shrink_zones(int priority, struct zonelist *zonelist, | 1986 | static unsigned long shrink_zones(int priority, struct zonelist *zonelist, |
1981 | struct scan_control *sc) | 1987 | struct scan_control *sc) |
1982 | { | 1988 | { |
1983 | struct zoneref *z; | 1989 | struct zoneref *z; |
1984 | struct zone *zone; | 1990 | struct zone *zone; |
1991 | unsigned long nr_soft_reclaimed; | ||
1992 | unsigned long nr_soft_scanned; | ||
1993 | unsigned long total_scanned = 0; | ||
1985 | 1994 | ||
1986 | for_each_zone_zonelist_nodemask(zone, z, zonelist, | 1995 | for_each_zone_zonelist_nodemask(zone, z, zonelist, |
1987 | gfp_zone(sc->gfp_mask), sc->nodemask) { | 1996 | gfp_zone(sc->gfp_mask), sc->nodemask) { |
@@ -1998,8 +2007,17 @@ static void shrink_zones(int priority, struct zonelist *zonelist, | |||
1998 | continue; /* Let kswapd poll it */ | 2007 | continue; /* Let kswapd poll it */ |
1999 | } | 2008 | } |
2000 | 2009 | ||
2010 | nr_soft_scanned = 0; | ||
2011 | nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, | ||
2012 | sc->order, sc->gfp_mask, | ||
2013 | &nr_soft_scanned); | ||
2014 | sc->nr_reclaimed += nr_soft_reclaimed; | ||
2015 | total_scanned += nr_soft_scanned; | ||
2016 | |||
2001 | shrink_zone(priority, zone, sc); | 2017 | shrink_zone(priority, zone, sc); |
2002 | } | 2018 | } |
2019 | |||
2020 | return total_scanned; | ||
2003 | } | 2021 | } |
2004 | 2022 | ||
2005 | static bool zone_reclaimable(struct zone *zone) | 2023 | static bool zone_reclaimable(struct zone *zone) |
@@ -2064,7 +2082,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist, | |||
2064 | sc->nr_scanned = 0; | 2082 | sc->nr_scanned = 0; |
2065 | if (!priority) | 2083 | if (!priority) |
2066 | disable_swap_token(); | 2084 | disable_swap_token(); |
2067 | shrink_zones(priority, zonelist, sc); | 2085 | total_scanned += shrink_zones(priority, zonelist, sc); |
2068 | /* | 2086 | /* |
2069 | * Don't shrink slabs when reclaiming memory from | 2087 | * Don't shrink slabs when reclaiming memory from |
2070 | * over limit cgroups | 2088 | * over limit cgroups |
@@ -2171,9 +2189,11 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order, | |||
2171 | unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, | 2189 | unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, |
2172 | gfp_t gfp_mask, bool noswap, | 2190 | gfp_t gfp_mask, bool noswap, |
2173 | unsigned int swappiness, | 2191 | unsigned int swappiness, |
2174 | struct zone *zone) | 2192 | struct zone *zone, |
2193 | unsigned long *nr_scanned) | ||
2175 | { | 2194 | { |
2176 | struct scan_control sc = { | 2195 | struct scan_control sc = { |
2196 | .nr_scanned = 0, | ||
2177 | .nr_to_reclaim = SWAP_CLUSTER_MAX, | 2197 | .nr_to_reclaim = SWAP_CLUSTER_MAX, |
2178 | .may_writepage = !laptop_mode, | 2198 | .may_writepage = !laptop_mode, |
2179 | .may_unmap = 1, | 2199 | .may_unmap = 1, |
@@ -2182,6 +2202,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, | |||
2182 | .order = 0, | 2202 | .order = 0, |
2183 | .mem_cgroup = mem, | 2203 | .mem_cgroup = mem, |
2184 | }; | 2204 | }; |
2205 | |||
2185 | sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | | 2206 | sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | |
2186 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); | 2207 | (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); |
2187 | 2208 | ||
@@ -2200,6 +2221,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, | |||
2200 | 2221 | ||
2201 | trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); | 2222 | trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); |
2202 | 2223 | ||
2224 | *nr_scanned = sc.nr_scanned; | ||
2203 | return sc.nr_reclaimed; | 2225 | return sc.nr_reclaimed; |
2204 | } | 2226 | } |
2205 | 2227 | ||
@@ -2210,6 +2232,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, | |||
2210 | { | 2232 | { |
2211 | struct zonelist *zonelist; | 2233 | struct zonelist *zonelist; |
2212 | unsigned long nr_reclaimed; | 2234 | unsigned long nr_reclaimed; |
2235 | int nid; | ||
2213 | struct scan_control sc = { | 2236 | struct scan_control sc = { |
2214 | .may_writepage = !laptop_mode, | 2237 | .may_writepage = !laptop_mode, |
2215 | .may_unmap = 1, | 2238 | .may_unmap = 1, |
@@ -2226,7 +2249,14 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, | |||
2226 | .gfp_mask = sc.gfp_mask, | 2249 | .gfp_mask = sc.gfp_mask, |
2227 | }; | 2250 | }; |
2228 | 2251 | ||
2229 | zonelist = NODE_DATA(numa_node_id())->node_zonelists; | 2252 | /* |
2253 | * Unlike direct reclaim via alloc_pages(), memcg's reclaim doesn't | ||
2254 | * take care of from where we get pages. So the node where we start the | ||
2255 | * scan does not need to be the current node. | ||
2256 | */ | ||
2257 | nid = mem_cgroup_select_victim_node(mem_cont); | ||
2258 | |||
2259 | zonelist = NODE_DATA(nid)->node_zonelists; | ||
2230 | 2260 | ||
2231 | trace_mm_vmscan_memcg_reclaim_begin(0, | 2261 | trace_mm_vmscan_memcg_reclaim_begin(0, |
2232 | sc.may_writepage, | 2262 | sc.may_writepage, |
@@ -2347,6 +2377,8 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order, | |||
2347 | int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ | 2377 | int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ |
2348 | unsigned long total_scanned; | 2378 | unsigned long total_scanned; |
2349 | struct reclaim_state *reclaim_state = current->reclaim_state; | 2379 | struct reclaim_state *reclaim_state = current->reclaim_state; |
2380 | unsigned long nr_soft_reclaimed; | ||
2381 | unsigned long nr_soft_scanned; | ||
2350 | struct scan_control sc = { | 2382 | struct scan_control sc = { |
2351 | .gfp_mask = GFP_KERNEL, | 2383 | .gfp_mask = GFP_KERNEL, |
2352 | .may_unmap = 1, | 2384 | .may_unmap = 1, |
@@ -2439,11 +2471,15 @@ loop_again: | |||
2439 | 2471 | ||
2440 | sc.nr_scanned = 0; | 2472 | sc.nr_scanned = 0; |
2441 | 2473 | ||
2474 | nr_soft_scanned = 0; | ||
2442 | /* | 2475 | /* |
2443 | * Call soft limit reclaim before calling shrink_zone. | 2476 | * Call soft limit reclaim before calling shrink_zone. |
2444 | * For now we ignore the return value | ||
2445 | */ | 2477 | */ |
2446 | mem_cgroup_soft_limit_reclaim(zone, order, sc.gfp_mask); | 2478 | nr_soft_reclaimed = mem_cgroup_soft_limit_reclaim(zone, |
2479 | order, sc.gfp_mask, | ||
2480 | &nr_soft_scanned); | ||
2481 | sc.nr_reclaimed += nr_soft_reclaimed; | ||
2482 | total_scanned += nr_soft_scanned; | ||
2447 | 2483 | ||
2448 | /* | 2484 | /* |
2449 | * We put equal pressure on every zone, unless | 2485 | * We put equal pressure on every zone, unless |
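Two threads run through the vmscan.c changes above: nr_scan_try_batch() and its nr_saved_scan state go away (matching the page_alloc.c hunk that stops initializing it), and get_scan_count() instead forces a minimum scan when a small zone or a small memcg would otherwise round down to zero and keep dropping priority; shrink_zones() and balance_pgdat() also start feeding memcg soft-limit reclaim's scanned and reclaimed counts back into their totals. A simplified, hedged restatement of the force-scan decision, ignoring the anon/file fraction split and using the kernel's SWAP_CLUSTER_MAX batch size of 32 pages:

#define DEMO_SWAP_CLUSTER_MAX 32UL

unsigned long demo_scan_target(unsigned long lru_pages, int priority,
                               int global_reclaim, int is_kswapd)
{
    unsigned long scan = lru_pages >> priority;
    int force_scan = 0;

    if (scan < DEMO_SWAP_CLUSTER_MAX) {
        /* kswapd still has to balance this zone */
        if (global_reclaim && is_kswapd)
            force_scan = 1;
        /* a memcg with a small limit must not just keep dropping priority */
        if (!global_reclaim)
            force_scan = 1;
    }

    if (!scan && force_scan)
        scan = DEMO_SWAP_CLUSTER_MAX;
    return scan;
}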
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index b2274d1fd605..c7a581a96894 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c | |||
@@ -46,8 +46,6 @@ int vlan_net_id __read_mostly; | |||
46 | 46 | ||
47 | const char vlan_fullname[] = "802.1Q VLAN Support"; | 47 | const char vlan_fullname[] = "802.1Q VLAN Support"; |
48 | const char vlan_version[] = DRV_VERSION; | 48 | const char vlan_version[] = DRV_VERSION; |
49 | static const char vlan_copyright[] = "Ben Greear <greearb@candelatech.com>"; | ||
50 | static const char vlan_buggyright[] = "David S. Miller <davem@redhat.com>"; | ||
51 | 49 | ||
52 | /* End of global variables definitions. */ | 50 | /* End of global variables definitions. */ |
53 | 51 | ||
@@ -673,8 +671,7 @@ static int __init vlan_proto_init(void) | |||
673 | { | 671 | { |
674 | int err; | 672 | int err; |
675 | 673 | ||
676 | pr_info("%s v%s %s\n", vlan_fullname, vlan_version, vlan_copyright); | 674 | pr_info("%s v%s\n", vlan_fullname, vlan_version); |
677 | pr_info("All bugs added by %s\n", vlan_buggyright); | ||
678 | 675 | ||
679 | err = register_pernet_subsys(&vlan_net_ops); | 676 | err = register_pernet_subsys(&vlan_net_ops); |
680 | if (err < 0) | 677 | if (err < 0) |
diff --git a/net/9p/trans_rdma.c b/net/9p/trans_rdma.c index 844a7a5607e3..159c50f1c6bf 100644 --- a/net/9p/trans_rdma.c +++ b/net/9p/trans_rdma.c | |||
@@ -589,7 +589,8 @@ rdma_create_trans(struct p9_client *client, const char *addr, char *args) | |||
589 | return -ENOMEM; | 589 | return -ENOMEM; |
590 | 590 | ||
591 | /* Create the RDMA CM ID */ | 591 | /* Create the RDMA CM ID */ |
592 | rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP); | 592 | rdma->cm_id = rdma_create_id(p9_cm_event_handler, client, RDMA_PS_TCP, |
593 | IB_QPT_RC); | ||
593 | if (IS_ERR(rdma->cm_id)) | 594 | if (IS_ERR(rdma->cm_id)) |
594 | goto error; | 595 | goto error; |
595 | 596 | ||
diff --git a/net/atm/atm_sysfs.c b/net/atm/atm_sysfs.c index f7fa67c78766..f49da5814bc3 100644 --- a/net/atm/atm_sysfs.c +++ b/net/atm/atm_sysfs.c | |||
@@ -59,6 +59,14 @@ static ssize_t show_atmaddress(struct device *cdev, | |||
59 | return pos - buf; | 59 | return pos - buf; |
60 | } | 60 | } |
61 | 61 | ||
62 | static ssize_t show_atmindex(struct device *cdev, | ||
63 | struct device_attribute *attr, char *buf) | ||
64 | { | ||
65 | struct atm_dev *adev = to_atm_dev(cdev); | ||
66 | |||
67 | return sprintf(buf, "%d\n", adev->number); | ||
68 | } | ||
69 | |||
62 | static ssize_t show_carrier(struct device *cdev, | 70 | static ssize_t show_carrier(struct device *cdev, |
63 | struct device_attribute *attr, char *buf) | 71 | struct device_attribute *attr, char *buf) |
64 | { | 72 | { |
@@ -99,6 +107,7 @@ static ssize_t show_link_rate(struct device *cdev, | |||
99 | 107 | ||
100 | static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); | 108 | static DEVICE_ATTR(address, S_IRUGO, show_address, NULL); |
101 | static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL); | 109 | static DEVICE_ATTR(atmaddress, S_IRUGO, show_atmaddress, NULL); |
110 | static DEVICE_ATTR(atmindex, S_IRUGO, show_atmindex, NULL); | ||
102 | static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL); | 111 | static DEVICE_ATTR(carrier, S_IRUGO, show_carrier, NULL); |
103 | static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); | 112 | static DEVICE_ATTR(type, S_IRUGO, show_type, NULL); |
104 | static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL); | 113 | static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL); |
@@ -106,6 +115,7 @@ static DEVICE_ATTR(link_rate, S_IRUGO, show_link_rate, NULL); | |||
106 | static struct device_attribute *atm_attrs[] = { | 115 | static struct device_attribute *atm_attrs[] = { |
107 | &dev_attr_atmaddress, | 116 | &dev_attr_atmaddress, |
108 | &dev_attr_address, | 117 | &dev_attr_address, |
118 | &dev_attr_atmindex, | ||
109 | &dev_attr_carrier, | 119 | &dev_attr_carrier, |
110 | &dev_attr_type, | 120 | &dev_attr_type, |
111 | &dev_attr_link_rate, | 121 | &dev_attr_link_rate, |
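The atm_sysfs.c hunks export the device number as a new read-only "atmindex" attribute alongside address, carrier, type and link_rate. A short userspace example that prints it for every ATM device; /sys/class/atm is the conventional sysfs location for these devices and should be treated as an assumption here:

#include <dirent.h>
#include <stdio.h>

int main(void)
{
    DIR *d = opendir("/sys/class/atm");
    struct dirent *e;

    if (!d)
        return 1;
    while ((e = readdir(d)) != NULL) {
        char path[512], buf[32];
        FILE *f;

        if (e->d_name[0] == '.')
            continue;
        snprintf(path, sizeof(path), "/sys/class/atm/%s/atmindex", e->d_name);
        f = fopen(path, "r");
        if (!f)
            continue;                   /* entry may not expose the attribute */
        if (fgets(buf, sizeof(buf), f))
            printf("%s: index %s", e->d_name, buf);  /* buf keeps the newline */
        fclose(f);
    }
    closedir(d);
    return 0;
}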
diff --git a/net/atm/lec.c b/net/atm/lec.c index 25073b6ef474..ba48daa68c1f 100644 --- a/net/atm/lec.c +++ b/net/atm/lec.c | |||
@@ -1171,7 +1171,7 @@ static int __init lane_module_init(void) | |||
1171 | #endif | 1171 | #endif |
1172 | 1172 | ||
1173 | register_atm_ioctl(&lane_ioctl_ops); | 1173 | register_atm_ioctl(&lane_ioctl_ops); |
1174 | pr_info("lec.c: " __DATE__ " " __TIME__ " initialized\n"); | 1174 | pr_info("lec.c: initialized\n"); |
1175 | return 0; | 1175 | return 0; |
1176 | } | 1176 | } |
1177 | 1177 | ||
diff --git a/net/atm/mpc.c b/net/atm/mpc.c index 644cdf071642..3ccca42e6f90 100644 --- a/net/atm/mpc.c +++ b/net/atm/mpc.c | |||
@@ -1482,7 +1482,7 @@ static __init int atm_mpoa_init(void) | |||
1482 | if (mpc_proc_init() != 0) | 1482 | if (mpc_proc_init() != 0) |
1483 | pr_info("failed to initialize /proc/mpoa\n"); | 1483 | pr_info("failed to initialize /proc/mpoa\n"); |
1484 | 1484 | ||
1485 | pr_info("mpc.c: " __DATE__ " " __TIME__ " initialized\n"); | 1485 | pr_info("mpc.c: initialized\n"); |
1486 | 1486 | ||
1487 | return 0; | 1487 | return 0; |
1488 | } | 1488 | } |
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c index 1a92b369c820..2b5ca1a0054d 100644 --- a/net/bridge/netfilter/ebtables.c +++ b/net/bridge/netfilter/ebtables.c | |||
@@ -1883,14 +1883,13 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, | |||
1883 | struct xt_target *wt; | 1883 | struct xt_target *wt; |
1884 | void *dst = NULL; | 1884 | void *dst = NULL; |
1885 | int off, pad = 0; | 1885 | int off, pad = 0; |
1886 | unsigned int size_kern, entry_offset, match_size = mwt->match_size; | 1886 | unsigned int size_kern, match_size = mwt->match_size; |
1887 | 1887 | ||
1888 | strlcpy(name, mwt->u.name, sizeof(name)); | 1888 | strlcpy(name, mwt->u.name, sizeof(name)); |
1889 | 1889 | ||
1890 | if (state->buf_kern_start) | 1890 | if (state->buf_kern_start) |
1891 | dst = state->buf_kern_start + state->buf_kern_offset; | 1891 | dst = state->buf_kern_start + state->buf_kern_offset; |
1892 | 1892 | ||
1893 | entry_offset = (unsigned char *) mwt - base; | ||
1894 | switch (compat_mwt) { | 1893 | switch (compat_mwt) { |
1895 | case EBT_COMPAT_MATCH: | 1894 | case EBT_COMPAT_MATCH: |
1896 | match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE, | 1895 | match = try_then_request_module(xt_find_match(NFPROTO_BRIDGE, |
@@ -1933,6 +1932,9 @@ static int compat_mtw_from_user(struct compat_ebt_entry_mwt *mwt, | |||
1933 | size_kern = wt->targetsize; | 1932 | size_kern = wt->targetsize; |
1934 | module_put(wt->me); | 1933 | module_put(wt->me); |
1935 | break; | 1934 | break; |
1935 | |||
1936 | default: | ||
1937 | return -EINVAL; | ||
1936 | } | 1938 | } |
1937 | 1939 | ||
1938 | state->buf_kern_offset += match_size + off; | 1940 | state->buf_kern_offset += match_size + off; |
diff --git a/net/can/proc.c b/net/can/proc.c index f4265cc9c3fb..0016f7339699 100644 --- a/net/can/proc.c +++ b/net/can/proc.c | |||
@@ -204,12 +204,11 @@ static void can_print_rcvlist(struct seq_file *m, struct hlist_head *rx_list, | |||
204 | 204 | ||
205 | hlist_for_each_entry_rcu(r, n, rx_list, list) { | 205 | hlist_for_each_entry_rcu(r, n, rx_list, list) { |
206 | char *fmt = (r->can_id & CAN_EFF_FLAG)? | 206 | char *fmt = (r->can_id & CAN_EFF_FLAG)? |
207 | " %-5s %08X %08x %08x %08x %8ld %s\n" : | 207 | " %-5s %08x %08x %pK %pK %8ld %s\n" : |
208 | " %-5s %03X %08x %08lx %08lx %8ld %s\n"; | 208 | " %-5s %03x %08x %pK %pK %8ld %s\n"; |
209 | 209 | ||
210 | seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask, | 210 | seq_printf(m, fmt, DNAME(dev), r->can_id, r->mask, |
211 | (unsigned long)r->func, (unsigned long)r->data, | 211 | r->func, r->data, r->matches, r->ident); |
212 | r->matches, r->ident); | ||
213 | } | 212 | } |
214 | } | 213 | } |
215 | 214 | ||
diff --git a/net/core/ethtool.c b/net/core/ethtool.c index 84e7304532e6..fd14116ad7f0 100644 --- a/net/core/ethtool.c +++ b/net/core/ethtool.c | |||
@@ -233,6 +233,29 @@ static int ethtool_set_feature_compat(struct net_device *dev, | |||
233 | return 1; | 233 | return 1; |
234 | } | 234 | } |
235 | 235 | ||
236 | static int ethtool_set_flags_compat(struct net_device *dev, | ||
237 | int (*legacy_set)(struct net_device *, u32), | ||
238 | struct ethtool_set_features_block *features, u32 mask) | ||
239 | { | ||
240 | u32 value; | ||
241 | |||
242 | if (!legacy_set) | ||
243 | return 0; | ||
244 | |||
245 | if (!(features[0].valid & mask)) | ||
246 | return 0; | ||
247 | |||
248 | value = dev->features & ~features[0].valid; | ||
249 | value |= features[0].requested; | ||
250 | |||
251 | features[0].valid &= ~mask; | ||
252 | |||
253 | if (legacy_set(dev, value & mask) < 0) | ||
254 | netdev_info(dev, "Legacy flags change failed\n"); | ||
255 | |||
256 | return 1; | ||
257 | } | ||
258 | |||
236 | static int ethtool_set_features_compat(struct net_device *dev, | 259 | static int ethtool_set_features_compat(struct net_device *dev, |
237 | struct ethtool_set_features_block *features) | 260 | struct ethtool_set_features_block *features) |
238 | { | 261 | { |
@@ -249,7 +272,7 @@ static int ethtool_set_features_compat(struct net_device *dev, | |||
249 | features, NETIF_F_ALL_TSO); | 272 | features, NETIF_F_ALL_TSO); |
250 | compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum, | 273 | compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_rx_csum, |
251 | features, NETIF_F_RXCSUM); | 274 | features, NETIF_F_RXCSUM); |
252 | compat |= ethtool_set_feature_compat(dev, dev->ethtool_ops->set_flags, | 275 | compat |= ethtool_set_flags_compat(dev, dev->ethtool_ops->set_flags, |
253 | features, flags_dup_features); | 276 | features, flags_dup_features); |
254 | 277 | ||
255 | return compat; | 278 | return compat; |
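The new ethtool_set_flags_compat() above rebuilds the full legacy flags word, that is the device's current features with the requested bits folded in, hands only the masked portion to the old ->set_flags callback, and clears the handled bits from features[0].valid so the generic path does not process them again. A standalone restatement of the merge step, with plain integer types:

#include <stdint.h>

/* value = (dev_features & ~valid) | requested, as in the hunk above;
 * the legacy hook then only ever sees value & mask. */
uint32_t build_legacy_flags(uint32_t dev_features, uint32_t valid,
                            uint32_t requested)
{
    uint32_t value = dev_features & ~valid;   /* keep bits not being changed */

    value |= requested;                       /* fold in the requested bits */
    return value;
}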
diff --git a/net/core/filter.c b/net/core/filter.c index 0e3622f1dcb1..36f975fa87cb 100644 --- a/net/core/filter.c +++ b/net/core/filter.c | |||
@@ -38,6 +38,7 @@ | |||
38 | #include <asm/unaligned.h> | 38 | #include <asm/unaligned.h> |
39 | #include <linux/filter.h> | 39 | #include <linux/filter.h> |
40 | #include <linux/reciprocal_div.h> | 40 | #include <linux/reciprocal_div.h> |
41 | #include <linux/ratelimit.h> | ||
41 | 42 | ||
42 | /* No hurry in this branch */ | 43 | /* No hurry in this branch */ |
43 | static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size) | 44 | static void *__load_pointer(const struct sk_buff *skb, int k, unsigned int size) |
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index a829e3f60aeb..77a65f031488 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c | |||
@@ -17,6 +17,7 @@ | |||
17 | 17 | ||
18 | #include <net/ip.h> | 18 | #include <net/ip.h> |
19 | #include <net/sock.h> | 19 | #include <net/sock.h> |
20 | #include <net/net_ratelimit.h> | ||
20 | 21 | ||
21 | #ifdef CONFIG_RPS | 22 | #ifdef CONFIG_RPS |
22 | static int rps_sock_flow_sysctl(ctl_table *table, int write, | 23 | static int rps_sock_flow_sysctl(ctl_table *table, int write, |
diff --git a/net/core/utils.c b/net/core/utils.c index 2012bc797f9c..386e263f6066 100644 --- a/net/core/utils.c +++ b/net/core/utils.c | |||
@@ -27,6 +27,7 @@ | |||
27 | #include <linux/ratelimit.h> | 27 | #include <linux/ratelimit.h> |
28 | 28 | ||
29 | #include <net/sock.h> | 29 | #include <net/sock.h> |
30 | #include <net/net_ratelimit.h> | ||
30 | 31 | ||
31 | #include <asm/byteorder.h> | 32 | #include <asm/byteorder.h> |
32 | #include <asm/system.h> | 33 | #include <asm/system.h> |
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c index 61fac4cabc78..c14d88ad348d 100644 --- a/net/ipv4/inet_connection_sock.c +++ b/net/ipv4/inet_connection_sock.c | |||
@@ -33,7 +33,7 @@ EXPORT_SYMBOL(inet_csk_timer_bug_msg); | |||
33 | * This struct holds the first and last local port number. | 33 | * This struct holds the first and last local port number. |
34 | */ | 34 | */ |
35 | struct local_ports sysctl_local_ports __read_mostly = { | 35 | struct local_ports sysctl_local_ports __read_mostly = { |
36 | .lock = SEQLOCK_UNLOCKED, | 36 | .lock = __SEQLOCK_UNLOCKED(sysctl_local_ports.lock), |
37 | .range = { 32768, 61000 }, | 37 | .range = { 32768, 61000 }, |
38 | }; | 38 | }; |
39 | 39 | ||
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index 9df4e635fb5f..ce616d92cc54 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c | |||
@@ -154,11 +154,9 @@ void __init inet_initpeers(void) | |||
154 | /* Called with or without local BH being disabled. */ | 154 | /* Called with or without local BH being disabled. */ |
155 | static void unlink_from_unused(struct inet_peer *p) | 155 | static void unlink_from_unused(struct inet_peer *p) |
156 | { | 156 | { |
157 | if (!list_empty(&p->unused)) { | 157 | spin_lock_bh(&unused_peers.lock); |
158 | spin_lock_bh(&unused_peers.lock); | 158 | list_del_init(&p->unused); |
159 | list_del_init(&p->unused); | 159 | spin_unlock_bh(&unused_peers.lock); |
160 | spin_unlock_bh(&unused_peers.lock); | ||
161 | } | ||
162 | } | 160 | } |
163 | 161 | ||
164 | static int addr_compare(const struct inetpeer_addr *a, | 162 | static int addr_compare(const struct inetpeer_addr *a, |
@@ -205,6 +203,20 @@ static int addr_compare(const struct inetpeer_addr *a, | |||
205 | u; \ | 203 | u; \ |
206 | }) | 204 | }) |
207 | 205 | ||
206 | static bool atomic_add_unless_return(atomic_t *ptr, int a, int u, int *newv) | ||
207 | { | ||
208 | int cur, old = atomic_read(ptr); | ||
209 | |||
210 | while (old != u) { | ||
211 | *newv = old + a; | ||
212 | cur = atomic_cmpxchg(ptr, old, *newv); | ||
213 | if (cur == old) | ||
214 | return true; | ||
215 | old = cur; | ||
216 | } | ||
217 | return false; | ||
218 | } | ||
219 | |||
208 | /* | 220 | /* |
209 | * Called with rcu_read_lock() | 221 | * Called with rcu_read_lock() |
210 | * Because we hold no lock against a writer, its quite possible we fall | 222 | * Because we hold no lock against a writer, its quite possible we fall |
@@ -213,7 +225,8 @@ static int addr_compare(const struct inetpeer_addr *a, | |||
213 | * We exit from this function if number of links exceeds PEER_MAXDEPTH | 225 | * We exit from this function if number of links exceeds PEER_MAXDEPTH |
214 | */ | 226 | */ |
215 | static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr, | 227 | static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr, |
216 | struct inet_peer_base *base) | 228 | struct inet_peer_base *base, |
229 | int *newrefcnt) | ||
217 | { | 230 | { |
218 | struct inet_peer *u = rcu_dereference(base->root); | 231 | struct inet_peer *u = rcu_dereference(base->root); |
219 | int count = 0; | 232 | int count = 0; |
@@ -226,7 +239,7 @@ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr, | |||
226 | * distinction between an unused entry (refcnt=0) and | 239 | * distinction between an unused entry (refcnt=0) and |
227 | * a freed one. | 240 | * a freed one. |
228 | */ | 241 | */ |
229 | if (unlikely(!atomic_add_unless(&u->refcnt, 1, -1))) | 242 | if (!atomic_add_unless_return(&u->refcnt, 1, -1, newrefcnt)) |
230 | u = NULL; | 243 | u = NULL; |
231 | return u; | 244 | return u; |
232 | } | 245 | } |
@@ -465,22 +478,23 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create) | |||
465 | struct inet_peer_base *base = family_to_base(daddr->family); | 478 | struct inet_peer_base *base = family_to_base(daddr->family); |
466 | struct inet_peer *p; | 479 | struct inet_peer *p; |
467 | unsigned int sequence; | 480 | unsigned int sequence; |
468 | int invalidated; | 481 | int invalidated, newrefcnt = 0; |
469 | 482 | ||
470 | /* Look up for the address quickly, lockless. | 483 | /* Look up for the address quickly, lockless. |
471 | * Because of a concurrent writer, we might not find an existing entry. | 484 | * Because of a concurrent writer, we might not find an existing entry. |
472 | */ | 485 | */ |
473 | rcu_read_lock(); | 486 | rcu_read_lock(); |
474 | sequence = read_seqbegin(&base->lock); | 487 | sequence = read_seqbegin(&base->lock); |
475 | p = lookup_rcu(daddr, base); | 488 | p = lookup_rcu(daddr, base, &newrefcnt); |
476 | invalidated = read_seqretry(&base->lock, sequence); | 489 | invalidated = read_seqretry(&base->lock, sequence); |
477 | rcu_read_unlock(); | 490 | rcu_read_unlock(); |
478 | 491 | ||
479 | if (p) { | 492 | if (p) { |
480 | /* The existing node has been found. | 493 | found: /* The existing node has been found. |
481 | * Remove the entry from unused list if it was there. | 494 | * Remove the entry from unused list if it was there. |
482 | */ | 495 | */ |
483 | unlink_from_unused(p); | 496 | if (newrefcnt == 1) |
497 | unlink_from_unused(p); | ||
484 | return p; | 498 | return p; |
485 | } | 499 | } |
486 | 500 | ||
@@ -494,11 +508,9 @@ struct inet_peer *inet_getpeer(struct inetpeer_addr *daddr, int create) | |||
494 | write_seqlock_bh(&base->lock); | 508 | write_seqlock_bh(&base->lock); |
495 | p = lookup(daddr, stack, base); | 509 | p = lookup(daddr, stack, base); |
496 | if (p != peer_avl_empty) { | 510 | if (p != peer_avl_empty) { |
497 | atomic_inc(&p->refcnt); | 511 | newrefcnt = atomic_inc_return(&p->refcnt); |
498 | write_sequnlock_bh(&base->lock); | 512 | write_sequnlock_bh(&base->lock); |
499 | /* Remove the entry from unused list if it was there. */ | 513 | goto found; |
500 | unlink_from_unused(p); | ||
501 | return p; | ||
502 | } | 514 | } |
503 | p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL; | 515 | p = create ? kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL; |
504 | if (p) { | 516 | if (p) { |
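The inetpeer.c rework above replaces atomic_add_unless() in the lockless lookup with a helper that also reports the post-increment reference count, so inet_getpeer() only calls unlink_from_unused(), and therefore only takes unused_peers.lock, when the count went from 0 to 1 and the peer might actually be sitting on the unused list; the locked lookup() path gets the same treatment via atomic_inc_return(). A userspace analogue of that compare-and-swap loop in C11 atomics, illustrative only:

#include <stdatomic.h>
#include <stdbool.h>

/* Add `a` to *ptr unless it currently equals `u`; report the new value
 * through *newv so the caller can detect the 0 -> 1 transition. */
bool add_unless_return(atomic_int *ptr, int a, int u, int *newv)
{
    int old = atomic_load(ptr);

    while (old != u) {
        *newv = old + a;
        if (atomic_compare_exchange_weak(ptr, &old, *newv))
            return true;
        /* on failure, `old` has been reloaded with the current value */
    }
    return false;
}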
diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c index a15c01524959..7f9124914b13 100644 --- a/net/iucv/iucv.c +++ b/net/iucv/iucv.c | |||
@@ -54,7 +54,7 @@ | |||
54 | #include <asm/atomic.h> | 54 | #include <asm/atomic.h> |
55 | #include <asm/ebcdic.h> | 55 | #include <asm/ebcdic.h> |
56 | #include <asm/io.h> | 56 | #include <asm/io.h> |
57 | #include <asm/s390_ext.h> | 57 | #include <asm/irq.h> |
58 | #include <asm/smp.h> | 58 | #include <asm/smp.h> |
59 | 59 | ||
60 | /* | 60 | /* |
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c index 72d1ac611fdc..8041befc6555 100644 --- a/net/netfilter/ipset/ip_set_core.c +++ b/net/netfilter/ipset/ip_set_core.c | |||
@@ -815,7 +815,7 @@ ip_set_flush(struct sock *ctnl, struct sk_buff *skb, | |||
815 | ip_set_id_t i; | 815 | ip_set_id_t i; |
816 | 816 | ||
817 | if (unlikely(protocol_failed(attr))) | 817 | if (unlikely(protocol_failed(attr))) |
818 | return -EPROTO; | 818 | return -IPSET_ERR_PROTOCOL; |
819 | 819 | ||
820 | if (!attr[IPSET_ATTR_SETNAME]) { | 820 | if (!attr[IPSET_ATTR_SETNAME]) { |
821 | for (i = 0; i < ip_set_max; i++) | 821 | for (i = 0; i < ip_set_max; i++) |
diff --git a/net/netfilter/ipvs/ip_vs_ftp.c b/net/netfilter/ipvs/ip_vs_ftp.c index 6b5dd6ddaae9..af63553fa332 100644 --- a/net/netfilter/ipvs/ip_vs_ftp.c +++ b/net/netfilter/ipvs/ip_vs_ftp.c | |||
@@ -411,25 +411,35 @@ static struct ip_vs_app ip_vs_ftp = { | |||
411 | static int __net_init __ip_vs_ftp_init(struct net *net) | 411 | static int __net_init __ip_vs_ftp_init(struct net *net) |
412 | { | 412 | { |
413 | int i, ret; | 413 | int i, ret; |
414 | struct ip_vs_app *app = &ip_vs_ftp; | 414 | struct ip_vs_app *app; |
415 | struct netns_ipvs *ipvs = net_ipvs(net); | ||
416 | |||
417 | app = kmemdup(&ip_vs_ftp, sizeof(struct ip_vs_app), GFP_KERNEL); | ||
418 | if (!app) | ||
419 | return -ENOMEM; | ||
420 | INIT_LIST_HEAD(&app->a_list); | ||
421 | INIT_LIST_HEAD(&app->incs_list); | ||
422 | ipvs->ftp_app = app; | ||
415 | 423 | ||
416 | ret = register_ip_vs_app(net, app); | 424 | ret = register_ip_vs_app(net, app); |
417 | if (ret) | 425 | if (ret) |
418 | return ret; | 426 | goto err_exit; |
419 | 427 | ||
420 | for (i=0; i<IP_VS_APP_MAX_PORTS; i++) { | 428 | for (i=0; i<IP_VS_APP_MAX_PORTS; i++) { |
421 | if (!ports[i]) | 429 | if (!ports[i]) |
422 | continue; | 430 | continue; |
423 | ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]); | 431 | ret = register_ip_vs_app_inc(net, app, app->protocol, ports[i]); |
424 | if (ret) | 432 | if (ret) |
425 | break; | 433 | goto err_unreg; |
426 | pr_info("%s: loaded support on port[%d] = %d\n", | 434 | pr_info("%s: loaded support on port[%d] = %d\n", |
427 | app->name, i, ports[i]); | 435 | app->name, i, ports[i]); |
428 | } | 436 | } |
437 | return 0; | ||
429 | 438 | ||
430 | if (ret) | 439 | err_unreg: |
431 | unregister_ip_vs_app(net, app); | 440 | unregister_ip_vs_app(net, app); |
432 | 441 | err_exit: | |
442 | kfree(ipvs->ftp_app); | ||
433 | return ret; | 443 | return ret; |
434 | } | 444 | } |
435 | /* | 445 | /* |
@@ -437,9 +447,10 @@ static int __net_init __ip_vs_ftp_init(struct net *net) | |||
437 | */ | 447 | */ |
438 | static void __ip_vs_ftp_exit(struct net *net) | 448 | static void __ip_vs_ftp_exit(struct net *net) |
439 | { | 449 | { |
440 | struct ip_vs_app *app = &ip_vs_ftp; | 450 | struct netns_ipvs *ipvs = net_ipvs(net); |
441 | 451 | ||
442 | unregister_ip_vs_app(net, app); | 452 | unregister_ip_vs_app(net, ipvs->ftp_app); |
453 | kfree(ipvs->ftp_app); | ||
443 | } | 454 | } |
444 | 455 | ||
445 | static struct pernet_operations ip_vs_ftp_ops = { | 456 | static struct pernet_operations ip_vs_ftp_ops = { |
diff --git a/net/rds/ib.c b/net/rds/ib.c index cce19f95c624..3b83086bcc30 100644 --- a/net/rds/ib.c +++ b/net/rds/ib.c | |||
@@ -325,7 +325,7 @@ static int rds_ib_laddr_check(__be32 addr) | |||
325 | /* Create a CMA ID and try to bind it. This catches both | 325 | /* Create a CMA ID and try to bind it. This catches both |
326 | * IB and iWARP capable NICs. | 326 | * IB and iWARP capable NICs. |
327 | */ | 327 | */ |
328 | cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); | 328 | cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); |
329 | if (IS_ERR(cm_id)) | 329 | if (IS_ERR(cm_id)) |
330 | return PTR_ERR(cm_id); | 330 | return PTR_ERR(cm_id); |
331 | 331 | ||
diff --git a/net/rds/ib_cm.c b/net/rds/ib_cm.c index ee369d201a65..fd453dd5124b 100644 --- a/net/rds/ib_cm.c +++ b/net/rds/ib_cm.c | |||
@@ -587,7 +587,7 @@ int rds_ib_conn_connect(struct rds_connection *conn) | |||
587 | /* XXX I wonder what affect the port space has */ | 587 | /* XXX I wonder what affect the port space has */ |
588 | /* delegate cm event handler to rdma_transport */ | 588 | /* delegate cm event handler to rdma_transport */ |
589 | ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, | 589 | ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, |
590 | RDMA_PS_TCP); | 590 | RDMA_PS_TCP, IB_QPT_RC); |
591 | if (IS_ERR(ic->i_cm_id)) { | 591 | if (IS_ERR(ic->i_cm_id)) { |
592 | ret = PTR_ERR(ic->i_cm_id); | 592 | ret = PTR_ERR(ic->i_cm_id); |
593 | ic->i_cm_id = NULL; | 593 | ic->i_cm_id = NULL; |
diff --git a/net/rds/iw.c b/net/rds/iw.c index 5a9676fe594f..f7474844f096 100644 --- a/net/rds/iw.c +++ b/net/rds/iw.c | |||
@@ -226,7 +226,7 @@ static int rds_iw_laddr_check(__be32 addr) | |||
226 | /* Create a CMA ID and try to bind it. This catches both | 226 | /* Create a CMA ID and try to bind it. This catches both |
227 | * IB and iWARP capable NICs. | 227 | * IB and iWARP capable NICs. |
228 | */ | 228 | */ |
229 | cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP); | 229 | cm_id = rdma_create_id(NULL, NULL, RDMA_PS_TCP, IB_QPT_RC); |
230 | if (IS_ERR(cm_id)) | 230 | if (IS_ERR(cm_id)) |
231 | return PTR_ERR(cm_id); | 231 | return PTR_ERR(cm_id); |
232 | 232 | ||
diff --git a/net/rds/iw_cm.c b/net/rds/iw_cm.c index 3a60a15d1b4a..c12db66f24c7 100644 --- a/net/rds/iw_cm.c +++ b/net/rds/iw_cm.c | |||
@@ -522,7 +522,7 @@ int rds_iw_conn_connect(struct rds_connection *conn) | |||
522 | /* XXX I wonder what affect the port space has */ | 522 | /* XXX I wonder what affect the port space has */ |
523 | /* delegate cm event handler to rdma_transport */ | 523 | /* delegate cm event handler to rdma_transport */ |
524 | ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, | 524 | ic->i_cm_id = rdma_create_id(rds_rdma_cm_event_handler, conn, |
525 | RDMA_PS_TCP); | 525 | RDMA_PS_TCP, IB_QPT_RC); |
526 | if (IS_ERR(ic->i_cm_id)) { | 526 | if (IS_ERR(ic->i_cm_id)) { |
527 | ret = PTR_ERR(ic->i_cm_id); | 527 | ret = PTR_ERR(ic->i_cm_id); |
528 | ic->i_cm_id = NULL; | 528 | ic->i_cm_id = NULL; |
diff --git a/net/rds/rdma_transport.c b/net/rds/rdma_transport.c index 4195a0539829..f8760e1b6688 100644 --- a/net/rds/rdma_transport.c +++ b/net/rds/rdma_transport.c | |||
@@ -158,7 +158,8 @@ static int rds_rdma_listen_init(void) | |||
158 | struct rdma_cm_id *cm_id; | 158 | struct rdma_cm_id *cm_id; |
159 | int ret; | 159 | int ret; |
160 | 160 | ||
161 | cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP); | 161 | cm_id = rdma_create_id(rds_rdma_cm_event_handler, NULL, RDMA_PS_TCP, |
162 | IB_QPT_RC); | ||
162 | if (IS_ERR(cm_id)) { | 163 | if (IS_ERR(cm_id)) { |
163 | ret = PTR_ERR(cm_id); | 164 | ret = PTR_ERR(cm_id); |
164 | printk(KERN_ERR "RDS/RDMA: failed to setup listener, " | 165 | printk(KERN_ERR "RDS/RDMA: failed to setup listener, " |
diff --git a/net/sunrpc/xprtrdma/svc_rdma_transport.c b/net/sunrpc/xprtrdma/svc_rdma_transport.c index 6c014dd3a20b..c3c232a88d94 100644 --- a/net/sunrpc/xprtrdma/svc_rdma_transport.c +++ b/net/sunrpc/xprtrdma/svc_rdma_transport.c | |||
@@ -695,7 +695,8 @@ static struct svc_xprt *svc_rdma_create(struct svc_serv *serv, | |||
695 | return ERR_PTR(-ENOMEM); | 695 | return ERR_PTR(-ENOMEM); |
696 | xprt = &cma_xprt->sc_xprt; | 696 | xprt = &cma_xprt->sc_xprt; |
697 | 697 | ||
698 | listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP); | 698 | listen_id = rdma_create_id(rdma_listen_handler, cma_xprt, RDMA_PS_TCP, |
699 | IB_QPT_RC); | ||
699 | if (IS_ERR(listen_id)) { | 700 | if (IS_ERR(listen_id)) { |
700 | ret = PTR_ERR(listen_id); | 701 | ret = PTR_ERR(listen_id); |
701 | dprintk("svcrdma: rdma_create_id failed = %d\n", ret); | 702 | dprintk("svcrdma: rdma_create_id failed = %d\n", ret); |
diff --git a/net/sunrpc/xprtrdma/verbs.c b/net/sunrpc/xprtrdma/verbs.c index d4297dc43dc4..80f8da344df5 100644 --- a/net/sunrpc/xprtrdma/verbs.c +++ b/net/sunrpc/xprtrdma/verbs.c | |||
@@ -387,7 +387,7 @@ rpcrdma_create_id(struct rpcrdma_xprt *xprt, | |||
387 | 387 | ||
388 | init_completion(&ia->ri_done); | 388 | init_completion(&ia->ri_done); |
389 | 389 | ||
390 | id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP); | 390 | id = rdma_create_id(rpcrdma_conn_upcall, xprt, RDMA_PS_TCP, IB_QPT_RC); |
391 | if (IS_ERR(id)) { | 391 | if (IS_ERR(id)) { |
392 | rc = PTR_ERR(id); | 392 | rc = PTR_ERR(id); |
393 | dprintk("RPC: %s: rdma_create_id() failed %i\n", | 393 | dprintk("RPC: %s: rdma_create_id() failed %i\n", |
diff --git a/scripts/selinux/README b/scripts/selinux/README index a936315ba2c8..4d020ecb7524 100644 --- a/scripts/selinux/README +++ b/scripts/selinux/README | |||
@@ -1,2 +1,2 @@ | |||
1 | Please see Documentation/SELinux.txt for information on | 1 | Please see Documentation/security/SELinux.txt for information on |
2 | installing a dummy SELinux policy. | 2 | installing a dummy SELinux policy. |
diff --git a/security/apparmor/match.c b/security/apparmor/match.c index 06d764ccbbe5..94de6b4907c8 100644 --- a/security/apparmor/match.c +++ b/security/apparmor/match.c | |||
@@ -194,7 +194,7 @@ void aa_dfa_free_kref(struct kref *kref) | |||
194 | * @flags: flags controlling what type of accept tables are acceptable | 194 | * @flags: flags controlling what type of accept tables are acceptable |
195 | * | 195 | * |
196 | * Unpack a dfa that has been serialized. To find information on the dfa | 196 | * Unpack a dfa that has been serialized. To find information on the dfa |
197 | * format look in Documentation/apparmor.txt | 197 | * format look in Documentation/security/apparmor.txt |
198 | * Assumes the dfa @blob stream has been aligned on a 8 byte boundary | 198 | * Assumes the dfa @blob stream has been aligned on a 8 byte boundary |
199 | * | 199 | * |
200 | * Returns: an unpacked dfa ready for matching or ERR_PTR on failure | 200 | * Returns: an unpacked dfa ready for matching or ERR_PTR on failure |
diff --git a/security/apparmor/policy_unpack.c b/security/apparmor/policy_unpack.c index e33aaf7e5744..d6d9a57b5652 100644 --- a/security/apparmor/policy_unpack.c +++ b/security/apparmor/policy_unpack.c | |||
@@ -12,8 +12,8 @@ | |||
12 | * published by the Free Software Foundation, version 2 of the | 12 | * published by the Free Software Foundation, version 2 of the |
13 | * License. | 13 | * License. |
14 | * | 14 | * |
15 | * AppArmor uses a serialized binary format for loading policy. | 15 | * AppArmor uses a serialized binary format for loading policy. To find |
16 | * To find policy format documentation look in Documentation/apparmor.txt | 16 | * policy format documentation look in Documentation/security/apparmor.txt |
17 | * All policy is validated before it is used. | 17 | * All policy is validated before it is used. |
18 | */ | 18 | */ |
19 | 19 | ||
diff --git a/security/device_cgroup.c b/security/device_cgroup.c index 8d9c48f13774..cd1f779fa51d 100644 --- a/security/device_cgroup.c +++ b/security/device_cgroup.c | |||
@@ -62,8 +62,7 @@ static inline struct dev_cgroup *task_devcgroup(struct task_struct *task) | |||
62 | struct cgroup_subsys devices_subsys; | 62 | struct cgroup_subsys devices_subsys; |
63 | 63 | ||
64 | static int devcgroup_can_attach(struct cgroup_subsys *ss, | 64 | static int devcgroup_can_attach(struct cgroup_subsys *ss, |
65 | struct cgroup *new_cgroup, struct task_struct *task, | 65 | struct cgroup *new_cgroup, struct task_struct *task) |
66 | bool threadgroup) | ||
67 | { | 66 | { |
68 | if (current != task && !capable(CAP_SYS_ADMIN)) | 67 | if (current != task && !capable(CAP_SYS_ADMIN)) |
69 | return -EPERM; | 68 | return -EPERM; |
diff --git a/security/keys/encrypted.c b/security/keys/encrypted.c index 69907a58a683..b1cba5bf0a5e 100644 --- a/security/keys/encrypted.c +++ b/security/keys/encrypted.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
9 | * the Free Software Foundation, version 2 of the License. | 9 | * the Free Software Foundation, version 2 of the License. |
10 | * | 10 | * |
11 | * See Documentation/keys-trusted-encrypted.txt | 11 | * See Documentation/security/keys-trusted-encrypted.txt |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c index 6c0480db8885..a3063eb3dc23 100644 --- a/security/keys/process_keys.c +++ b/security/keys/process_keys.c | |||
@@ -847,6 +847,7 @@ void key_replace_session_keyring(void) | |||
847 | new-> sgid = old-> sgid; | 847 | new-> sgid = old-> sgid; |
848 | new->fsgid = old->fsgid; | 848 | new->fsgid = old->fsgid; |
849 | new->user = get_uid(old->user); | 849 | new->user = get_uid(old->user); |
850 | new->user_ns = new->user->user_ns; | ||
850 | new->group_info = get_group_info(old->group_info); | 851 | new->group_info = get_group_info(old->group_info); |
851 | 852 | ||
852 | new->securebits = old->securebits; | 853 | new->securebits = old->securebits; |
diff --git a/security/keys/request_key.c b/security/keys/request_key.c index b18a71745901..d31862e0aa1c 100644 --- a/security/keys/request_key.c +++ b/security/keys/request_key.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | * | 10 | * |
11 | * See Documentation/keys-request-key.txt | 11 | * See Documentation/security/keys-request-key.txt |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c index f6337c9082eb..6cff37529b80 100644 --- a/security/keys/request_key_auth.c +++ b/security/keys/request_key_auth.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * as published by the Free Software Foundation; either version | 8 | * as published by the Free Software Foundation; either version |
9 | * 2 of the License, or (at your option) any later version. | 9 | * 2 of the License, or (at your option) any later version. |
10 | * | 10 | * |
11 | * See Documentation/keys-request-key.txt | 11 | * See Documentation/security/keys-request-key.txt |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/module.h> | 14 | #include <linux/module.h> |
diff --git a/security/keys/trusted.c b/security/keys/trusted.c index c99b9368368c..0c33e2ea1f3c 100644 --- a/security/keys/trusted.c +++ b/security/keys/trusted.c | |||
@@ -8,7 +8,7 @@ | |||
8 | * it under the terms of the GNU General Public License as published by | 8 | * it under the terms of the GNU General Public License as published by |
9 | * the Free Software Foundation, version 2 of the License. | 9 | * the Free Software Foundation, version 2 of the License. |
10 | * | 10 | * |
11 | * See Documentation/keys-trusted-encrypted.txt | 11 | * See Documentation/security/keys-trusted-encrypted.txt |
12 | */ | 12 | */ |
13 | 13 | ||
14 | #include <linux/uaccess.h> | 14 | #include <linux/uaccess.h> |
diff --git a/security/selinux/avc.c b/security/selinux/avc.c index fcb89cb0f223..d515b2128a4e 100644 --- a/security/selinux/avc.c +++ b/security/selinux/avc.c | |||
@@ -752,10 +752,9 @@ int avc_ss_reset(u32 seqno) | |||
752 | int avc_has_perm_noaudit(u32 ssid, u32 tsid, | 752 | int avc_has_perm_noaudit(u32 ssid, u32 tsid, |
753 | u16 tclass, u32 requested, | 753 | u16 tclass, u32 requested, |
754 | unsigned flags, | 754 | unsigned flags, |
755 | struct av_decision *in_avd) | 755 | struct av_decision *avd) |
756 | { | 756 | { |
757 | struct avc_node *node; | 757 | struct avc_node *node; |
758 | struct av_decision avd_entry, *avd; | ||
759 | int rc = 0; | 758 | int rc = 0; |
760 | u32 denied; | 759 | u32 denied; |
761 | 760 | ||
@@ -766,18 +765,11 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid, | |||
766 | node = avc_lookup(ssid, tsid, tclass); | 765 | node = avc_lookup(ssid, tsid, tclass); |
767 | if (unlikely(!node)) { | 766 | if (unlikely(!node)) { |
768 | rcu_read_unlock(); | 767 | rcu_read_unlock(); |
769 | |||
770 | if (in_avd) | ||
771 | avd = in_avd; | ||
772 | else | ||
773 | avd = &avd_entry; | ||
774 | |||
775 | security_compute_av(ssid, tsid, tclass, avd); | 768 | security_compute_av(ssid, tsid, tclass, avd); |
776 | rcu_read_lock(); | 769 | rcu_read_lock(); |
777 | node = avc_insert(ssid, tsid, tclass, avd); | 770 | node = avc_insert(ssid, tsid, tclass, avd); |
778 | } else { | 771 | } else { |
779 | if (in_avd) | 772 | memcpy(avd, &node->ae.avd, sizeof(*avd)); |
780 | memcpy(in_avd, &node->ae.avd, sizeof(*in_avd)); | ||
781 | avd = &node->ae.avd; | 773 | avd = &node->ae.avd; |
782 | } | 774 | } |
783 | 775 | ||
diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index c3e4b52699f4..973e00e34fa9 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c | |||
@@ -2217,10 +2217,11 @@ out_unlock: | |||
2217 | goto out; | 2217 | goto out; |
2218 | } | 2218 | } |
2219 | for (i = 0, j = 0; i < mynel; i++) { | 2219 | for (i = 0, j = 0; i < mynel; i++) { |
2220 | struct av_decision dummy_avd; | ||
2220 | rc = avc_has_perm_noaudit(fromsid, mysids[i], | 2221 | rc = avc_has_perm_noaudit(fromsid, mysids[i], |
2221 | SECCLASS_PROCESS, /* kernel value */ | 2222 | SECCLASS_PROCESS, /* kernel value */ |
2222 | PROCESS__TRANSITION, AVC_STRICT, | 2223 | PROCESS__TRANSITION, AVC_STRICT, |
2223 | NULL); | 2224 | &dummy_avd); |
2224 | if (!rc) | 2225 | if (!rc) |
2225 | mysids2[j++] = mysids[i]; | 2226 | mysids2[j++] = mysids[i]; |
2226 | cond_resched(); | 2227 | cond_resched(); |
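The two SELinux hunks are coupled: avc_has_perm_noaudit() no longer accepts a NULL decision pointer (the in_avd/avd_entry fallback is gone), so the one caller in services.c that used to pass NULL now supplies a stack av_decision it never reads. A hedged sketch of the adjusted calling convention; check_transition() is a hypothetical wrapper, but the arguments are the ones used in the services.c hunk above:

        /* The decision structure must now be supplied even when only the
         * return code matters. */
        static int check_transition(u32 fromsid, u32 tosid)
        {
                struct av_decision dummy_avd;

                return avc_has_perm_noaudit(fromsid, tosid,
                                            SECCLASS_PROCESS, /* kernel value */
                                            PROCESS__TRANSITION, AVC_STRICT,
                                            &dummy_avd); /* NULL no longer allowed */
        }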
diff --git a/sound/core/control.c b/sound/core/control.c index 5d98194bcad5..f8c5be464510 100644 --- a/sound/core/control.c +++ b/sound/core/control.c | |||
@@ -704,13 +704,12 @@ static int snd_ctl_elem_list(struct snd_card *card, | |||
704 | struct snd_ctl_elem_list list; | 704 | struct snd_ctl_elem_list list; |
705 | struct snd_kcontrol *kctl; | 705 | struct snd_kcontrol *kctl; |
706 | struct snd_ctl_elem_id *dst, *id; | 706 | struct snd_ctl_elem_id *dst, *id; |
707 | unsigned int offset, space, first, jidx; | 707 | unsigned int offset, space, jidx; |
708 | 708 | ||
709 | if (copy_from_user(&list, _list, sizeof(list))) | 709 | if (copy_from_user(&list, _list, sizeof(list))) |
710 | return -EFAULT; | 710 | return -EFAULT; |
711 | offset = list.offset; | 711 | offset = list.offset; |
712 | space = list.space; | 712 | space = list.space; |
713 | first = 0; | ||
714 | /* try limit maximum space */ | 713 | /* try limit maximum space */ |
715 | if (space > 16384) | 714 | if (space > 16384) |
716 | return -ENOMEM; | 715 | return -ENOMEM; |
diff --git a/sound/core/init.c b/sound/core/init.c index 30ecad41403c..2c041bb36ab3 100644 --- a/sound/core/init.c +++ b/sound/core/init.c | |||
@@ -342,7 +342,6 @@ static const struct file_operations snd_shutdown_f_ops = | |||
342 | int snd_card_disconnect(struct snd_card *card) | 342 | int snd_card_disconnect(struct snd_card *card) |
343 | { | 343 | { |
344 | struct snd_monitor_file *mfile; | 344 | struct snd_monitor_file *mfile; |
345 | struct file *file; | ||
346 | int err; | 345 | int err; |
347 | 346 | ||
348 | if (!card) | 347 | if (!card) |
@@ -366,8 +365,6 @@ int snd_card_disconnect(struct snd_card *card) | |||
366 | 365 | ||
367 | spin_lock(&card->files_lock); | 366 | spin_lock(&card->files_lock); |
368 | list_for_each_entry(mfile, &card->files_list, list) { | 367 | list_for_each_entry(mfile, &card->files_list, list) { |
369 | file = mfile->file; | ||
370 | |||
371 | /* it's critical part, use endless loop */ | 368 | /* it's critical part, use endless loop */ |
372 | /* we have no room to fail */ | 369 | /* we have no room to fail */ |
373 | mfile->disconnected_f_op = mfile->file->f_op; | 370 | mfile->disconnected_f_op = mfile->file->f_op; |
diff --git a/sound/core/oss/linear.c b/sound/core/oss/linear.c index 13b3f6f49fae..2045697f449d 100644 --- a/sound/core/oss/linear.c +++ b/sound/core/oss/linear.c | |||
@@ -90,11 +90,8 @@ static snd_pcm_sframes_t linear_transfer(struct snd_pcm_plugin *plugin, | |||
90 | struct snd_pcm_plugin_channel *dst_channels, | 90 | struct snd_pcm_plugin_channel *dst_channels, |
91 | snd_pcm_uframes_t frames) | 91 | snd_pcm_uframes_t frames) |
92 | { | 92 | { |
93 | struct linear_priv *data; | ||
94 | |||
95 | if (snd_BUG_ON(!plugin || !src_channels || !dst_channels)) | 93 | if (snd_BUG_ON(!plugin || !src_channels || !dst_channels)) |
96 | return -ENXIO; | 94 | return -ENXIO; |
97 | data = (struct linear_priv *)plugin->extra_data; | ||
98 | if (frames == 0) | 95 | if (frames == 0) |
99 | return 0; | 96 | return 0; |
100 | #ifdef CONFIG_SND_DEBUG | 97 | #ifdef CONFIG_SND_DEBUG |
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c index abfeff1611ce..f1341308beda 100644 --- a/sound/core/pcm_lib.c +++ b/sound/core/pcm_lib.c | |||
@@ -1756,8 +1756,18 @@ static int wait_for_avail(struct snd_pcm_substream *substream, | |||
1756 | wait_queue_t wait; | 1756 | wait_queue_t wait; |
1757 | int err = 0; | 1757 | int err = 0; |
1758 | snd_pcm_uframes_t avail = 0; | 1758 | snd_pcm_uframes_t avail = 0; |
1759 | long tout; | 1759 | long wait_time, tout; |
1760 | 1760 | ||
1761 | if (runtime->no_period_wakeup) | ||
1762 | wait_time = MAX_SCHEDULE_TIMEOUT; | ||
1763 | else { | ||
1764 | wait_time = 10; | ||
1765 | if (runtime->rate) { | ||
1766 | long t = runtime->period_size * 2 / runtime->rate; | ||
1767 | wait_time = max(t, wait_time); | ||
1768 | } | ||
1769 | wait_time = msecs_to_jiffies(wait_time * 1000); | ||
1770 | } | ||
1761 | init_waitqueue_entry(&wait, current); | 1771 | init_waitqueue_entry(&wait, current); |
1762 | add_wait_queue(&runtime->tsleep, &wait); | 1772 | add_wait_queue(&runtime->tsleep, &wait); |
1763 | for (;;) { | 1773 | for (;;) { |
@@ -1765,9 +1775,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream, | |||
1765 | err = -ERESTARTSYS; | 1775 | err = -ERESTARTSYS; |
1766 | break; | 1776 | break; |
1767 | } | 1777 | } |
1768 | set_current_state(TASK_INTERRUPTIBLE); | ||
1769 | snd_pcm_stream_unlock_irq(substream); | 1778 | snd_pcm_stream_unlock_irq(substream); |
1770 | tout = schedule_timeout(msecs_to_jiffies(10000)); | 1779 | tout = schedule_timeout_interruptible(wait_time); |
1771 | snd_pcm_stream_lock_irq(substream); | 1780 | snd_pcm_stream_lock_irq(substream); |
1772 | switch (runtime->status->state) { | 1781 | switch (runtime->status->state) { |
1773 | case SNDRV_PCM_STATE_SUSPENDED: | 1782 | case SNDRV_PCM_STATE_SUSPENDED: |
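Both timeout hunks (wait_for_avail() here and snd_pcm_drain() in pcm_native.c below) replace the hard-coded 10-second schedule_timeout() with a stream-derived value: wait forever when period wakeups are disabled, otherwise the larger of 10 seconds and two period lengths, converted to jiffies; the sleep also becomes schedule_timeout_interruptible(), so the explicit set_current_state() goes away. A hedged sketch of that computation as a standalone helper; pcm_wait_time() is an illustrative name, not a function added by the patch:

        #include <linux/jiffies.h>
        #include <linux/kernel.h>
        #include <linux/sched.h>
        #include <sound/pcm.h>

        /* no_period_wakeup means "wait forever"; otherwise wait at least
         * 10 seconds, or two period lengths if those are longer. */
        static long pcm_wait_time(struct snd_pcm_runtime *runtime)
        {
                long wait_time;

                if (runtime->no_period_wakeup)
                        return MAX_SCHEDULE_TIMEOUT;

                wait_time = 10;                         /* seconds */
                if (runtime->rate) {
                        long t = runtime->period_size * 2 / runtime->rate;
                        wait_time = max(t, wait_time);  /* two periods */
                }
                return msecs_to_jiffies(wait_time * 1000);
        }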
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 1a07750f3836..1c6be91dfb98 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c | |||
@@ -1481,11 +1481,20 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, | |||
1481 | break; /* all drained */ | 1481 | break; /* all drained */ |
1482 | init_waitqueue_entry(&wait, current); | 1482 | init_waitqueue_entry(&wait, current); |
1483 | add_wait_queue(&to_check->sleep, &wait); | 1483 | add_wait_queue(&to_check->sleep, &wait); |
1484 | set_current_state(TASK_INTERRUPTIBLE); | ||
1485 | snd_pcm_stream_unlock_irq(substream); | 1484 | snd_pcm_stream_unlock_irq(substream); |
1486 | up_read(&snd_pcm_link_rwsem); | 1485 | up_read(&snd_pcm_link_rwsem); |
1487 | snd_power_unlock(card); | 1486 | snd_power_unlock(card); |
1488 | tout = schedule_timeout(10 * HZ); | 1487 | if (runtime->no_period_wakeup) |
1488 | tout = MAX_SCHEDULE_TIMEOUT; | ||
1489 | else { | ||
1490 | tout = 10; | ||
1491 | if (runtime->rate) { | ||
1492 | long t = runtime->period_size * 2 / runtime->rate; | ||
1493 | tout = max(t, tout); | ||
1494 | } | ||
1495 | tout = msecs_to_jiffies(tout * 1000); | ||
1496 | } | ||
1497 | tout = schedule_timeout_interruptible(tout); | ||
1489 | snd_power_lock(card); | 1498 | snd_power_lock(card); |
1490 | down_read(&snd_pcm_link_rwsem); | 1499 | down_read(&snd_pcm_link_rwsem); |
1491 | snd_pcm_stream_lock_irq(substream); | 1500 | snd_pcm_stream_lock_irq(substream); |
@@ -1518,13 +1527,11 @@ static int snd_pcm_drain(struct snd_pcm_substream *substream, | |||
1518 | static int snd_pcm_drop(struct snd_pcm_substream *substream) | 1527 | static int snd_pcm_drop(struct snd_pcm_substream *substream) |
1519 | { | 1528 | { |
1520 | struct snd_pcm_runtime *runtime; | 1529 | struct snd_pcm_runtime *runtime; |
1521 | struct snd_card *card; | ||
1522 | int result = 0; | 1530 | int result = 0; |
1523 | 1531 | ||
1524 | if (PCM_RUNTIME_CHECK(substream)) | 1532 | if (PCM_RUNTIME_CHECK(substream)) |
1525 | return -ENXIO; | 1533 | return -ENXIO; |
1526 | runtime = substream->runtime; | 1534 | runtime = substream->runtime; |
1527 | card = substream->pcm->card; | ||
1528 | 1535 | ||
1529 | if (runtime->status->state == SNDRV_PCM_STATE_OPEN || | 1536 | if (runtime->status->state == SNDRV_PCM_STATE_OPEN || |
1530 | runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED || | 1537 | runtime->status->state == SNDRV_PCM_STATE_DISCONNECTED || |
@@ -2056,7 +2063,6 @@ static int snd_pcm_open_file(struct file *file, | |||
2056 | { | 2063 | { |
2057 | struct snd_pcm_file *pcm_file; | 2064 | struct snd_pcm_file *pcm_file; |
2058 | struct snd_pcm_substream *substream; | 2065 | struct snd_pcm_substream *substream; |
2059 | struct snd_pcm_str *str; | ||
2060 | int err; | 2066 | int err; |
2061 | 2067 | ||
2062 | if (rpcm_file) | 2068 | if (rpcm_file) |
@@ -2073,7 +2079,6 @@ static int snd_pcm_open_file(struct file *file, | |||
2073 | } | 2079 | } |
2074 | pcm_file->substream = substream; | 2080 | pcm_file->substream = substream; |
2075 | if (substream->ref_count == 1) { | 2081 | if (substream->ref_count == 1) { |
2076 | str = substream->pstr; | ||
2077 | substream->file = pcm_file; | 2082 | substream->file = pcm_file; |
2078 | substream->pcm_release = pcm_release_private; | 2083 | substream->pcm_release = pcm_release_private; |
2079 | } | 2084 | } |
@@ -3015,11 +3020,9 @@ static const struct vm_operations_struct snd_pcm_vm_ops_status = | |||
3015 | static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, | 3020 | static int snd_pcm_mmap_status(struct snd_pcm_substream *substream, struct file *file, |
3016 | struct vm_area_struct *area) | 3021 | struct vm_area_struct *area) |
3017 | { | 3022 | { |
3018 | struct snd_pcm_runtime *runtime; | ||
3019 | long size; | 3023 | long size; |
3020 | if (!(area->vm_flags & VM_READ)) | 3024 | if (!(area->vm_flags & VM_READ)) |
3021 | return -EINVAL; | 3025 | return -EINVAL; |
3022 | runtime = substream->runtime; | ||
3023 | size = area->vm_end - area->vm_start; | 3026 | size = area->vm_end - area->vm_start; |
3024 | if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status))) | 3027 | if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_status))) |
3025 | return -EINVAL; | 3028 | return -EINVAL; |
@@ -3054,11 +3057,9 @@ static const struct vm_operations_struct snd_pcm_vm_ops_control = | |||
3054 | static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file, | 3057 | static int snd_pcm_mmap_control(struct snd_pcm_substream *substream, struct file *file, |
3055 | struct vm_area_struct *area) | 3058 | struct vm_area_struct *area) |
3056 | { | 3059 | { |
3057 | struct snd_pcm_runtime *runtime; | ||
3058 | long size; | 3060 | long size; |
3059 | if (!(area->vm_flags & VM_READ)) | 3061 | if (!(area->vm_flags & VM_READ)) |
3060 | return -EINVAL; | 3062 | return -EINVAL; |
3061 | runtime = substream->runtime; | ||
3062 | size = area->vm_end - area->vm_start; | 3063 | size = area->vm_end - area->vm_start; |
3063 | if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control))) | 3064 | if (size != PAGE_ALIGN(sizeof(struct snd_pcm_mmap_control))) |
3064 | return -EINVAL; | 3065 | return -EINVAL; |
diff --git a/sound/core/seq/seq_queue.c b/sound/core/seq/seq_queue.c index e7a8e9e4edb2..f9077361c119 100644 --- a/sound/core/seq/seq_queue.c +++ b/sound/core/seq/seq_queue.c | |||
@@ -467,13 +467,11 @@ int snd_seq_queue_timer_open(int queueid) | |||
467 | int snd_seq_queue_timer_close(int queueid) | 467 | int snd_seq_queue_timer_close(int queueid) |
468 | { | 468 | { |
469 | struct snd_seq_queue *queue; | 469 | struct snd_seq_queue *queue; |
470 | struct snd_seq_timer *tmr; | ||
471 | int result = 0; | 470 | int result = 0; |
472 | 471 | ||
473 | queue = queueptr(queueid); | 472 | queue = queueptr(queueid); |
474 | if (queue == NULL) | 473 | if (queue == NULL) |
475 | return -EINVAL; | 474 | return -EINVAL; |
476 | tmr = queue->timer; | ||
477 | snd_seq_timer_close(queue); | 475 | snd_seq_timer_close(queue); |
478 | queuefree(queue); | 476 | queuefree(queue); |
479 | return result; | 477 | return result; |
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 8edd998509f7..45b4a8d70e08 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c | |||
@@ -4719,7 +4719,7 @@ int snd_hda_parse_pin_def_config(struct hda_codec *codec, | |||
4719 | cfg->dig_out_pins[0], cfg->dig_out_pins[1]); | 4719 | cfg->dig_out_pins[0], cfg->dig_out_pins[1]); |
4720 | snd_printd(" inputs:"); | 4720 | snd_printd(" inputs:"); |
4721 | for (i = 0; i < cfg->num_inputs; i++) { | 4721 | for (i = 0; i < cfg->num_inputs; i++) { |
4722 | snd_printdd(" %s=0x%x", | 4722 | snd_printd(" %s=0x%x", |
4723 | hda_get_autocfg_input_label(codec, cfg, i), | 4723 | hda_get_autocfg_input_label(codec, cfg, i), |
4724 | cfg->inputs[i].pin); | 4724 | cfg->inputs[i].pin); |
4725 | } | 4725 | } |
diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c index 74b0560289c0..b05f7be9dc1b 100644 --- a/sound/pci/hda/hda_eld.c +++ b/sound/pci/hda/hda_eld.c | |||
@@ -312,23 +312,6 @@ out_fail: | |||
312 | return -EINVAL; | 312 | return -EINVAL; |
313 | } | 313 | } |
314 | 314 | ||
315 | static int hdmi_eld_valid(struct hda_codec *codec, hda_nid_t nid) | ||
316 | { | ||
317 | int eldv; | ||
318 | int present; | ||
319 | |||
320 | present = snd_hda_pin_sense(codec, nid); | ||
321 | eldv = (present & AC_PINSENSE_ELDV); | ||
322 | present = (present & AC_PINSENSE_PRESENCE); | ||
323 | |||
324 | #ifdef CONFIG_SND_DEBUG_VERBOSE | ||
325 | printk(KERN_INFO "HDMI: sink_present = %d, eld_valid = %d\n", | ||
326 | !!present, !!eldv); | ||
327 | #endif | ||
328 | |||
329 | return eldv && present; | ||
330 | } | ||
331 | |||
332 | int snd_hdmi_get_eld_size(struct hda_codec *codec, hda_nid_t nid) | 315 | int snd_hdmi_get_eld_size(struct hda_codec *codec, hda_nid_t nid) |
333 | { | 316 | { |
334 | return snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_HDMI_DIP_SIZE, | 317 | return snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_HDMI_DIP_SIZE, |
@@ -343,7 +326,7 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld, | |||
343 | int size; | 326 | int size; |
344 | unsigned char *buf; | 327 | unsigned char *buf; |
345 | 328 | ||
346 | if (!hdmi_eld_valid(codec, nid)) | 329 | if (!eld->eld_valid) |
347 | return -ENOENT; | 330 | return -ENOENT; |
348 | 331 | ||
349 | size = snd_hdmi_get_eld_size(codec, nid); | 332 | size = snd_hdmi_get_eld_size(codec, nid); |
@@ -477,6 +460,8 @@ static void hdmi_print_eld_info(struct snd_info_entry *entry, | |||
477 | 460 | ||
478 | snd_iprintf(buffer, "monitor_present\t\t%d\n", e->monitor_present); | 461 | snd_iprintf(buffer, "monitor_present\t\t%d\n", e->monitor_present); |
479 | snd_iprintf(buffer, "eld_valid\t\t%d\n", e->eld_valid); | 462 | snd_iprintf(buffer, "eld_valid\t\t%d\n", e->eld_valid); |
463 | if (!e->eld_valid) | ||
464 | return; | ||
480 | snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name); | 465 | snd_iprintf(buffer, "monitor_name\t\t%s\n", e->monitor_name); |
481 | snd_iprintf(buffer, "connection_type\t\t%s\n", | 466 | snd_iprintf(buffer, "connection_type\t\t%s\n", |
482 | eld_connection_type_names[e->conn_type]); | 467 | eld_connection_type_names[e->conn_type]); |
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index 43a036716d25..486f6deb3eee 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c | |||
@@ -391,6 +391,7 @@ struct azx { | |||
391 | 391 | ||
392 | /* chip type specific */ | 392 | /* chip type specific */ |
393 | int driver_type; | 393 | int driver_type; |
394 | unsigned int driver_caps; | ||
394 | int playback_streams; | 395 | int playback_streams; |
395 | int playback_index_offset; | 396 | int playback_index_offset; |
396 | int capture_streams; | 397 | int capture_streams; |
@@ -464,6 +465,34 @@ enum { | |||
464 | AZX_NUM_DRIVERS, /* keep this as last entry */ | 465 | AZX_NUM_DRIVERS, /* keep this as last entry */ |
465 | }; | 466 | }; |
466 | 467 | ||
468 | /* driver quirks (capabilities) */ | ||
469 | /* bits 0-7 are used for indicating driver type */ | ||
470 | #define AZX_DCAPS_NO_TCSEL (1 << 8) /* No Intel TCSEL bit */ | ||
471 | #define AZX_DCAPS_NO_MSI (1 << 9) /* No MSI support */ | ||
472 | #define AZX_DCAPS_ATI_SNOOP (1 << 10) /* ATI snoop enable */ | ||
473 | #define AZX_DCAPS_NVIDIA_SNOOP (1 << 11) /* Nvidia snoop enable */ | ||
474 | #define AZX_DCAPS_SCH_SNOOP (1 << 12) /* SCH/PCH snoop enable */ | ||
475 | #define AZX_DCAPS_RIRB_DELAY (1 << 13) /* Long delay in read loop */ | ||
476 | #define AZX_DCAPS_RIRB_PRE_DELAY (1 << 14) /* Put a delay before read */ | ||
477 | #define AZX_DCAPS_CTX_WORKAROUND (1 << 15) /* X-Fi workaround */ | ||
478 | #define AZX_DCAPS_POSFIX_LPIB (1 << 16) /* Use LPIB as default */ | ||
479 | #define AZX_DCAPS_POSFIX_VIA (1 << 17) /* Use VIACOMBO as default */ | ||
480 | #define AZX_DCAPS_NO_64BIT (1 << 18) /* No 64bit address */ | ||
481 | #define AZX_DCAPS_SYNC_WRITE (1 << 19) /* sync each cmd write */ | ||
482 | |||
483 | /* quirks for ATI SB / AMD Hudson */ | ||
484 | #define AZX_DCAPS_PRESET_ATI_SB \ | ||
485 | (AZX_DCAPS_ATI_SNOOP | AZX_DCAPS_NO_TCSEL | \ | ||
486 | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB) | ||
487 | |||
488 | /* quirks for ATI/AMD HDMI */ | ||
489 | #define AZX_DCAPS_PRESET_ATI_HDMI \ | ||
490 | (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB) | ||
491 | |||
492 | /* quirks for Nvidia */ | ||
493 | #define AZX_DCAPS_PRESET_NVIDIA \ | ||
494 | (AZX_DCAPS_NVIDIA_SNOOP | AZX_DCAPS_RIRB_DELAY | AZX_DCAPS_NO_MSI) | ||
495 | |||
467 | static char *driver_short_names[] __devinitdata = { | 496 | static char *driver_short_names[] __devinitdata = { |
468 | [AZX_DRIVER_ICH] = "HDA Intel", | 497 | [AZX_DRIVER_ICH] = "HDA Intel", |
469 | [AZX_DRIVER_PCH] = "HDA Intel PCH", | 498 | [AZX_DRIVER_PCH] = "HDA Intel PCH", |
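The block of AZX_DCAPS_* definitions establishes the convention the rest of hda_intel.c now relies on: the driver_data value from the PCI id table packs the AZX_DRIVER_* type into bits 0-7 and per-chipset quirk bits above that, with the *_PRESET_* macros bundling the usual ATI/AMD and Nvidia combinations. A hedged illustration of how such a value is split and queried; azx_apply_quirks() is a hypothetical name, though the fields and flags are the ones introduced in this diff:

        /* Illustrative only: split driver_caps into the legacy type index
         * and test individual quirk bits, as the later hunks do inline. */
        static void azx_apply_quirks(struct azx *chip, unsigned int driver_caps)
        {
                chip->driver_caps = driver_caps;
                chip->driver_type = driver_caps & 0xff; /* AZX_DRIVER_* index */

                if (chip->driver_caps & AZX_DCAPS_NO_MSI)
                        chip->msi = 0;                  /* MSI known to misbehave */

                if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY)
                        chip->bus->needs_damn_long_delay = 1;
        }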
@@ -566,7 +595,7 @@ static void azx_init_cmd_io(struct azx *chip) | |||
566 | /* reset the rirb hw write pointer */ | 595 | /* reset the rirb hw write pointer */ |
567 | azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST); | 596 | azx_writew(chip, RIRBWP, ICH6_RIRBWP_RST); |
568 | /* set N=1, get RIRB response interrupt for new entry */ | 597 | /* set N=1, get RIRB response interrupt for new entry */ |
569 | if (chip->driver_type == AZX_DRIVER_CTX) | 598 | if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) |
570 | azx_writew(chip, RINTCNT, 0xc0); | 599 | azx_writew(chip, RINTCNT, 0xc0); |
571 | else | 600 | else |
572 | azx_writew(chip, RINTCNT, 1); | 601 | azx_writew(chip, RINTCNT, 1); |
@@ -1056,19 +1085,24 @@ static void azx_init_pci(struct azx *chip) | |||
1056 | * codecs. | 1085 | * codecs. |
1057 | * The PCI register TCSEL is defined in the Intel manuals. | 1086 | * The PCI register TCSEL is defined in the Intel manuals. |
1058 | */ | 1087 | */ |
1059 | if (chip->driver_type != AZX_DRIVER_ATI && | 1088 | if (!(chip->driver_caps & AZX_DCAPS_NO_TCSEL)) { |
1060 | chip->driver_type != AZX_DRIVER_ATIHDMI) | 1089 | snd_printdd(SFX "Clearing TCSEL\n"); |
1061 | update_pci_byte(chip->pci, ICH6_PCIREG_TCSEL, 0x07, 0); | 1090 | update_pci_byte(chip->pci, ICH6_PCIREG_TCSEL, 0x07, 0); |
1091 | } | ||
1062 | 1092 | ||
1063 | switch (chip->driver_type) { | 1093 | /* For ATI SB450/600/700/800/900 and AMD Hudson azalia HD audio, |
1064 | case AZX_DRIVER_ATI: | 1094 | * we need to enable snoop. |
1065 | /* For ATI SB450 azalia HD audio, we need to enable snoop */ | 1095 | */ |
1096 | if (chip->driver_caps & AZX_DCAPS_ATI_SNOOP) { | ||
1097 | snd_printdd(SFX "Enabling ATI snoop\n"); | ||
1066 | update_pci_byte(chip->pci, | 1098 | update_pci_byte(chip->pci, |
1067 | ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR, | 1099 | ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR, |
1068 | 0x07, ATI_SB450_HDAUDIO_ENABLE_SNOOP); | 1100 | 0x07, ATI_SB450_HDAUDIO_ENABLE_SNOOP); |
1069 | break; | 1101 | } |
1070 | case AZX_DRIVER_NVIDIA: | 1102 | |
1071 | /* For NVIDIA HDA, enable snoop */ | 1103 | /* For NVIDIA HDA, enable snoop */ |
1104 | if (chip->driver_caps & AZX_DCAPS_NVIDIA_SNOOP) { | ||
1105 | snd_printdd(SFX "Enabling Nvidia snoop\n"); | ||
1072 | update_pci_byte(chip->pci, | 1106 | update_pci_byte(chip->pci, |
1073 | NVIDIA_HDA_TRANSREG_ADDR, | 1107 | NVIDIA_HDA_TRANSREG_ADDR, |
1074 | 0x0f, NVIDIA_HDA_ENABLE_COHBITS); | 1108 | 0x0f, NVIDIA_HDA_ENABLE_COHBITS); |
@@ -1078,9 +1112,10 @@ static void azx_init_pci(struct azx *chip) | |||
1078 | update_pci_byte(chip->pci, | 1112 | update_pci_byte(chip->pci, |
1079 | NVIDIA_HDA_OSTRM_COH, | 1113 | NVIDIA_HDA_OSTRM_COH, |
1080 | 0x01, NVIDIA_HDA_ENABLE_COHBIT); | 1114 | 0x01, NVIDIA_HDA_ENABLE_COHBIT); |
1081 | break; | 1115 | } |
1082 | case AZX_DRIVER_SCH: | 1116 | |
1083 | case AZX_DRIVER_PCH: | 1117 | /* Enable SCH/PCH snoop if needed */ |
1118 | if (chip->driver_caps & AZX_DCAPS_SCH_SNOOP) { | ||
1084 | pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop); | 1119 | pci_read_config_word(chip->pci, INTEL_SCH_HDA_DEVC, &snoop); |
1085 | if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) { | 1120 | if (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) { |
1086 | pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC, | 1121 | pci_write_config_word(chip->pci, INTEL_SCH_HDA_DEVC, |
@@ -1091,14 +1126,6 @@ static void azx_init_pci(struct azx *chip) | |||
1091 | (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) | 1126 | (snoop & INTEL_SCH_HDA_DEVC_NOSNOOP) |
1092 | ? "Failed" : "OK"); | 1127 | ? "Failed" : "OK"); |
1093 | } | 1128 | } |
1094 | break; | ||
1095 | default: | ||
1096 | /* AMD Hudson needs the similar snoop, as it seems... */ | ||
1097 | if (chip->pci->vendor == PCI_VENDOR_ID_AMD) | ||
1098 | update_pci_byte(chip->pci, | ||
1099 | ATI_SB450_HDAUDIO_MISC_CNTR2_ADDR, | ||
1100 | 0x07, ATI_SB450_HDAUDIO_ENABLE_SNOOP); | ||
1101 | break; | ||
1102 | } | 1129 | } |
1103 | } | 1130 | } |
1104 | 1131 | ||
@@ -1152,7 +1179,7 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id) | |||
1152 | status = azx_readb(chip, RIRBSTS); | 1179 | status = azx_readb(chip, RIRBSTS); |
1153 | if (status & RIRB_INT_MASK) { | 1180 | if (status & RIRB_INT_MASK) { |
1154 | if (status & RIRB_INT_RESPONSE) { | 1181 | if (status & RIRB_INT_RESPONSE) { |
1155 | if (chip->driver_type == AZX_DRIVER_CTX) | 1182 | if (chip->driver_caps & AZX_DCAPS_RIRB_PRE_DELAY) |
1156 | udelay(80); | 1183 | udelay(80); |
1157 | azx_update_rirb(chip); | 1184 | azx_update_rirb(chip); |
1158 | } | 1185 | } |
@@ -1421,8 +1448,10 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model) | |||
1421 | if (err < 0) | 1448 | if (err < 0) |
1422 | return err; | 1449 | return err; |
1423 | 1450 | ||
1424 | if (chip->driver_type == AZX_DRIVER_NVIDIA) | 1451 | if (chip->driver_caps & AZX_DCAPS_RIRB_DELAY) { |
1452 | snd_printd(SFX "Enable delay in RIRB handling\n"); | ||
1425 | chip->bus->needs_damn_long_delay = 1; | 1453 | chip->bus->needs_damn_long_delay = 1; |
1454 | } | ||
1426 | 1455 | ||
1427 | codecs = 0; | 1456 | codecs = 0; |
1428 | max_slots = azx_max_codecs[chip->driver_type]; | 1457 | max_slots = azx_max_codecs[chip->driver_type]; |
@@ -1457,9 +1486,8 @@ static int __devinit azx_codec_create(struct azx *chip, const char *model) | |||
1457 | * sequence like the pin-detection. It seems that forcing the synced | 1486 | * sequence like the pin-detection. It seems that forcing the synced |
1458 | * access works around the stall. Grrr... | 1487 | * access works around the stall. Grrr... |
1459 | */ | 1488 | */ |
1460 | if (chip->pci->vendor == PCI_VENDOR_ID_AMD || | 1489 | if (chip->driver_caps & AZX_DCAPS_SYNC_WRITE) { |
1461 | chip->pci->vendor == PCI_VENDOR_ID_ATI) { | 1490 | snd_printd(SFX "Enable sync_write for stable communication\n"); |
1462 | snd_printk(KERN_INFO SFX "Enable sync_write for AMD chipset\n"); | ||
1463 | chip->bus->sync_write = 1; | 1491 | chip->bus->sync_write = 1; |
1464 | chip->bus->allow_bus_reset = 1; | 1492 | chip->bus->allow_bus_reset = 1; |
1465 | } | 1493 | } |
@@ -1720,7 +1748,7 @@ static int azx_pcm_prepare(struct snd_pcm_substream *substream) | |||
1720 | 1748 | ||
1721 | stream_tag = azx_dev->stream_tag; | 1749 | stream_tag = azx_dev->stream_tag; |
1722 | /* CA-IBG chips need the playback stream starting from 1 */ | 1750 | /* CA-IBG chips need the playback stream starting from 1 */ |
1723 | if (chip->driver_type == AZX_DRIVER_CTX && | 1751 | if ((chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND) && |
1724 | stream_tag > chip->capture_streams) | 1752 | stream_tag > chip->capture_streams) |
1725 | stream_tag -= chip->capture_streams; | 1753 | stream_tag -= chip->capture_streams; |
1726 | return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag, | 1754 | return snd_hda_codec_prepare(apcm->codec, hinfo, stream_tag, |
@@ -2365,20 +2393,14 @@ static int __devinit check_position_fix(struct azx *chip, int fix) | |||
2365 | } | 2393 | } |
2366 | 2394 | ||
2367 | /* Check VIA/ATI HD Audio Controller exist */ | 2395 | /* Check VIA/ATI HD Audio Controller exist */ |
2368 | switch (chip->driver_type) { | 2396 | if (chip->driver_caps & AZX_DCAPS_POSFIX_VIA) { |
2369 | case AZX_DRIVER_VIA: | 2397 | snd_printd(SFX "Using VIACOMBO position fix\n"); |
2370 | /* Use link position directly, avoid any transfer problem. */ | ||
2371 | return POS_FIX_VIACOMBO; | 2398 | return POS_FIX_VIACOMBO; |
2372 | case AZX_DRIVER_ATI: | 2399 | } |
2373 | /* ATI chipsets don't work well with position-buffer */ | 2400 | if (chip->driver_caps & AZX_DCAPS_POSFIX_LPIB) { |
2401 | snd_printd(SFX "Using LPIB position fix\n"); | ||
2374 | return POS_FIX_LPIB; | 2402 | return POS_FIX_LPIB; |
2375 | case AZX_DRIVER_GENERIC: | ||
2376 | /* AMD chipsets also don't work with position-buffer */ | ||
2377 | if (chip->pci->vendor == PCI_VENDOR_ID_AMD) | ||
2378 | return POS_FIX_LPIB; | ||
2379 | break; | ||
2380 | } | 2403 | } |
2381 | |||
2382 | return POS_FIX_AUTO; | 2404 | return POS_FIX_AUTO; |
2383 | } | 2405 | } |
2384 | 2406 | ||
@@ -2460,8 +2482,8 @@ static void __devinit check_msi(struct azx *chip) | |||
2460 | } | 2482 | } |
2461 | 2483 | ||
2462 | /* NVidia chipsets seem to cause troubles with MSI */ | 2484 | /* NVidia chipsets seem to cause troubles with MSI */ |
2463 | if (chip->driver_type == AZX_DRIVER_NVIDIA) { | 2485 | if (chip->driver_caps & AZX_DCAPS_NO_MSI) { |
2464 | printk(KERN_INFO "hda_intel: Disable MSI for Nvidia chipset\n"); | 2486 | printk(KERN_INFO "hda_intel: Disabling MSI\n"); |
2465 | chip->msi = 0; | 2487 | chip->msi = 0; |
2466 | } | 2488 | } |
2467 | } | 2489 | } |
@@ -2471,7 +2493,7 @@ static void __devinit check_msi(struct azx *chip) | |||
2471 | * constructor | 2493 | * constructor |
2472 | */ | 2494 | */ |
2473 | static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci, | 2495 | static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci, |
2474 | int dev, int driver_type, | 2496 | int dev, unsigned int driver_caps, |
2475 | struct azx **rchip) | 2497 | struct azx **rchip) |
2476 | { | 2498 | { |
2477 | struct azx *chip; | 2499 | struct azx *chip; |
@@ -2499,7 +2521,8 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci, | |||
2499 | chip->card = card; | 2521 | chip->card = card; |
2500 | chip->pci = pci; | 2522 | chip->pci = pci; |
2501 | chip->irq = -1; | 2523 | chip->irq = -1; |
2502 | chip->driver_type = driver_type; | 2524 | chip->driver_caps = driver_caps; |
2525 | chip->driver_type = driver_caps & 0xff; | ||
2503 | check_msi(chip); | 2526 | check_msi(chip); |
2504 | chip->dev_index = dev; | 2527 | chip->dev_index = dev; |
2505 | INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work); | 2528 | INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work); |
@@ -2563,8 +2586,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci, | |||
2563 | snd_printdd(SFX "chipset global capabilities = 0x%x\n", gcap); | 2586 | snd_printdd(SFX "chipset global capabilities = 0x%x\n", gcap); |
2564 | 2587 | ||
2565 | /* disable SB600 64bit support for safety */ | 2588 | /* disable SB600 64bit support for safety */ |
2566 | if ((chip->driver_type == AZX_DRIVER_ATI) || | 2589 | if (chip->pci->vendor == PCI_VENDOR_ID_ATI) { |
2567 | (chip->driver_type == AZX_DRIVER_ATIHDMI)) { | ||
2568 | struct pci_dev *p_smbus; | 2590 | struct pci_dev *p_smbus; |
2569 | p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, | 2591 | p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, |
2570 | PCI_DEVICE_ID_ATI_SBX00_SMBUS, | 2592 | PCI_DEVICE_ID_ATI_SBX00_SMBUS, |
@@ -2574,19 +2596,13 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci, | |||
2574 | gcap &= ~ICH6_GCAP_64OK; | 2596 | gcap &= ~ICH6_GCAP_64OK; |
2575 | pci_dev_put(p_smbus); | 2597 | pci_dev_put(p_smbus); |
2576 | } | 2598 | } |
2577 | } else { | ||
2578 | /* FIXME: not sure whether this is really needed, but | ||
2579 | * Hudson isn't stable enough for allowing everything... | ||
2580 | * let's check later again. | ||
2581 | */ | ||
2582 | if (chip->pci->vendor == PCI_VENDOR_ID_AMD) | ||
2583 | gcap &= ~ICH6_GCAP_64OK; | ||
2584 | } | 2599 | } |
2585 | 2600 | ||
2586 | /* disable 64bit DMA address for Teradici */ | 2601 | /* disable 64bit DMA address on some devices */ |
2587 | /* it does not work with device 6549:1200 subsys e4a2:040b */ | 2602 | if (chip->driver_caps & AZX_DCAPS_NO_64BIT) { |
2588 | if (chip->driver_type == AZX_DRIVER_TERA) | 2603 | snd_printd(SFX "Disabling 64bit DMA\n"); |
2589 | gcap &= ~ICH6_GCAP_64OK; | 2604 | gcap &= ~ICH6_GCAP_64OK; |
2605 | } | ||
2590 | 2606 | ||
2591 | /* allow 64bit DMA address if supported by H/W */ | 2607 | /* allow 64bit DMA address if supported by H/W */ |
2592 | if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64))) | 2608 | if ((gcap & ICH6_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64))) |
@@ -2788,38 +2804,62 @@ static void __devexit azx_remove(struct pci_dev *pci) | |||
2788 | /* PCI IDs */ | 2804 | /* PCI IDs */ |
2789 | static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { | 2805 | static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { |
2790 | /* CPT */ | 2806 | /* CPT */ |
2791 | { PCI_DEVICE(0x8086, 0x1c20), .driver_data = AZX_DRIVER_PCH }, | 2807 | { PCI_DEVICE(0x8086, 0x1c20), |
2808 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP }, | ||
2792 | /* PBG */ | 2809 | /* PBG */ |
2793 | { PCI_DEVICE(0x8086, 0x1d20), .driver_data = AZX_DRIVER_PCH }, | 2810 | { PCI_DEVICE(0x8086, 0x1d20), |
2811 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP }, | ||
2794 | /* Panther Point */ | 2812 | /* Panther Point */ |
2795 | { PCI_DEVICE(0x8086, 0x1e20), .driver_data = AZX_DRIVER_PCH }, | 2813 | { PCI_DEVICE(0x8086, 0x1e20), |
2814 | .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_SCH_SNOOP }, | ||
2796 | /* SCH */ | 2815 | /* SCH */ |
2797 | { PCI_DEVICE(0x8086, 0x811b), .driver_data = AZX_DRIVER_SCH }, | 2816 | { PCI_DEVICE(0x8086, 0x811b), |
2817 | .driver_data = AZX_DRIVER_SCH | AZX_DCAPS_SCH_SNOOP }, | ||
2798 | /* Generic Intel */ | 2818 | /* Generic Intel */ |
2799 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID), | 2819 | { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_ANY_ID), |
2800 | .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, | 2820 | .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, |
2801 | .class_mask = 0xffffff, | 2821 | .class_mask = 0xffffff, |
2802 | .driver_data = AZX_DRIVER_ICH }, | 2822 | .driver_data = AZX_DRIVER_ICH }, |
2803 | /* ATI SB 450/600 */ | 2823 | /* ATI SB 450/600/700/800/900 */ |
2804 | { PCI_DEVICE(0x1002, 0x437b), .driver_data = AZX_DRIVER_ATI }, | 2824 | { PCI_DEVICE(0x1002, 0x437b), |
2805 | { PCI_DEVICE(0x1002, 0x4383), .driver_data = AZX_DRIVER_ATI }, | 2825 | .driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB }, |
2826 | { PCI_DEVICE(0x1002, 0x4383), | ||
2827 | .driver_data = AZX_DRIVER_ATI | AZX_DCAPS_PRESET_ATI_SB }, | ||
2828 | /* AMD Hudson */ | ||
2829 | { PCI_DEVICE(0x1022, 0x780d), | ||
2830 | .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB }, | ||
2806 | /* ATI HDMI */ | 2831 | /* ATI HDMI */ |
2807 | { PCI_DEVICE(0x1002, 0x793b), .driver_data = AZX_DRIVER_ATIHDMI }, | 2832 | { PCI_DEVICE(0x1002, 0x793b), |
2808 | { PCI_DEVICE(0x1002, 0x7919), .driver_data = AZX_DRIVER_ATIHDMI }, | 2833 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, |
2809 | { PCI_DEVICE(0x1002, 0x960f), .driver_data = AZX_DRIVER_ATIHDMI }, | 2834 | { PCI_DEVICE(0x1002, 0x7919), |
2810 | { PCI_DEVICE(0x1002, 0x970f), .driver_data = AZX_DRIVER_ATIHDMI }, | 2835 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, |
2811 | { PCI_DEVICE(0x1002, 0xaa00), .driver_data = AZX_DRIVER_ATIHDMI }, | 2836 | { PCI_DEVICE(0x1002, 0x960f), |
2812 | { PCI_DEVICE(0x1002, 0xaa08), .driver_data = AZX_DRIVER_ATIHDMI }, | 2837 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, |
2813 | { PCI_DEVICE(0x1002, 0xaa10), .driver_data = AZX_DRIVER_ATIHDMI }, | 2838 | { PCI_DEVICE(0x1002, 0x970f), |
2814 | { PCI_DEVICE(0x1002, 0xaa18), .driver_data = AZX_DRIVER_ATIHDMI }, | 2839 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, |
2815 | { PCI_DEVICE(0x1002, 0xaa20), .driver_data = AZX_DRIVER_ATIHDMI }, | 2840 | { PCI_DEVICE(0x1002, 0xaa00), |
2816 | { PCI_DEVICE(0x1002, 0xaa28), .driver_data = AZX_DRIVER_ATIHDMI }, | 2841 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, |
2817 | { PCI_DEVICE(0x1002, 0xaa30), .driver_data = AZX_DRIVER_ATIHDMI }, | 2842 | { PCI_DEVICE(0x1002, 0xaa08), |
2818 | { PCI_DEVICE(0x1002, 0xaa38), .driver_data = AZX_DRIVER_ATIHDMI }, | 2843 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, |
2819 | { PCI_DEVICE(0x1002, 0xaa40), .driver_data = AZX_DRIVER_ATIHDMI }, | 2844 | { PCI_DEVICE(0x1002, 0xaa10), |
2820 | { PCI_DEVICE(0x1002, 0xaa48), .driver_data = AZX_DRIVER_ATIHDMI }, | 2845 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, |
2846 | { PCI_DEVICE(0x1002, 0xaa18), | ||
2847 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, | ||
2848 | { PCI_DEVICE(0x1002, 0xaa20), | ||
2849 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, | ||
2850 | { PCI_DEVICE(0x1002, 0xaa28), | ||
2851 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, | ||
2852 | { PCI_DEVICE(0x1002, 0xaa30), | ||
2853 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, | ||
2854 | { PCI_DEVICE(0x1002, 0xaa38), | ||
2855 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, | ||
2856 | { PCI_DEVICE(0x1002, 0xaa40), | ||
2857 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, | ||
2858 | { PCI_DEVICE(0x1002, 0xaa48), | ||
2859 | .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, | ||
2821 | /* VIA VT8251/VT8237A */ | 2860 | /* VIA VT8251/VT8237A */ |
2822 | { PCI_DEVICE(0x1106, 0x3288), .driver_data = AZX_DRIVER_VIA }, | 2861 | { PCI_DEVICE(0x1106, 0x3288), |
2862 | .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, | ||
2823 | /* SIS966 */ | 2863 | /* SIS966 */ |
2824 | { PCI_DEVICE(0x1039, 0x7502), .driver_data = AZX_DRIVER_SIS }, | 2864 | { PCI_DEVICE(0x1039, 0x7502), .driver_data = AZX_DRIVER_SIS }, |
2825 | /* ULI M5461 */ | 2865 | /* ULI M5461 */ |
@@ -2828,9 +2868,10 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { | |||
2828 | { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), | 2868 | { PCI_DEVICE(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID), |
2829 | .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, | 2869 | .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, |
2830 | .class_mask = 0xffffff, | 2870 | .class_mask = 0xffffff, |
2831 | .driver_data = AZX_DRIVER_NVIDIA }, | 2871 | .driver_data = AZX_DRIVER_NVIDIA | AZX_DCAPS_PRESET_NVIDIA }, |
2832 | /* Teradici */ | 2872 | /* Teradici */ |
2833 | { PCI_DEVICE(0x6549, 0x1200), .driver_data = AZX_DRIVER_TERA }, | 2873 | { PCI_DEVICE(0x6549, 0x1200), |
2874 | .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, | ||
2834 | /* Creative X-Fi (CA0110-IBG) */ | 2875 | /* Creative X-Fi (CA0110-IBG) */ |
2835 | #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE) | 2876 | #if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE) |
2836 | /* the following entry conflicts with snd-ctxfi driver, | 2877 | /* the following entry conflicts with snd-ctxfi driver, |
@@ -2840,10 +2881,13 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { | |||
2840 | { PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_ANY_ID), | 2881 | { PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_ANY_ID), |
2841 | .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, | 2882 | .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, |
2842 | .class_mask = 0xffffff, | 2883 | .class_mask = 0xffffff, |
2843 | .driver_data = AZX_DRIVER_CTX }, | 2884 | .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND | |
2885 | AZX_DCAPS_RIRB_PRE_DELAY }, | ||
2844 | #else | 2886 | #else |
2845 | /* this entry seems still valid -- i.e. without emu20kx chip */ | 2887 | /* this entry seems still valid -- i.e. without emu20kx chip */ |
2846 | { PCI_DEVICE(0x1102, 0x0009), .driver_data = AZX_DRIVER_CTX }, | 2888 | { PCI_DEVICE(0x1102, 0x0009), |
2889 | .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND | | ||
2890 | AZX_DCAPS_RIRB_PRE_DELAY }, | ||
2847 | #endif | 2891 | #endif |
2848 | /* Vortex86MX */ | 2892 | /* Vortex86MX */ |
2849 | { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC }, | 2893 | { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC }, |
@@ -2853,11 +2897,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = { | |||
2853 | { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID), | 2897 | { PCI_DEVICE(PCI_VENDOR_ID_ATI, PCI_ANY_ID), |
2854 | .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, | 2898 | .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, |
2855 | .class_mask = 0xffffff, | 2899 | .class_mask = 0xffffff, |
2856 | .driver_data = AZX_DRIVER_GENERIC }, | 2900 | .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI }, |
2857 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_ANY_ID), | 2901 | { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_ANY_ID), |
2858 | .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, | 2902 | .class = PCI_CLASS_MULTIMEDIA_HD_AUDIO << 8, |
2859 | .class_mask = 0xffffff, | 2903 | .class_mask = 0xffffff, |
2860 | .driver_data = AZX_DRIVER_GENERIC }, | 2904 | .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_HDMI }, |
2861 | { 0, } | 2905 | { 0, } |
2862 | }; | 2906 | }; |
2863 | MODULE_DEVICE_TABLE(pci, azx_ids); | 2907 | MODULE_DEVICE_TABLE(pci, azx_ids); |
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c index f1b3875c57df..696ac2590307 100644 --- a/sound/pci/hda/patch_analog.c +++ b/sound/pci/hda/patch_analog.c | |||
@@ -3159,6 +3159,7 @@ static const struct snd_pci_quirk ad1988_cfg_tbl[] = { | |||
3159 | SND_PCI_QUIRK(0x1043, 0x81ec, "Asus P5B-DLX", AD1988_6STACK_DIG), | 3159 | SND_PCI_QUIRK(0x1043, 0x81ec, "Asus P5B-DLX", AD1988_6STACK_DIG), |
3160 | SND_PCI_QUIRK(0x1043, 0x81f6, "Asus M2N-SLI", AD1988_6STACK_DIG), | 3160 | SND_PCI_QUIRK(0x1043, 0x81f6, "Asus M2N-SLI", AD1988_6STACK_DIG), |
3161 | SND_PCI_QUIRK(0x1043, 0x8277, "Asus P5K-E/WIFI-AP", AD1988_6STACK_DIG), | 3161 | SND_PCI_QUIRK(0x1043, 0x8277, "Asus P5K-E/WIFI-AP", AD1988_6STACK_DIG), |
3162 | SND_PCI_QUIRK(0x1043, 0x82c0, "Asus M3N-HT Deluxe", AD1988_6STACK_DIG), | ||
3162 | SND_PCI_QUIRK(0x1043, 0x8311, "Asus P5Q-Premium/Pro", AD1988_6STACK_DIG), | 3163 | SND_PCI_QUIRK(0x1043, 0x8311, "Asus P5Q-Premium/Pro", AD1988_6STACK_DIG), |
3163 | {} | 3164 | {} |
3164 | }; | 3165 | }; |
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c index 4f37477d3c71..3e6b9a8539c2 100644 --- a/sound/pci/hda/patch_conexant.c +++ b/sound/pci/hda/patch_conexant.c | |||
@@ -3098,7 +3098,9 @@ static const struct snd_pci_quirk cxt5066_cfg_tbl[] = { | |||
3098 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), | 3098 | SND_PCI_QUIRK(0x17aa, 0x215e, "Lenovo Thinkpad", CXT5066_THINKPAD), |
3099 | SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), | 3099 | SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT5066_THINKPAD), |
3100 | SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), | 3100 | SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT5066_THINKPAD), |
3101 | SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo U350", CXT5066_ASUS), | ||
3101 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS), | 3102 | SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo G560", CXT5066_ASUS), |
3103 | SND_PCI_QUIRK(0x17aa, 0x3938, "Lenovo G565", CXT5066_AUTO), | ||
3102 | SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */ | 3104 | SND_PCI_QUIRK_VENDOR(0x17aa, "Lenovo", CXT5066_IDEAPAD), /* Fallback for Lenovos without dock mic */ |
3103 | {} | 3105 | {} |
3104 | }; | 3106 | }; |
@@ -3433,7 +3435,9 @@ static void cx_auto_parse_output(struct hda_codec *codec) | |||
3433 | break; | 3435 | break; |
3434 | } | 3436 | } |
3435 | } | 3437 | } |
3436 | if (spec->auto_mute && cfg->line_out_pins[0] && | 3438 | if (spec->auto_mute && |
3439 | cfg->line_out_pins[0] && | ||
3440 | cfg->line_out_type != AUTO_PIN_SPEAKER_OUT && | ||
3437 | cfg->line_out_pins[0] != cfg->hp_pins[0] && | 3441 | cfg->line_out_pins[0] != cfg->hp_pins[0] && |
3438 | cfg->line_out_pins[0] != cfg->speaker_pins[0]) { | 3442 | cfg->line_out_pins[0] != cfg->speaker_pins[0]) { |
3439 | for (i = 0; i < cfg->line_outs; i++) { | 3443 | for (i = 0; i < cfg->line_outs; i++) { |
@@ -3481,25 +3485,32 @@ static void cx_auto_update_speakers(struct hda_codec *codec) | |||
3481 | { | 3485 | { |
3482 | struct conexant_spec *spec = codec->spec; | 3486 | struct conexant_spec *spec = codec->spec; |
3483 | struct auto_pin_cfg *cfg = &spec->autocfg; | 3487 | struct auto_pin_cfg *cfg = &spec->autocfg; |
3484 | int on; | 3488 | int on = 1; |
3485 | 3489 | ||
3486 | if (!spec->auto_mute) | 3490 | /* turn on HP EAPD when HP jacks are present */ |
3487 | on = 0; | 3491 | if (spec->auto_mute) |
3488 | else | 3492 | on = spec->hp_present; |
3489 | on = spec->hp_present | spec->line_present; | ||
3490 | cx_auto_turn_eapd(codec, cfg->hp_outs, cfg->hp_pins, on); | 3493 | cx_auto_turn_eapd(codec, cfg->hp_outs, cfg->hp_pins, on); |
3491 | do_automute(codec, cfg->speaker_outs, cfg->speaker_pins, !on); | 3494 | /* mute speakers in auto-mode if HP or LO jacks are plugged */ |
3495 | if (spec->auto_mute) | ||
3496 | on = !(spec->hp_present || | ||
3497 | (spec->detect_line && spec->line_present)); | ||
3498 | do_automute(codec, cfg->speaker_outs, cfg->speaker_pins, on); | ||
3492 | 3499 | ||
3493 | /* toggle line-out mutes if needed, too */ | 3500 | /* toggle line-out mutes if needed, too */ |
3494 | /* if LO is a copy of either HP or Speaker, don't need to handle it */ | 3501 | /* if LO is a copy of either HP or Speaker, don't need to handle it */ |
3495 | if (cfg->line_out_pins[0] == cfg->hp_pins[0] || | 3502 | if (cfg->line_out_pins[0] == cfg->hp_pins[0] || |
3496 | cfg->line_out_pins[0] == cfg->speaker_pins[0]) | 3503 | cfg->line_out_pins[0] == cfg->speaker_pins[0]) |
3497 | return; | 3504 | return; |
3498 | if (!spec->automute_lines || !spec->auto_mute) | 3505 | if (spec->auto_mute) { |
3499 | on = 0; | 3506 | /* mute LO in auto-mode when HP jack is present */ |
3500 | else | 3507 | if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT || |
3501 | on = spec->hp_present; | 3508 | spec->automute_lines) |
3502 | do_automute(codec, cfg->line_outs, cfg->line_out_pins, !on); | 3509 | on = !spec->hp_present; |
3510 | else | ||
3511 | on = 1; | ||
3512 | } | ||
3513 | do_automute(codec, cfg->line_outs, cfg->line_out_pins, on); | ||
3503 | } | 3514 | } |
3504 | 3515 | ||
3505 | static void cx_auto_hp_automute(struct hda_codec *codec) | 3516 | static void cx_auto_hp_automute(struct hda_codec *codec) |
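The cx_auto_update_speakers() rewrite above separates the three outputs' decisions: HP EAPD now tracks only headphone presence, speakers are muted in auto-mute mode whenever headphones (or, with detect_line set, line-out jacks) are plugged, and line-outs are only auto-muted when they act as the primary output, i.e. a speaker-type line-out or automute_lines. A hedged condensation of that logic; speaker_on() and lineout_on() are illustrative helper names only, using the spec/cfg fields from the hunk:

        static int speaker_on(struct conexant_spec *spec)
        {
                if (!spec->auto_mute)
                        return 1;
                return !(spec->hp_present ||
                         (spec->detect_line && spec->line_present));
        }

        static int lineout_on(struct conexant_spec *spec, struct auto_pin_cfg *cfg)
        {
                if (!spec->auto_mute)
                        return 1;
                if (cfg->line_out_type == AUTO_PIN_SPEAKER_OUT ||
                    spec->automute_lines)
                        return !spec->hp_present;
                return 1;       /* line-out is a real line-out: leave it on */
        }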
@@ -3696,13 +3707,14 @@ static int cx_auto_mux_enum_update(struct hda_codec *codec, | |||
3696 | { | 3707 | { |
3697 | struct conexant_spec *spec = codec->spec; | 3708 | struct conexant_spec *spec = codec->spec; |
3698 | hda_nid_t adc; | 3709 | hda_nid_t adc; |
3710 | int changed = 1; | ||
3699 | 3711 | ||
3700 | if (!imux->num_items) | 3712 | if (!imux->num_items) |
3701 | return 0; | 3713 | return 0; |
3702 | if (idx >= imux->num_items) | 3714 | if (idx >= imux->num_items) |
3703 | idx = imux->num_items - 1; | 3715 | idx = imux->num_items - 1; |
3704 | if (spec->cur_mux[0] == idx) | 3716 | if (spec->cur_mux[0] == idx) |
3705 | return 0; | 3717 | changed = 0; |
3706 | adc = spec->imux_info[idx].adc; | 3718 | adc = spec->imux_info[idx].adc; |
3707 | select_input_connection(codec, spec->imux_info[idx].adc, | 3719 | select_input_connection(codec, spec->imux_info[idx].adc, |
3708 | spec->imux_info[idx].pin); | 3720 | spec->imux_info[idx].pin); |
@@ -3715,7 +3727,7 @@ static int cx_auto_mux_enum_update(struct hda_codec *codec, | |||
3715 | spec->cur_adc_format); | 3727 | spec->cur_adc_format); |
3716 | } | 3728 | } |
3717 | spec->cur_mux[0] = idx; | 3729 | spec->cur_mux[0] = idx; |
3718 | return 1; | 3730 | return changed; |
3719 | } | 3731 | } |
3720 | 3732 | ||
3721 | static int cx_auto_mux_enum_put(struct snd_kcontrol *kcontrol, | 3733 | static int cx_auto_mux_enum_put(struct snd_kcontrol *kcontrol, |
@@ -3789,7 +3801,7 @@ static void cx_auto_check_auto_mic(struct hda_codec *codec) | |||
3789 | int pset[INPUT_PIN_ATTR_NORMAL + 1]; | 3801 | int pset[INPUT_PIN_ATTR_NORMAL + 1]; |
3790 | int i; | 3802 | int i; |
3791 | 3803 | ||
3792 | for (i = 0; i < INPUT_PIN_ATTR_NORMAL; i++) | 3804 | for (i = 0; i < ARRAY_SIZE(pset); i++) |
3793 | pset[i] = -1; | 3805 | pset[i] = -1; |
3794 | for (i = 0; i < spec->private_imux.num_items; i++) { | 3806 | for (i = 0; i < spec->private_imux.num_items; i++) { |
3795 | hda_nid_t pin = spec->imux_info[i].pin; | 3807 | hda_nid_t pin = spec->imux_info[i].pin; |
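The cx_auto_update_speakers() rework above changes three related decisions: HP EAPD now follows headphone presence alone, speakers are muted in auto-mode when a headphone (or, when line detection is enabled, a line-out) jack is plugged, and line-outs are muted by headphones only when the primary output is speaker-typed or automute_lines is set. A minimal standalone C model of that decision logic follows; the struct and function names are illustrative only, not the driver's API.

/* Standalone model of the reworked auto-mute decisions above. */
#include <stdbool.h>
#include <stdio.h>

struct automute_state {
	bool auto_mute;      /* auto-mute enabled by the user */
	bool hp_present;     /* headphone jack plugged */
	bool line_present;   /* line-out jack plugged */
	bool detect_line;    /* line-out jack detection usable */
	bool automute_lines; /* user wants line-outs muted by HP */
	bool lo_is_speaker;  /* primary line-out wired as a speaker */
};

/* EAPD on the HP pins: on unless auto-mute says no HP jack is present */
static bool hp_eapd_on(const struct automute_state *s)
{
	return s->auto_mute ? s->hp_present : true;
}

/* speakers stay on unless HP (or a detected line-out) is plugged */
static bool speakers_on(const struct automute_state *s)
{
	if (!s->auto_mute)
		return true;
	return !(s->hp_present || (s->detect_line && s->line_present));
}

/* line-outs are muted by HP only for speaker-type or opted-in setups */
static bool line_outs_on(const struct automute_state *s)
{
	if (!s->auto_mute)
		return true;
	if (s->lo_is_speaker || s->automute_lines)
		return !s->hp_present;
	return true;
}

int main(void)
{
	struct automute_state s = {
		.auto_mute = true, .hp_present = true,
		.detect_line = true, .line_present = false,
		.automute_lines = false, .lo_is_speaker = false,
	};

	printf("EAPD=%d speakers=%d line-outs=%d\n",
	       hp_eapd_on(&s), speakers_on(&s), line_outs_on(&s));
	return 0;
}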
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c index 322901873222..bd0ae697f9c4 100644 --- a/sound/pci/hda/patch_hdmi.c +++ b/sound/pci/hda/patch_hdmi.c | |||
@@ -48,8 +48,8 @@ MODULE_PARM_DESC(static_hdmi_pcm, "Don't restrict PCM parameters per ELD info"); | |||
48 | * | 48 | * |
49 | * The HDA correspondence of pipes/ports are converter/pin nodes. | 49 | * The HDA correspondence of pipes/ports are converter/pin nodes. |
50 | */ | 50 | */ |
51 | #define MAX_HDMI_CVTS 3 | 51 | #define MAX_HDMI_CVTS 4 |
52 | #define MAX_HDMI_PINS 3 | 52 | #define MAX_HDMI_PINS 4 |
53 | 53 | ||
54 | struct hdmi_spec { | 54 | struct hdmi_spec { |
55 | int num_cvts; | 55 | int num_cvts; |
@@ -78,10 +78,6 @@ struct hdmi_spec { | |||
78 | */ | 78 | */ |
79 | struct hda_multi_out multiout; | 79 | struct hda_multi_out multiout; |
80 | const struct hda_pcm_stream *pcm_playback; | 80 | const struct hda_pcm_stream *pcm_playback; |
81 | |||
82 | /* misc flags */ | ||
83 | /* PD bit indicates only the update, not the current state */ | ||
84 | unsigned int old_pin_detect:1; | ||
85 | }; | 81 | }; |
86 | 82 | ||
87 | 83 | ||
@@ -300,13 +296,6 @@ static int hda_node_index(hda_nid_t *nids, hda_nid_t nid) | |||
300 | return -EINVAL; | 296 | return -EINVAL; |
301 | } | 297 | } |
302 | 298 | ||
303 | static void hdmi_get_show_eld(struct hda_codec *codec, hda_nid_t pin_nid, | ||
304 | struct hdmi_eld *eld) | ||
305 | { | ||
306 | if (!snd_hdmi_get_eld(eld, codec, pin_nid)) | ||
307 | snd_hdmi_show_eld(eld); | ||
308 | } | ||
309 | |||
310 | #ifdef BE_PARANOID | 299 | #ifdef BE_PARANOID |
311 | static void hdmi_get_dip_index(struct hda_codec *codec, hda_nid_t pin_nid, | 300 | static void hdmi_get_dip_index(struct hda_codec *codec, hda_nid_t pin_nid, |
312 | int *packet_index, int *byte_index) | 301 | int *packet_index, int *byte_index) |
@@ -694,35 +683,20 @@ static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid, | |||
694 | static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) | 683 | static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res) |
695 | { | 684 | { |
696 | struct hdmi_spec *spec = codec->spec; | 685 | struct hdmi_spec *spec = codec->spec; |
697 | int tag = res >> AC_UNSOL_RES_TAG_SHIFT; | 686 | int pin_nid = res >> AC_UNSOL_RES_TAG_SHIFT; |
698 | int pind = !!(res & AC_UNSOL_RES_PD); | 687 | int pd = !!(res & AC_UNSOL_RES_PD); |
699 | int eldv = !!(res & AC_UNSOL_RES_ELDV); | 688 | int eldv = !!(res & AC_UNSOL_RES_ELDV); |
700 | int index; | 689 | int index; |
701 | 690 | ||
702 | printk(KERN_INFO | 691 | printk(KERN_INFO |
703 | "HDMI hot plug event: Pin=%d Presence_Detect=%d ELD_Valid=%d\n", | 692 | "HDMI hot plug event: Pin=%d Presence_Detect=%d ELD_Valid=%d\n", |
704 | tag, pind, eldv); | 693 | pin_nid, pd, eldv); |
705 | 694 | ||
706 | index = hda_node_index(spec->pin, tag); | 695 | index = hda_node_index(spec->pin, pin_nid); |
707 | if (index < 0) | 696 | if (index < 0) |
708 | return; | 697 | return; |
709 | 698 | ||
710 | if (spec->old_pin_detect) { | 699 | hdmi_present_sense(codec, pin_nid, &spec->sink_eld[index]); |
711 | if (pind) | ||
712 | hdmi_present_sense(codec, tag, &spec->sink_eld[index]); | ||
713 | pind = spec->sink_eld[index].monitor_present; | ||
714 | } | ||
715 | |||
716 | spec->sink_eld[index].monitor_present = pind; | ||
717 | spec->sink_eld[index].eld_valid = eldv; | ||
718 | |||
719 | if (pind && eldv) { | ||
720 | hdmi_get_show_eld(codec, spec->pin[index], | ||
721 | &spec->sink_eld[index]); | ||
722 | /* TODO: do real things about ELD */ | ||
723 | } | ||
724 | |||
725 | snd_hda_input_jack_report(codec, tag); | ||
726 | } | 700 | } |
727 | 701 | ||
728 | static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res) | 702 | static void hdmi_non_intrinsic_event(struct hda_codec *codec, unsigned int res) |
@@ -903,13 +877,33 @@ static int hdmi_read_pin_conn(struct hda_codec *codec, hda_nid_t pin_nid) | |||
903 | static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid, | 877 | static void hdmi_present_sense(struct hda_codec *codec, hda_nid_t pin_nid, |
904 | struct hdmi_eld *eld) | 878 | struct hdmi_eld *eld) |
905 | { | 879 | { |
880 | /* | ||
881 | * Always execute a GetPinSense verb here, even when called from | ||
882 | * hdmi_intrinsic_event; for some NVIDIA HW, the unsolicited | ||
883 | * response's PD bit is not the real PD value, but indicates that | ||
884 | * the real PD value changed. An older version of the HD-audio | ||
885 | * specification worked this way. Hence, we just ignore the data in | ||
886 | * the unsolicited response to avoid custom WARs. | ||
887 | */ | ||
906 | int present = snd_hda_pin_sense(codec, pin_nid); | 888 | int present = snd_hda_pin_sense(codec, pin_nid); |
907 | 889 | ||
890 | memset(eld, 0, sizeof(*eld)); | ||
891 | |||
908 | eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE); | 892 | eld->monitor_present = !!(present & AC_PINSENSE_PRESENCE); |
909 | eld->eld_valid = !!(present & AC_PINSENSE_ELDV); | 893 | if (eld->monitor_present) |
894 | eld->eld_valid = !!(present & AC_PINSENSE_ELDV); | ||
895 | else | ||
896 | eld->eld_valid = 0; | ||
910 | 897 | ||
911 | if (present & AC_PINSENSE_ELDV) | 898 | printk(KERN_INFO |
912 | hdmi_get_show_eld(codec, pin_nid, eld); | 899 | "HDMI status: Pin=%d Presence_Detect=%d ELD_Valid=%d\n", |
900 | pin_nid, eld->monitor_present, eld->eld_valid); | ||
901 | |||
902 | if (eld->eld_valid) | ||
903 | if (!snd_hdmi_get_eld(eld, codec, pin_nid)) | ||
904 | snd_hdmi_show_eld(eld); | ||
905 | |||
906 | snd_hda_input_jack_report(codec, pin_nid); | ||
913 | } | 907 | } |
914 | 908 | ||
915 | static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid) | 909 | static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid) |
@@ -927,7 +921,6 @@ static int hdmi_add_pin(struct hda_codec *codec, hda_nid_t pin_nid) | |||
927 | SND_JACK_VIDEOOUT, NULL); | 921 | SND_JACK_VIDEOOUT, NULL); |
928 | if (err < 0) | 922 | if (err < 0) |
929 | return err; | 923 | return err; |
930 | snd_hda_input_jack_report(codec, pin_nid); | ||
931 | 924 | ||
932 | hdmi_present_sense(codec, pin_nid, &spec->sink_eld[spec->num_pins]); | 925 | hdmi_present_sense(codec, pin_nid, &spec->sink_eld[spec->num_pins]); |
933 | 926 | ||
@@ -1034,6 +1027,7 @@ static char *generic_hdmi_pcm_names[MAX_HDMI_CVTS] = { | |||
1034 | "HDMI 0", | 1027 | "HDMI 0", |
1035 | "HDMI 1", | 1028 | "HDMI 1", |
1036 | "HDMI 2", | 1029 | "HDMI 2", |
1030 | "HDMI 3", | ||
1037 | }; | 1031 | }; |
1038 | 1032 | ||
1039 | /* | 1033 | /* |
@@ -1490,18 +1484,6 @@ static const struct hda_codec_ops nvhdmi_patch_ops_2ch = { | |||
1490 | .free = generic_hdmi_free, | 1484 | .free = generic_hdmi_free, |
1491 | }; | 1485 | }; |
1492 | 1486 | ||
1493 | static int patch_nvhdmi_8ch_89(struct hda_codec *codec) | ||
1494 | { | ||
1495 | struct hdmi_spec *spec; | ||
1496 | int err = patch_generic_hdmi(codec); | ||
1497 | |||
1498 | if (err < 0) | ||
1499 | return err; | ||
1500 | spec = codec->spec; | ||
1501 | spec->old_pin_detect = 1; | ||
1502 | return 0; | ||
1503 | } | ||
1504 | |||
1505 | static int patch_nvhdmi_2ch(struct hda_codec *codec) | 1487 | static int patch_nvhdmi_2ch(struct hda_codec *codec) |
1506 | { | 1488 | { |
1507 | struct hdmi_spec *spec; | 1489 | struct hdmi_spec *spec; |
@@ -1515,7 +1497,6 @@ static int patch_nvhdmi_2ch(struct hda_codec *codec) | |||
1515 | spec->multiout.num_dacs = 0; /* no analog */ | 1497 | spec->multiout.num_dacs = 0; /* no analog */ |
1516 | spec->multiout.max_channels = 2; | 1498 | spec->multiout.max_channels = 2; |
1517 | spec->multiout.dig_out_nid = nvhdmi_master_con_nid_7x; | 1499 | spec->multiout.dig_out_nid = nvhdmi_master_con_nid_7x; |
1518 | spec->old_pin_detect = 1; | ||
1519 | spec->num_cvts = 1; | 1500 | spec->num_cvts = 1; |
1520 | spec->cvt[0] = nvhdmi_master_con_nid_7x; | 1501 | spec->cvt[0] = nvhdmi_master_con_nid_7x; |
1521 | spec->pcm_playback = &nvhdmi_pcm_playback_2ch; | 1502 | spec->pcm_playback = &nvhdmi_pcm_playback_2ch; |
@@ -1658,28 +1639,28 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = { | |||
1658 | { .id = 0x10de0005, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x }, | 1639 | { .id = 0x10de0005, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x }, |
1659 | { .id = 0x10de0006, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x }, | 1640 | { .id = 0x10de0006, .name = "MCP77/78 HDMI", .patch = patch_nvhdmi_8ch_7x }, |
1660 | { .id = 0x10de0007, .name = "MCP79/7A HDMI", .patch = patch_nvhdmi_8ch_7x }, | 1641 | { .id = 0x10de0007, .name = "MCP79/7A HDMI", .patch = patch_nvhdmi_8ch_7x }, |
1661 | { .id = 0x10de000a, .name = "GPU 0a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1642 | { .id = 0x10de000a, .name = "GPU 0a HDMI/DP", .patch = patch_generic_hdmi }, |
1662 | { .id = 0x10de000b, .name = "GPU 0b HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1643 | { .id = 0x10de000b, .name = "GPU 0b HDMI/DP", .patch = patch_generic_hdmi }, |
1663 | { .id = 0x10de000c, .name = "MCP89 HDMI", .patch = patch_nvhdmi_8ch_89 }, | 1644 | { .id = 0x10de000c, .name = "MCP89 HDMI", .patch = patch_generic_hdmi }, |
1664 | { .id = 0x10de000d, .name = "GPU 0d HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1645 | { .id = 0x10de000d, .name = "GPU 0d HDMI/DP", .patch = patch_generic_hdmi }, |
1665 | { .id = 0x10de0010, .name = "GPU 10 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1646 | { .id = 0x10de0010, .name = "GPU 10 HDMI/DP", .patch = patch_generic_hdmi }, |
1666 | { .id = 0x10de0011, .name = "GPU 11 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1647 | { .id = 0x10de0011, .name = "GPU 11 HDMI/DP", .patch = patch_generic_hdmi }, |
1667 | { .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1648 | { .id = 0x10de0012, .name = "GPU 12 HDMI/DP", .patch = patch_generic_hdmi }, |
1668 | { .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1649 | { .id = 0x10de0013, .name = "GPU 13 HDMI/DP", .patch = patch_generic_hdmi }, |
1669 | { .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1650 | { .id = 0x10de0014, .name = "GPU 14 HDMI/DP", .patch = patch_generic_hdmi }, |
1670 | { .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1651 | { .id = 0x10de0015, .name = "GPU 15 HDMI/DP", .patch = patch_generic_hdmi }, |
1671 | { .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1652 | { .id = 0x10de0016, .name = "GPU 16 HDMI/DP", .patch = patch_generic_hdmi }, |
1672 | /* 17 is known to be absent */ | 1653 | /* 17 is known to be absent */ |
1673 | { .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1654 | { .id = 0x10de0018, .name = "GPU 18 HDMI/DP", .patch = patch_generic_hdmi }, |
1674 | { .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1655 | { .id = 0x10de0019, .name = "GPU 19 HDMI/DP", .patch = patch_generic_hdmi }, |
1675 | { .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1656 | { .id = 0x10de001a, .name = "GPU 1a HDMI/DP", .patch = patch_generic_hdmi }, |
1676 | { .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1657 | { .id = 0x10de001b, .name = "GPU 1b HDMI/DP", .patch = patch_generic_hdmi }, |
1677 | { .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1658 | { .id = 0x10de001c, .name = "GPU 1c HDMI/DP", .patch = patch_generic_hdmi }, |
1678 | { .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1659 | { .id = 0x10de0040, .name = "GPU 40 HDMI/DP", .patch = patch_generic_hdmi }, |
1679 | { .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1660 | { .id = 0x10de0041, .name = "GPU 41 HDMI/DP", .patch = patch_generic_hdmi }, |
1680 | { .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1661 | { .id = 0x10de0042, .name = "GPU 42 HDMI/DP", .patch = patch_generic_hdmi }, |
1681 | { .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1662 | { .id = 0x10de0043, .name = "GPU 43 HDMI/DP", .patch = patch_generic_hdmi }, |
1682 | { .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_nvhdmi_8ch_89 }, | 1663 | { .id = 0x10de0044, .name = "GPU 44 HDMI/DP", .patch = patch_generic_hdmi }, |
1683 | { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, | 1664 | { .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, |
1684 | { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, | 1665 | { .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, |
1685 | { .id = 0x80860054, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi }, | 1666 | { .id = 0x80860054, .name = "IbexPeak HDMI", .patch = patch_generic_hdmi }, |
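The patch_hdmi.c changes above drop the old_pin_detect workaround by always re-reading the pin sense in hdmi_present_sense() and by honouring ELDV only while presence-detect is set. Below is a small standalone model of that flow; PD and ELDV are modelled as the two high bits of the sense word, mirroring the AC_PINSENSE_* flags, and everything else is illustrative.

/* Standalone model of the new presence-sense handling above. */
#include <stdbool.h>
#include <stdio.h>

#define PINSENSE_PRESENCE (1u << 31)
#define PINSENSE_ELDV     (1u << 30)

struct eld_state {
	bool monitor_present;
	bool eld_valid;
};

static void present_sense(unsigned int pin_sense, struct eld_state *eld)
{
	/* start from a clean slate, as the patch memset()s the ELD */
	eld->monitor_present = pin_sense & PINSENSE_PRESENCE;
	eld->eld_valid = eld->monitor_present &&
			 (pin_sense & PINSENSE_ELDV);

	printf("Presence_Detect=%d ELD_Valid=%d\n",
	       eld->monitor_present, eld->eld_valid);
}

int main(void)
{
	struct eld_state eld;

	present_sense(PINSENSE_PRESENCE | PINSENSE_ELDV, &eld); /* 1 1 */
	present_sense(PINSENSE_ELDV, &eld);                     /* 0 0: ELDV ignored without PD */
	return 0;
}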
diff --git a/sound/soc/atmel/sam9g20_wm8731.c b/sound/soc/atmel/sam9g20_wm8731.c index 28afbbf69ce0..95572d290c27 100644 --- a/sound/soc/atmel/sam9g20_wm8731.c +++ b/sound/soc/atmel/sam9g20_wm8731.c | |||
@@ -146,7 +146,7 @@ static int at91sam9g20ek_wm8731_init(struct snd_soc_pcm_runtime *rtd) | |||
146 | "at91sam9g20ek_wm8731 " | 146 | "at91sam9g20ek_wm8731 " |
147 | ": at91sam9g20ek_wm8731_init() called\n"); | 147 | ": at91sam9g20ek_wm8731_init() called\n"); |
148 | 148 | ||
149 | ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_XTAL, | 149 | ret = snd_soc_dai_set_sysclk(codec_dai, WM8731_SYSCLK_MCLK, |
150 | MCLK_RATE, SND_SOC_CLOCK_IN); | 150 | MCLK_RATE, SND_SOC_CLOCK_IN); |
151 | if (ret < 0) { | 151 | if (ret < 0) { |
152 | printk(KERN_ERR "Failed to set WM8731 SYSCLK: %d\n", ret); | 152 | printk(KERN_ERR "Failed to set WM8731 SYSCLK: %d\n", ret); |
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c index b8066ef10bb0..46dbfd067f79 100644 --- a/sound/soc/codecs/cq93vc.c +++ b/sound/soc/codecs/cq93vc.c | |||
@@ -153,8 +153,7 @@ static int cq93vc_resume(struct snd_soc_codec *codec) | |||
153 | 153 | ||
154 | static int cq93vc_probe(struct snd_soc_codec *codec) | 154 | static int cq93vc_probe(struct snd_soc_codec *codec) |
155 | { | 155 | { |
156 | struct davinci_vc *davinci_vc = | 156 | struct davinci_vc *davinci_vc = codec->dev->platform_data; |
157 | mfd_get_data(to_platform_device(codec->dev)); | ||
158 | 157 | ||
159 | davinci_vc->cq93vc.codec = codec; | 158 | davinci_vc->cq93vc.codec = codec; |
160 | codec->control_data = davinci_vc; | 159 | codec->control_data = davinci_vc; |
diff --git a/sound/soc/codecs/twl4030.c b/sound/soc/codecs/twl4030.c index 575238d68e5e..bec788b12613 100644 --- a/sound/soc/codecs/twl4030.c +++ b/sound/soc/codecs/twl4030.c | |||
@@ -26,7 +26,6 @@ | |||
26 | #include <linux/pm.h> | 26 | #include <linux/pm.h> |
27 | #include <linux/i2c.h> | 27 | #include <linux/i2c.h> |
28 | #include <linux/platform_device.h> | 28 | #include <linux/platform_device.h> |
29 | #include <linux/mfd/core.h> | ||
30 | #include <linux/i2c/twl.h> | 29 | #include <linux/i2c/twl.h> |
31 | #include <linux/slab.h> | 30 | #include <linux/slab.h> |
32 | #include <sound/core.h> | 31 | #include <sound/core.h> |
@@ -733,8 +732,7 @@ static int aif_event(struct snd_soc_dapm_widget *w, | |||
733 | 732 | ||
734 | static void headset_ramp(struct snd_soc_codec *codec, int ramp) | 733 | static void headset_ramp(struct snd_soc_codec *codec, int ramp) |
735 | { | 734 | { |
736 | struct twl4030_codec_audio_data *pdata = | 735 | struct twl4030_codec_audio_data *pdata = codec->dev->platform_data; |
737 | mfd_get_data(to_platform_device(codec->dev)); | ||
738 | unsigned char hs_gain, hs_pop; | 736 | unsigned char hs_gain, hs_pop; |
739 | struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec); | 737 | struct twl4030_priv *twl4030 = snd_soc_codec_get_drvdata(codec); |
740 | /* Base values for ramp delay calculation: 2^19 - 2^26 */ | 738 | /* Base values for ramp delay calculation: 2^19 - 2^26 */ |
@@ -2299,7 +2297,7 @@ static struct snd_soc_codec_driver soc_codec_dev_twl4030 = { | |||
2299 | 2297 | ||
2300 | static int __devinit twl4030_codec_probe(struct platform_device *pdev) | 2298 | static int __devinit twl4030_codec_probe(struct platform_device *pdev) |
2301 | { | 2299 | { |
2302 | struct twl4030_codec_audio_data *pdata = mfd_get_data(pdev); | 2300 | struct twl4030_codec_audio_data *pdata = pdev->dev.platform_data; |
2303 | 2301 | ||
2304 | if (!pdata) { | 2302 | if (!pdata) { |
2305 | dev_err(&pdev->dev, "platform_data is missing\n"); | 2303 | dev_err(&pdev->dev, "platform_data is missing\n"); |
diff --git a/sound/soc/codecs/wl1273.c b/sound/soc/codecs/wl1273.c index c8a874d0d4ca..5836201834d9 100644 --- a/sound/soc/codecs/wl1273.c +++ b/sound/soc/codecs/wl1273.c | |||
@@ -441,8 +441,7 @@ EXPORT_SYMBOL_GPL(wl1273_get_format); | |||
441 | 441 | ||
442 | static int wl1273_probe(struct snd_soc_codec *codec) | 442 | static int wl1273_probe(struct snd_soc_codec *codec) |
443 | { | 443 | { |
444 | struct wl1273_core **core = | 444 | struct wl1273_core **core = codec->dev->platform_data; |
445 | mfd_get_data(to_platform_device(codec->dev)); | ||
446 | struct wl1273_priv *wl1273; | 445 | struct wl1273_priv *wl1273; |
447 | int r; | 446 | int r; |
448 | 447 | ||
diff --git a/sound/soc/codecs/wm1250-ev1.c b/sound/soc/codecs/wm1250-ev1.c index 14d0716bf009..bcc208967917 100644 --- a/sound/soc/codecs/wm1250-ev1.c +++ b/sound/soc/codecs/wm1250-ev1.c | |||
@@ -22,7 +22,7 @@ SND_SOC_DAPM_ADC("ADC", "wm1250-ev1 Capture", SND_SOC_NOPM, 0, 0), | |||
22 | SND_SOC_DAPM_DAC("DAC", "wm1250-ev1 Playback", SND_SOC_NOPM, 0, 0), | 22 | SND_SOC_DAPM_DAC("DAC", "wm1250-ev1 Playback", SND_SOC_NOPM, 0, 0), |
23 | 23 | ||
24 | SND_SOC_DAPM_INPUT("WM1250 Input"), | 24 | SND_SOC_DAPM_INPUT("WM1250 Input"), |
25 | SND_SOC_DAPM_INPUT("WM1250 Output"), | 25 | SND_SOC_DAPM_OUTPUT("WM1250 Output"), |
26 | }; | 26 | }; |
27 | 27 | ||
28 | static const struct snd_soc_dapm_route wm1250_ev1_dapm_routes[] = { | 28 | static const struct snd_soc_dapm_route wm1250_ev1_dapm_routes[] = { |
diff --git a/sound/soc/codecs/wm8400.c b/sound/soc/codecs/wm8400.c index 736b785e3756..fbee556cbf35 100644 --- a/sound/soc/codecs/wm8400.c +++ b/sound/soc/codecs/wm8400.c | |||
@@ -1378,7 +1378,7 @@ static void wm8400_probe_deferred(struct work_struct *work) | |||
1378 | 1378 | ||
1379 | static int wm8400_codec_probe(struct snd_soc_codec *codec) | 1379 | static int wm8400_codec_probe(struct snd_soc_codec *codec) |
1380 | { | 1380 | { |
1381 | struct wm8400 *wm8400 = mfd_get_data(to_platform_device(codec->dev)); | 1381 | struct wm8400 *wm8400 = dev_get_platdata(codec->dev); |
1382 | struct wm8400_priv *priv; | 1382 | struct wm8400_priv *priv; |
1383 | int ret; | 1383 | int ret; |
1384 | u16 reg; | 1384 | u16 reg; |
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c index 6dec7cee2cb4..2dc964b55e4f 100644 --- a/sound/soc/codecs/wm8731.c +++ b/sound/soc/codecs/wm8731.c | |||
@@ -198,7 +198,7 @@ static int wm8731_check_osc(struct snd_soc_dapm_widget *source, | |||
198 | { | 198 | { |
199 | struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(source->codec); | 199 | struct wm8731_priv *wm8731 = snd_soc_codec_get_drvdata(source->codec); |
200 | 200 | ||
201 | return wm8731->sysclk_type == WM8731_SYSCLK_MCLK; | 201 | return wm8731->sysclk_type == WM8731_SYSCLK_XTAL; |
202 | } | 202 | } |
203 | 203 | ||
204 | static const struct snd_soc_dapm_route wm8731_intercon[] = { | 204 | static const struct snd_soc_dapm_route wm8731_intercon[] = { |
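The sam9g20_wm8731.c and wm8731.c hunks above are two halves of one clocking fix: the AT91SAM9G20-EK board now declares that it feeds the codec an external MCLK, and the codec's oscillator check is inverted so the crystal supply widget is only required when the codec really runs from its crystal. A trivial standalone model of the corrected check, with names shortened from the driver's WM8731_SYSCLK_* constants:

#include <stdbool.h>
#include <stdio.h>

enum { SYSCLK_XTAL, SYSCLK_MCLK };

/* the crystal oscillator supply is needed only for XTAL clocking */
static bool osc_required(int sysclk_type)
{
	return sysclk_type == SYSCLK_XTAL;
}

int main(void)
{
	printf("XTAL needs OSC: %d, MCLK needs OSC: %d\n",
	       osc_required(SYSCLK_XTAL), osc_required(SYSCLK_MCLK));
	return 0;
}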
diff --git a/sound/soc/codecs/wm8915.c b/sound/soc/codecs/wm8915.c index ccc9bd832794..a0b1a7278284 100644 --- a/sound/soc/codecs/wm8915.c +++ b/sound/soc/codecs/wm8915.c | |||
@@ -19,7 +19,6 @@ | |||
19 | #include <linux/gcd.h> | 19 | #include <linux/gcd.h> |
20 | #include <linux/gpio.h> | 20 | #include <linux/gpio.h> |
21 | #include <linux/i2c.h> | 21 | #include <linux/i2c.h> |
22 | #include <linux/delay.h> | ||
23 | #include <linux/regulator/consumer.h> | 22 | #include <linux/regulator/consumer.h> |
24 | #include <linux/slab.h> | 23 | #include <linux/slab.h> |
25 | #include <linux/workqueue.h> | 24 | #include <linux/workqueue.h> |
diff --git a/sound/soc/davinci/davinci-vcif.c b/sound/soc/davinci/davinci-vcif.c index 13e05a302a92..9259f1f34899 100644 --- a/sound/soc/davinci/davinci-vcif.c +++ b/sound/soc/davinci/davinci-vcif.c | |||
@@ -205,7 +205,7 @@ static struct snd_soc_dai_driver davinci_vcif_dai = { | |||
205 | 205 | ||
206 | static int davinci_vcif_probe(struct platform_device *pdev) | 206 | static int davinci_vcif_probe(struct platform_device *pdev) |
207 | { | 207 | { |
208 | struct davinci_vc *davinci_vc = mfd_get_data(pdev); | 208 | struct davinci_vc *davinci_vc = pdev->dev.platform_data; |
209 | struct davinci_vcif_dev *davinci_vcif_dev; | 209 | struct davinci_vcif_dev *davinci_vcif_dev; |
210 | int ret; | 210 | int ret; |
211 | 211 | ||
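The cq93vc, twl4030, wl1273, wm8400 and davinci-vcif hunks above all follow the same conversion: instead of fetching MFD cell data with mfd_get_data(), the child driver reads an ordinary platform_data pointer supplied by its parent. A hedged kernel-style sketch of the consuming side is below; the foo_* names are placeholders and this is a fragment, not a complete driver.

#include <linux/platform_device.h>

struct foo_pdata {
	int irq;	/* whatever the parent hands down */
};

static int foo_child_probe(struct platform_device *pdev)
{
	/* read plain platform_data instead of calling mfd_get_data(pdev) */
	struct foo_pdata *pdata = dev_get_platdata(&pdev->dev);

	if (!pdata) {
		dev_err(&pdev->dev, "platform_data is missing\n");
		return -EINVAL;
	}
	/* ... use pdata->irq ... */
	return 0;
}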
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig index b5922984eac6..99054cf1f68f 100644 --- a/sound/soc/omap/Kconfig +++ b/sound/soc/omap/Kconfig | |||
@@ -65,14 +65,6 @@ config SND_OMAP_SOC_OVERO | |||
65 | Say Y if you want to add support for SoC audio on the | 65 | Say Y if you want to add support for SoC audio on the |
66 | Gumstix Overo or CompuLab CM-T35 | 66 | Gumstix Overo or CompuLab CM-T35 |
67 | 67 | ||
68 | config SND_OMAP_SOC_OMAP2EVM | ||
69 | tristate "SoC Audio support for OMAP2EVM board" | ||
70 | depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP2EVM | ||
71 | select SND_OMAP_SOC_MCBSP | ||
72 | select SND_SOC_TWL4030 | ||
73 | help | ||
74 | Say Y if you want to add support for SoC audio on the omap2evm board. | ||
75 | |||
76 | config SND_OMAP_SOC_OMAP3EVM | 68 | config SND_OMAP_SOC_OMAP3EVM |
77 | tristate "SoC Audio support for OMAP3EVM board" | 69 | tristate "SoC Audio support for OMAP3EVM board" |
78 | depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP3EVM | 70 | depends on TWL4030_CORE && SND_OMAP_SOC && MACH_OMAP3EVM |
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile index ba9fc650db28..6c2c87eed5bb 100644 --- a/sound/soc/omap/Makefile +++ b/sound/soc/omap/Makefile | |||
@@ -13,7 +13,6 @@ snd-soc-rx51-objs := rx51.o | |||
13 | snd-soc-ams-delta-objs := ams-delta.o | 13 | snd-soc-ams-delta-objs := ams-delta.o |
14 | snd-soc-osk5912-objs := osk5912.o | 14 | snd-soc-osk5912-objs := osk5912.o |
15 | snd-soc-overo-objs := overo.o | 15 | snd-soc-overo-objs := overo.o |
16 | snd-soc-omap2evm-objs := omap2evm.o | ||
17 | snd-soc-omap3evm-objs := omap3evm.o | 16 | snd-soc-omap3evm-objs := omap3evm.o |
18 | snd-soc-am3517evm-objs := am3517evm.o | 17 | snd-soc-am3517evm-objs := am3517evm.o |
19 | snd-soc-sdp3430-objs := sdp3430.o | 18 | snd-soc-sdp3430-objs := sdp3430.o |
diff --git a/sound/soc/omap/omap2evm.c b/sound/soc/omap/omap2evm.c deleted file mode 100644 index 29b60d6796e7..000000000000 --- a/sound/soc/omap/omap2evm.c +++ /dev/null | |||
@@ -1,139 +0,0 @@ | |||
1 | /* | ||
2 | * omap2evm.c -- SoC audio machine driver for omap2evm board | ||
3 | * | ||
4 | * Author: Arun KS <arunks@mistralsolutions.com> | ||
5 | * | ||
6 | * Based on sound/soc/omap/overo.c by Steve Sakoman | ||
7 | * | ||
8 | * This program is free software; you can redistribute it and/or | ||
9 | * modify it under the terms of the GNU General Public License | ||
10 | * version 2 as published by the Free Software Foundation. | ||
11 | * | ||
12 | * This program is distributed in the hope that it will be useful, but | ||
13 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
15 | * General Public License for more details. | ||
16 | * | ||
17 | * You should have received a copy of the GNU General Public License | ||
18 | * along with this program; if not, write to the Free Software | ||
19 | * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA | ||
20 | * 02110-1301 USA | ||
21 | * | ||
22 | */ | ||
23 | |||
24 | #include <linux/clk.h> | ||
25 | #include <linux/platform_device.h> | ||
26 | #include <sound/core.h> | ||
27 | #include <sound/pcm.h> | ||
28 | #include <sound/soc.h> | ||
29 | |||
30 | #include <asm/mach-types.h> | ||
31 | #include <mach/hardware.h> | ||
32 | #include <mach/gpio.h> | ||
33 | #include <plat/mcbsp.h> | ||
34 | |||
35 | #include "omap-mcbsp.h" | ||
36 | #include "omap-pcm.h" | ||
37 | |||
38 | static int omap2evm_hw_params(struct snd_pcm_substream *substream, | ||
39 | struct snd_pcm_hw_params *params) | ||
40 | { | ||
41 | struct snd_soc_pcm_runtime *rtd = substream->private_data; | ||
42 | struct snd_soc_dai *codec_dai = rtd->codec_dai; | ||
43 | struct snd_soc_dai *cpu_dai = rtd->cpu_dai; | ||
44 | int ret; | ||
45 | |||
46 | /* Set codec DAI configuration */ | ||
47 | ret = snd_soc_dai_set_fmt(codec_dai, | ||
48 | SND_SOC_DAIFMT_I2S | | ||
49 | SND_SOC_DAIFMT_NB_NF | | ||
50 | SND_SOC_DAIFMT_CBM_CFM); | ||
51 | if (ret < 0) { | ||
52 | printk(KERN_ERR "can't set codec DAI configuration\n"); | ||
53 | return ret; | ||
54 | } | ||
55 | |||
56 | /* Set cpu DAI configuration */ | ||
57 | ret = snd_soc_dai_set_fmt(cpu_dai, | ||
58 | SND_SOC_DAIFMT_I2S | | ||
59 | SND_SOC_DAIFMT_NB_NF | | ||
60 | SND_SOC_DAIFMT_CBM_CFM); | ||
61 | if (ret < 0) { | ||
62 | printk(KERN_ERR "can't set cpu DAI configuration\n"); | ||
63 | return ret; | ||
64 | } | ||
65 | |||
66 | /* Set the codec system clock for DAC and ADC */ | ||
67 | ret = snd_soc_dai_set_sysclk(codec_dai, 0, 26000000, | ||
68 | SND_SOC_CLOCK_IN); | ||
69 | if (ret < 0) { | ||
70 | printk(KERN_ERR "can't set codec system clock\n"); | ||
71 | return ret; | ||
72 | } | ||
73 | |||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static struct snd_soc_ops omap2evm_ops = { | ||
78 | .hw_params = omap2evm_hw_params, | ||
79 | }; | ||
80 | |||
81 | /* Digital audio interface glue - connects codec <--> CPU */ | ||
82 | static struct snd_soc_dai_link omap2evm_dai = { | ||
83 | .name = "TWL4030", | ||
84 | .stream_name = "TWL4030", | ||
85 | .cpu_dai_name = "omap-mcbsp-dai.1", | ||
86 | .codec_dai_name = "twl4030-hifi", | ||
87 | .platform_name = "omap-pcm-audio", | ||
88 | .codec_name = "twl4030-codec", | ||
89 | .ops = &omap2evm_ops, | ||
90 | }; | ||
91 | |||
92 | /* Audio machine driver */ | ||
93 | static struct snd_soc_card snd_soc_omap2evm = { | ||
94 | .name = "omap2evm", | ||
95 | .dai_link = &omap2evm_dai, | ||
96 | .num_links = 1, | ||
97 | }; | ||
98 | |||
99 | static struct platform_device *omap2evm_snd_device; | ||
100 | |||
101 | static int __init omap2evm_soc_init(void) | ||
102 | { | ||
103 | int ret; | ||
104 | |||
105 | if (!machine_is_omap2evm()) | ||
106 | return -ENODEV; | ||
107 | printk(KERN_INFO "omap2evm SoC init\n"); | ||
108 | |||
109 | omap2evm_snd_device = platform_device_alloc("soc-audio", -1); | ||
110 | if (!omap2evm_snd_device) { | ||
111 | printk(KERN_ERR "Platform device allocation failed\n"); | ||
112 | return -ENOMEM; | ||
113 | } | ||
114 | |||
115 | platform_set_drvdata(omap2evm_snd_device, &snd_soc_omap2evm); | ||
116 | |||
117 | ret = platform_device_add(omap2evm_snd_device); | ||
118 | if (ret) | ||
119 | goto err1; | ||
120 | |||
121 | return 0; | ||
122 | |||
123 | err1: | ||
124 | printk(KERN_ERR "Unable to add platform device\n"); | ||
125 | platform_device_put(omap2evm_snd_device); | ||
126 | |||
127 | return ret; | ||
128 | } | ||
129 | module_init(omap2evm_soc_init); | ||
130 | |||
131 | static void __exit omap2evm_soc_exit(void) | ||
132 | { | ||
133 | platform_device_unregister(omap2evm_snd_device); | ||
134 | } | ||
135 | module_exit(omap2evm_soc_exit); | ||
136 | |||
137 | MODULE_AUTHOR("Arun KS <arunks@mistralsolutions.com>"); | ||
138 | MODULE_DESCRIPTION("ALSA SoC omap2evm"); | ||
139 | MODULE_LICENSE("GPL"); | ||
diff --git a/sound/soc/pxa/raumfeld.c b/sound/soc/pxa/raumfeld.c index 2afabaf59491..1a591f1ebfbd 100644 --- a/sound/soc/pxa/raumfeld.c +++ b/sound/soc/pxa/raumfeld.c | |||
@@ -151,13 +151,13 @@ static struct snd_soc_ops raumfeld_cs4270_ops = { | |||
151 | .hw_params = raumfeld_cs4270_hw_params, | 151 | .hw_params = raumfeld_cs4270_hw_params, |
152 | }; | 152 | }; |
153 | 153 | ||
154 | static int raumfeld_line_suspend(struct snd_soc_card *card) | 154 | static int raumfeld_analog_suspend(struct snd_soc_card *card) |
155 | { | 155 | { |
156 | raumfeld_enable_audio(false); | 156 | raumfeld_enable_audio(false); |
157 | return 0; | 157 | return 0; |
158 | } | 158 | } |
159 | 159 | ||
160 | static int raumfeld_line_resume(struct snd_soc_card *card) | 160 | static int raumfeld_analog_resume(struct snd_soc_card *card) |
161 | { | 161 | { |
162 | raumfeld_enable_audio(true); | 162 | raumfeld_enable_audio(true); |
163 | return 0; | 163 | return 0; |
@@ -225,32 +225,53 @@ static struct snd_soc_ops raumfeld_ak4104_ops = { | |||
225 | .hw_params = raumfeld_ak4104_hw_params, | 225 | .hw_params = raumfeld_ak4104_hw_params, |
226 | }; | 226 | }; |
227 | 227 | ||
228 | static struct snd_soc_dai_link raumfeld_dai[] = { | 228 | #define DAI_LINK_CS4270 \ |
229 | { \ | ||
230 | .name = "CS4270", \ | ||
231 | .stream_name = "CS4270", \ | ||
232 | .cpu_dai_name = "pxa-ssp-dai.0", \ | ||
233 | .platform_name = "pxa-pcm-audio", \ | ||
234 | .codec_dai_name = "cs4270-hifi", \ | ||
235 | .codec_name = "cs4270-codec.0-0048", \ | ||
236 | .ops = &raumfeld_cs4270_ops, \ | ||
237 | } | ||
238 | |||
239 | #define DAI_LINK_AK4104 \ | ||
240 | { \ | ||
241 | .name = "ak4104", \ | ||
242 | .stream_name = "Playback", \ | ||
243 | .cpu_dai_name = "pxa-ssp-dai.1", \ | ||
244 | .codec_dai_name = "ak4104-hifi", \ | ||
245 | .platform_name = "pxa-pcm-audio", \ | ||
246 | .ops = &raumfeld_ak4104_ops, \ | ||
247 | .codec_name = "spi0.0", \ | ||
248 | } | ||
249 | |||
250 | static struct snd_soc_dai_link snd_soc_raumfeld_connector_dai[] = | ||
229 | { | 251 | { |
230 | .name = "ak4104", | 252 | DAI_LINK_CS4270, |
231 | .stream_name = "Playback", | 253 | DAI_LINK_AK4104, |
232 | .cpu_dai_name = "pxa-ssp-dai.1", | 254 | }; |
233 | .codec_dai_name = "ak4104-hifi", | 255 | |
234 | .platform_name = "pxa-pcm-audio", | 256 | static struct snd_soc_dai_link snd_soc_raumfeld_speaker_dai[] = |
235 | .ops = &raumfeld_ak4104_ops, | ||
236 | .codec_name = "ak4104-codec.0", | ||
237 | }, | ||
238 | { | 257 | { |
239 | .name = "CS4270", | 258 | DAI_LINK_CS4270, |
240 | .stream_name = "CS4270", | 259 | }; |
241 | .cpu_dai_name = "pxa-ssp-dai.0", | 260 | |
242 | .platform_name = "pxa-pcm-audio", | 261 | static struct snd_soc_card snd_soc_raumfeld_connector = { |
243 | .codec_dai_name = "cs4270-hifi", | 262 | .name = "Raumfeld Connector", |
244 | .codec_name = "cs4270-codec.0-0048", | 263 | .dai_link = snd_soc_raumfeld_connector_dai, |
245 | .ops = &raumfeld_cs4270_ops, | 264 | .num_links = ARRAY_SIZE(snd_soc_raumfeld_connector_dai), |
246 | },}; | 265 | .suspend_post = raumfeld_analog_suspend, |
247 | 266 | .resume_pre = raumfeld_analog_resume, | |
248 | static struct snd_soc_card snd_soc_raumfeld = { | 267 | }; |
249 | .name = "Raumfeld", | 268 | |
250 | .dai_link = raumfeld_dai, | 269 | static struct snd_soc_card snd_soc_raumfeld_speaker = { |
251 | .suspend_post = raumfeld_line_suspend, | 270 | .name = "Raumfeld Speaker", |
252 | .resume_pre = raumfeld_line_resume, | 271 | .dai_link = snd_soc_raumfeld_speaker_dai, |
253 | .num_links = ARRAY_SIZE(raumfeld_dai), | 272 | .num_links = ARRAY_SIZE(snd_soc_raumfeld_speaker_dai), |
273 | .suspend_post = raumfeld_analog_suspend, | ||
274 | .resume_pre = raumfeld_analog_resume, | ||
254 | }; | 275 | }; |
255 | 276 | ||
256 | static struct platform_device *raumfeld_audio_device; | 277 | static struct platform_device *raumfeld_audio_device; |
@@ -271,22 +292,25 @@ static int __init raumfeld_audio_init(void) | |||
271 | 292 | ||
272 | set_max9485_clk(MAX9485_MCLK_FREQ_122880); | 293 | set_max9485_clk(MAX9485_MCLK_FREQ_122880); |
273 | 294 | ||
274 | /* Register LINE and SPDIF */ | 295 | /* Register analog device */ |
275 | raumfeld_audio_device = platform_device_alloc("soc-audio", 0); | 296 | raumfeld_audio_device = platform_device_alloc("soc-audio", 0); |
276 | if (!raumfeld_audio_device) | 297 | if (!raumfeld_audio_device) |
277 | return -ENOMEM; | 298 | return -ENOMEM; |
278 | 299 | ||
279 | platform_set_drvdata(raumfeld_audio_device, | ||
280 | &snd_soc_raumfeld); | ||
281 | ret = platform_device_add(raumfeld_audio_device); | ||
282 | |||
283 | /* no S/PDIF on Speakers */ | ||
284 | if (machine_is_raumfeld_speaker()) | 300 | if (machine_is_raumfeld_speaker()) |
301 | platform_set_drvdata(raumfeld_audio_device, | ||
302 | &snd_soc_raumfeld_speaker); | ||
303 | |||
304 | if (machine_is_raumfeld_connector()) | ||
305 | platform_set_drvdata(raumfeld_audio_device, | ||
306 | &snd_soc_raumfeld_connector); | ||
307 | |||
308 | ret = platform_device_add(raumfeld_audio_device); | ||
309 | if (ret < 0) | ||
285 | return ret; | 310 | return ret; |
286 | 311 | ||
287 | raumfeld_enable_audio(true); | 312 | raumfeld_enable_audio(true); |
288 | 313 | return 0; | |
289 | return ret; | ||
290 | } | 314 | } |
291 | 315 | ||
292 | static void __exit raumfeld_audio_exit(void) | 316 | static void __exit raumfeld_audio_exit(void) |
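The raumfeld.c rewrite above splits the single card into per-machine cards ("Connector" with S/PDIF, "Speaker" without) while sharing the CS4270 link definition through a macro. A standalone illustration of that macro-sharing pattern, with the structure trimmed to a couple of the field names used in the patch:

#include <stdio.h>

#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))

struct dai_link {
	const char *name;
	const char *codec_name;
};

/* one initializer macro feeds both link tables */
#define DAI_LINK_CS4270 \
	{ .name = "CS4270", .codec_name = "cs4270-codec.0-0048", }

#define DAI_LINK_AK4104 \
	{ .name = "ak4104", .codec_name = "spi0.0", }

static struct dai_link connector_dai[] = { DAI_LINK_CS4270, DAI_LINK_AK4104 };
static struct dai_link speaker_dai[]   = { DAI_LINK_CS4270 };

int main(void)
{
	printf("connector links: %zu, speaker links: %zu\n",
	       ARRAY_SIZE(connector_dai), ARRAY_SIZE(speaker_dai));
	return 0;
}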
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c index bb7cd5812945..d75043ed7fc0 100644 --- a/sound/soc/soc-core.c +++ b/sound/soc/soc-core.c | |||
@@ -1306,10 +1306,6 @@ static int soc_bind_dai_link(struct snd_soc_card *card, int num) | |||
1306 | /* no, then find CPU DAI from registered DAIs*/ | 1306 | /* no, then find CPU DAI from registered DAIs*/ |
1307 | list_for_each_entry(cpu_dai, &dai_list, list) { | 1307 | list_for_each_entry(cpu_dai, &dai_list, list) { |
1308 | if (!strcmp(cpu_dai->name, dai_link->cpu_dai_name)) { | 1308 | if (!strcmp(cpu_dai->name, dai_link->cpu_dai_name)) { |
1309 | |||
1310 | if (!try_module_get(cpu_dai->dev->driver->owner)) | ||
1311 | return -ENODEV; | ||
1312 | |||
1313 | rtd->cpu_dai = cpu_dai; | 1309 | rtd->cpu_dai = cpu_dai; |
1314 | goto find_codec; | 1310 | goto find_codec; |
1315 | } | 1311 | } |
@@ -1622,11 +1618,15 @@ static int soc_probe_dai_link(struct snd_soc_card *card, int num) | |||
1622 | 1618 | ||
1623 | /* probe the cpu_dai */ | 1619 | /* probe the cpu_dai */ |
1624 | if (!cpu_dai->probed) { | 1620 | if (!cpu_dai->probed) { |
1621 | if (!try_module_get(cpu_dai->dev->driver->owner)) | ||
1622 | return -ENODEV; | ||
1623 | |||
1625 | if (cpu_dai->driver->probe) { | 1624 | if (cpu_dai->driver->probe) { |
1626 | ret = cpu_dai->driver->probe(cpu_dai); | 1625 | ret = cpu_dai->driver->probe(cpu_dai); |
1627 | if (ret < 0) { | 1626 | if (ret < 0) { |
1628 | printk(KERN_ERR "asoc: failed to probe CPU DAI %s\n", | 1627 | printk(KERN_ERR "asoc: failed to probe CPU DAI %s\n", |
1629 | cpu_dai->name); | 1628 | cpu_dai->name); |
1629 | module_put(cpu_dai->dev->driver->owner); | ||
1630 | return ret; | 1630 | return ret; |
1631 | } | 1631 | } |
1632 | } | 1632 | } |
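The soc-core.c hunks above move the module reference from DAI binding to DAI probing: try_module_get() is now taken immediately before the CPU DAI probe and dropped again with module_put() if the probe fails, so merely binding a link no longer pins the module. A standalone model of that acquire/probe/release-on-failure pattern, where the "module" is just a counter for illustration:

#include <stdbool.h>
#include <stdio.h>

static int module_refcount;

static bool try_module_get(void) { module_refcount++; return true; }
static void module_put(void)     { module_refcount--; }

static int probe_dai(bool probe_ok)
{
	if (!try_module_get())
		return -1;            /* -ENODEV in the real code */
	if (!probe_ok) {
		module_put();         /* undo the reference on failure */
		return -1;
	}
	return 0;                     /* reference kept while probed */
}

int main(void)
{
	probe_dai(false);
	printf("refcount after failed probe: %d\n", module_refcount); /* 0 */
	probe_dai(true);
	printf("refcount after good probe:   %d\n", module_refcount); /* 1 */
	return 0;
}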
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c index 456617e63789..999bb08cdfb1 100644 --- a/sound/soc/soc-dapm.c +++ b/sound/soc/soc-dapm.c | |||
@@ -1110,7 +1110,7 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event) | |||
1110 | trace_snd_soc_dapm_start(card); | 1110 | trace_snd_soc_dapm_start(card); |
1111 | 1111 | ||
1112 | list_for_each_entry(d, &card->dapm_list, list) | 1112 | list_for_each_entry(d, &card->dapm_list, list) |
1113 | if (d->n_widgets) | 1113 | if (d->n_widgets || d->codec == NULL) |
1114 | d->dev_power = 0; | 1114 | d->dev_power = 0; |
1115 | 1115 | ||
1116 | /* Check which widgets we need to power and store them in | 1116 | /* Check which widgets we need to power and store them in |
diff --git a/sound/usb/card.c b/sound/usb/card.c index a90662af2d6b..220c6167dd86 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c | |||
@@ -48,6 +48,7 @@ | |||
48 | #include <linux/usb/audio.h> | 48 | #include <linux/usb/audio.h> |
49 | #include <linux/usb/audio-v2.h> | 49 | #include <linux/usb/audio-v2.h> |
50 | 50 | ||
51 | #include <sound/control.h> | ||
51 | #include <sound/core.h> | 52 | #include <sound/core.h> |
52 | #include <sound/info.h> | 53 | #include <sound/info.h> |
53 | #include <sound/pcm.h> | 54 | #include <sound/pcm.h> |
@@ -492,14 +493,6 @@ static void *snd_usb_audio_probe(struct usb_device *dev, | |||
492 | } | 493 | } |
493 | } | 494 | } |
494 | 495 | ||
495 | chip->txfr_quirk = 0; | ||
496 | err = 1; /* continue */ | ||
497 | if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) { | ||
498 | /* need some special handlings */ | ||
499 | if ((err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk)) < 0) | ||
500 | goto __error; | ||
501 | } | ||
502 | |||
503 | /* | 496 | /* |
504 | * For devices with more than one control interface, we assume the | 497 | * For devices with more than one control interface, we assume the |
505 | * first contains the audio controls. We might need a more specific | 498 | * first contains the audio controls. We might need a more specific |
@@ -508,6 +501,14 @@ static void *snd_usb_audio_probe(struct usb_device *dev, | |||
508 | if (!chip->ctrl_intf) | 501 | if (!chip->ctrl_intf) |
509 | chip->ctrl_intf = alts; | 502 | chip->ctrl_intf = alts; |
510 | 503 | ||
504 | chip->txfr_quirk = 0; | ||
505 | err = 1; /* continue */ | ||
506 | if (quirk && quirk->ifnum != QUIRK_NO_INTERFACE) { | ||
507 | /* need some special handlings */ | ||
508 | if ((err = snd_usb_create_quirk(chip, intf, &usb_audio_driver, quirk)) < 0) | ||
509 | goto __error; | ||
510 | } | ||
511 | |||
511 | if (err > 0) { | 512 | if (err > 0) { |
512 | /* create normal USB audio interfaces */ | 513 | /* create normal USB audio interfaces */ |
513 | if (snd_usb_create_streams(chip, ifnum) < 0 || | 514 | if (snd_usb_create_streams(chip, ifnum) < 0 || |
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index eab06edcc9b7..c22fa76e363a 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c | |||
@@ -86,16 +86,6 @@ struct mixer_build { | |||
86 | const struct usbmix_selector_map *selector_map; | 86 | const struct usbmix_selector_map *selector_map; |
87 | }; | 87 | }; |
88 | 88 | ||
89 | enum { | ||
90 | USB_MIXER_BOOLEAN, | ||
91 | USB_MIXER_INV_BOOLEAN, | ||
92 | USB_MIXER_S8, | ||
93 | USB_MIXER_U8, | ||
94 | USB_MIXER_S16, | ||
95 | USB_MIXER_U16, | ||
96 | }; | ||
97 | |||
98 | |||
99 | /*E-mu 0202/0404/0204 eXtension Unit(XU) control*/ | 89 | /*E-mu 0202/0404/0204 eXtension Unit(XU) control*/ |
100 | enum { | 90 | enum { |
101 | USB_XU_CLOCK_RATE = 0xe301, | 91 | USB_XU_CLOCK_RATE = 0xe301, |
@@ -535,20 +525,21 @@ static int check_matrix_bitmap(unsigned char *bmap, int ich, int och, int num_ou | |||
535 | * if failed, give up and free the control instance. | 525 | * if failed, give up and free the control instance. |
536 | */ | 526 | */ |
537 | 527 | ||
538 | static int add_control_to_empty(struct mixer_build *state, struct snd_kcontrol *kctl) | 528 | int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer, |
529 | struct snd_kcontrol *kctl) | ||
539 | { | 530 | { |
540 | struct usb_mixer_elem_info *cval = kctl->private_data; | 531 | struct usb_mixer_elem_info *cval = kctl->private_data; |
541 | int err; | 532 | int err; |
542 | 533 | ||
543 | while (snd_ctl_find_id(state->chip->card, &kctl->id)) | 534 | while (snd_ctl_find_id(mixer->chip->card, &kctl->id)) |
544 | kctl->id.index++; | 535 | kctl->id.index++; |
545 | if ((err = snd_ctl_add(state->chip->card, kctl)) < 0) { | 536 | if ((err = snd_ctl_add(mixer->chip->card, kctl)) < 0) { |
546 | snd_printd(KERN_ERR "cannot add control (err = %d)\n", err); | 537 | snd_printd(KERN_ERR "cannot add control (err = %d)\n", err); |
547 | return err; | 538 | return err; |
548 | } | 539 | } |
549 | cval->elem_id = &kctl->id; | 540 | cval->elem_id = &kctl->id; |
550 | cval->next_id_elem = state->mixer->id_elems[cval->id]; | 541 | cval->next_id_elem = mixer->id_elems[cval->id]; |
551 | state->mixer->id_elems[cval->id] = cval; | 542 | mixer->id_elems[cval->id] = cval; |
552 | return 0; | 543 | return 0; |
553 | } | 544 | } |
554 | 545 | ||
@@ -984,6 +975,9 @@ static struct snd_kcontrol_new usb_feature_unit_ctl_ro = { | |||
984 | .put = NULL, | 975 | .put = NULL, |
985 | }; | 976 | }; |
986 | 977 | ||
978 | /* This symbol is exported in order to allow the mixer quirks to | ||
979 | * hook up to the standard feature unit control mechanism */ | ||
980 | struct snd_kcontrol_new *snd_usb_feature_unit_ctl = &usb_feature_unit_ctl; | ||
987 | 981 | ||
988 | /* | 982 | /* |
989 | * build a feature control | 983 | * build a feature control |
@@ -1176,7 +1170,7 @@ static void build_feature_ctl(struct mixer_build *state, void *raw_desc, | |||
1176 | 1170 | ||
1177 | snd_printdd(KERN_INFO "[%d] FU [%s] ch = %d, val = %d/%d/%d\n", | 1171 | snd_printdd(KERN_INFO "[%d] FU [%s] ch = %d, val = %d/%d/%d\n", |
1178 | cval->id, kctl->id.name, cval->channels, cval->min, cval->max, cval->res); | 1172 | cval->id, kctl->id.name, cval->channels, cval->min, cval->max, cval->res); |
1179 | add_control_to_empty(state, kctl); | 1173 | snd_usb_mixer_add_control(state->mixer, kctl); |
1180 | } | 1174 | } |
1181 | 1175 | ||
1182 | 1176 | ||
@@ -1340,7 +1334,7 @@ static void build_mixer_unit_ctl(struct mixer_build *state, | |||
1340 | 1334 | ||
1341 | snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n", | 1335 | snd_printdd(KERN_INFO "[%d] MU [%s] ch = %d, val = %d/%d\n", |
1342 | cval->id, kctl->id.name, cval->channels, cval->min, cval->max); | 1336 | cval->id, kctl->id.name, cval->channels, cval->min, cval->max); |
1343 | add_control_to_empty(state, kctl); | 1337 | snd_usb_mixer_add_control(state->mixer, kctl); |
1344 | } | 1338 | } |
1345 | 1339 | ||
1346 | 1340 | ||
@@ -1641,7 +1635,7 @@ static int build_audio_procunit(struct mixer_build *state, int unitid, void *raw | |||
1641 | 1635 | ||
1642 | snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n", | 1636 | snd_printdd(KERN_INFO "[%d] PU [%s] ch = %d, val = %d/%d\n", |
1643 | cval->id, kctl->id.name, cval->channels, cval->min, cval->max); | 1637 | cval->id, kctl->id.name, cval->channels, cval->min, cval->max); |
1644 | if ((err = add_control_to_empty(state, kctl)) < 0) | 1638 | if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0) |
1645 | return err; | 1639 | return err; |
1646 | } | 1640 | } |
1647 | return 0; | 1641 | return 0; |
@@ -1858,7 +1852,7 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid, void | |||
1858 | 1852 | ||
1859 | snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n", | 1853 | snd_printdd(KERN_INFO "[%d] SU [%s] items = %d\n", |
1860 | cval->id, kctl->id.name, desc->bNrInPins); | 1854 | cval->id, kctl->id.name, desc->bNrInPins); |
1861 | if ((err = add_control_to_empty(state, kctl)) < 0) | 1855 | if ((err = snd_usb_mixer_add_control(state->mixer, kctl)) < 0) |
1862 | return err; | 1856 | return err; |
1863 | 1857 | ||
1864 | return 0; | 1858 | return 0; |
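In the mixer.c hunks above, add_control_to_empty() becomes the exported snd_usb_mixer_add_control() so that mixer quirks can register controls through the same path; the function keeps bumping the control index until the (name, index) pair is unused on the card. A standalone model of that collision-avoidance loop, where the "card" is just an array:

#include <stdio.h>
#include <string.h>

struct ctl_id { char name[44]; int index; };

static struct ctl_id card[16];
static int n_ctls;

static int find_id(const struct ctl_id *id)
{
	int i;

	for (i = 0; i < n_ctls; i++)
		if (!strcmp(card[i].name, id->name) &&
		    card[i].index == id->index)
			return 1;
	return 0;
}

static void add_control(struct ctl_id id)
{
	while (find_id(&id))
		id.index++;           /* same trick as the patch */
	card[n_ctls++] = id;
	printf("added '%s' index %d\n", id.name, id.index);
}

int main(void)
{
	struct ctl_id a = { "PCM Playback Volume", 0 };

	add_control(a);               /* gets index 0 */
	add_control(a);               /* collides, bumped to index 1 */
	return 0;
}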
diff --git a/sound/usb/mixer.h b/sound/usb/mixer.h index b4a2c8165e4b..ae1a14dcfe82 100644 --- a/sound/usb/mixer.h +++ b/sound/usb/mixer.h | |||
@@ -24,7 +24,16 @@ struct usb_mixer_interface { | |||
24 | u8 xonar_u1_status; | 24 | u8 xonar_u1_status; |
25 | }; | 25 | }; |
26 | 26 | ||
27 | #define MAX_CHANNELS 10 /* max logical channels */ | 27 | #define MAX_CHANNELS 16 /* max logical channels */ |
28 | |||
29 | enum { | ||
30 | USB_MIXER_BOOLEAN, | ||
31 | USB_MIXER_INV_BOOLEAN, | ||
32 | USB_MIXER_S8, | ||
33 | USB_MIXER_U8, | ||
34 | USB_MIXER_S16, | ||
35 | USB_MIXER_U16, | ||
36 | }; | ||
28 | 37 | ||
29 | struct usb_mixer_elem_info { | 38 | struct usb_mixer_elem_info { |
30 | struct usb_mixer_interface *mixer; | 39 | struct usb_mixer_interface *mixer; |
@@ -55,4 +64,7 @@ int snd_usb_mixer_set_ctl_value(struct usb_mixer_elem_info *cval, | |||
55 | void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer); | 64 | void snd_usb_mixer_inactivate(struct usb_mixer_interface *mixer); |
56 | int snd_usb_mixer_activate(struct usb_mixer_interface *mixer); | 65 | int snd_usb_mixer_activate(struct usb_mixer_interface *mixer); |
57 | 66 | ||
67 | int snd_usb_mixer_add_control(struct usb_mixer_interface *mixer, | ||
68 | struct snd_kcontrol *kctl); | ||
69 | |||
58 | #endif /* __USBMIXER_H */ | 70 | #endif /* __USBMIXER_H */ |
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index 9146cffa6ede..3d0f4873112b 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c | |||
@@ -40,6 +40,8 @@ | |||
40 | #include "mixer_quirks.h" | 40 | #include "mixer_quirks.h" |
41 | #include "helper.h" | 41 | #include "helper.h" |
42 | 42 | ||
43 | extern struct snd_kcontrol_new *snd_usb_feature_unit_ctl; | ||
44 | |||
43 | /* | 45 | /* |
44 | * Sound Blaster remote control configuration | 46 | * Sound Blaster remote control configuration |
45 | * | 47 | * |
@@ -492,6 +494,69 @@ static int snd_nativeinstruments_create_mixer(struct usb_mixer_interface *mixer, | |||
492 | return err; | 494 | return err; |
493 | } | 495 | } |
494 | 496 | ||
497 | /* M-Audio FastTrack Ultra quirks */ | ||
498 | |||
499 | /* private_free callback */ | ||
500 | static void usb_mixer_elem_free(struct snd_kcontrol *kctl) | ||
501 | { | ||
502 | kfree(kctl->private_data); | ||
503 | kctl->private_data = NULL; | ||
504 | } | ||
505 | |||
506 | static int snd_maudio_ftu_create_ctl(struct usb_mixer_interface *mixer, | ||
507 | int in, int out, const char *name) | ||
508 | { | ||
509 | struct usb_mixer_elem_info *cval; | ||
510 | struct snd_kcontrol *kctl; | ||
511 | |||
512 | cval = kzalloc(sizeof(*cval), GFP_KERNEL); | ||
513 | if (!cval) | ||
514 | return -ENOMEM; | ||
515 | |||
516 | cval->id = 5; | ||
517 | cval->mixer = mixer; | ||
518 | cval->val_type = USB_MIXER_S16; | ||
519 | cval->channels = 1; | ||
520 | cval->control = out + 1; | ||
521 | cval->cmask = 1 << in; | ||
522 | |||
523 | kctl = snd_ctl_new1(snd_usb_feature_unit_ctl, cval); | ||
524 | if (!kctl) { | ||
525 | kfree(cval); | ||
526 | return -ENOMEM; | ||
527 | } | ||
528 | |||
529 | snprintf(kctl->id.name, sizeof(kctl->id.name), name); | ||
530 | kctl->private_free = usb_mixer_elem_free; | ||
531 | return snd_usb_mixer_add_control(mixer, kctl); | ||
532 | } | ||
533 | |||
534 | static int snd_maudio_ftu_create_mixer(struct usb_mixer_interface *mixer) | ||
535 | { | ||
536 | char name[64]; | ||
537 | int in, out, err; | ||
538 | |||
539 | for (out = 0; out < 8; out++) { | ||
540 | for (in = 0; in < 8; in++) { | ||
541 | snprintf(name, sizeof(name), | ||
542 | "AIn%d - Out%d Capture Volume", in + 1, out + 1); | ||
543 | err = snd_maudio_ftu_create_ctl(mixer, in, out, name); | ||
544 | if (err < 0) | ||
545 | return err; | ||
546 | } | ||
547 | |||
548 | for (in = 8; in < 16; in++) { | ||
549 | snprintf(name, sizeof(name), | ||
550 | "DIn%d - Out%d Playback Volume", in - 7, out + 1); | ||
551 | err = snd_maudio_ftu_create_ctl(mixer, in, out, name); | ||
552 | if (err < 0) | ||
553 | return err; | ||
554 | } | ||
555 | } | ||
556 | |||
557 | return 0; | ||
558 | } | ||
559 | |||
495 | void snd_emuusb_set_samplerate(struct snd_usb_audio *chip, | 560 | void snd_emuusb_set_samplerate(struct snd_usb_audio *chip, |
496 | unsigned char samplerate_id) | 561 | unsigned char samplerate_id) |
497 | { | 562 | { |
@@ -533,6 +598,11 @@ int snd_usb_mixer_apply_create_quirk(struct usb_mixer_interface *mixer) | |||
533 | snd_audigy2nx_proc_read); | 598 | snd_audigy2nx_proc_read); |
534 | break; | 599 | break; |
535 | 600 | ||
601 | case USB_ID(0x0763, 0x2080): /* M-Audio Fast Track Ultra */ | ||
602 | case USB_ID(0x0763, 0x2081): /* M-Audio Fast Track Ultra 8R */ | ||
603 | err = snd_maudio_ftu_create_mixer(mixer); | ||
604 | break; | ||
605 | |||
536 | case USB_ID(0x0b05, 0x1739): | 606 | case USB_ID(0x0b05, 0x1739): |
537 | case USB_ID(0x0b05, 0x1743): | 607 | case USB_ID(0x0b05, 0x1743): |
538 | err = snd_xonar_u1_controls_create(mixer); | 608 | err = snd_xonar_u1_controls_create(mixer); |
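The M-Audio Fast Track Ultra quirk above builds an 8x16 monitor-mixer matrix on feature unit 5, addressing each crosspoint with control = out + 1 and cmask = 1 << in; inputs 8-15 are also why MAX_CHANNELS grows to 16 in mixer.h. A standalone sketch of just the naming and addressing scheme (no USB traffic, purely illustrative):

#include <stdio.h>

int main(void)
{
	int in, out;
	char name[64];

	for (out = 0; out < 8; out++) {
		for (in = 0; in < 16; in++) {
			if (in < 8)	/* analog inputs: capture volumes */
				snprintf(name, sizeof(name),
					 "AIn%d - Out%d Capture Volume",
					 in + 1, out + 1);
			else		/* digital returns: playback volumes */
				snprintf(name, sizeof(name),
					 "DIn%d - Out%d Playback Volume",
					 in - 7, out + 1);
			printf("%-32s control=%d cmask=0x%04x\n",
			       name, out + 1, 1 << in);
		}
	}
	return 0;
}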
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 78792a8900c3..0b2ae8e1c02d 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h | |||
@@ -1988,7 +1988,7 @@ YAMAHA_DEVICE(0x7010, "UB99"), | |||
1988 | .data = & (const struct snd_usb_audio_quirk[]) { | 1988 | .data = & (const struct snd_usb_audio_quirk[]) { |
1989 | { | 1989 | { |
1990 | .ifnum = 0, | 1990 | .ifnum = 0, |
1991 | .type = QUIRK_IGNORE_INTERFACE | 1991 | .type = QUIRK_AUDIO_STANDARD_MIXER, |
1992 | }, | 1992 | }, |
1993 | { | 1993 | { |
1994 | .ifnum = 1, | 1994 | .ifnum = 1, |
@@ -2055,7 +2055,7 @@ YAMAHA_DEVICE(0x7010, "UB99"), | |||
2055 | .data = & (const struct snd_usb_audio_quirk[]) { | 2055 | .data = & (const struct snd_usb_audio_quirk[]) { |
2056 | { | 2056 | { |
2057 | .ifnum = 0, | 2057 | .ifnum = 0, |
2058 | .type = QUIRK_IGNORE_INTERFACE | 2058 | .type = QUIRK_AUDIO_STANDARD_MIXER, |
2059 | }, | 2059 | }, |
2060 | { | 2060 | { |
2061 | .ifnum = 1, | 2061 | .ifnum = 1, |
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index bd13d7257240..2e969cbb393b 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/usb.h> | 19 | #include <linux/usb.h> |
20 | #include <linux/usb/audio.h> | 20 | #include <linux/usb/audio.h> |
21 | 21 | ||
22 | #include <sound/control.h> | ||
22 | #include <sound/core.h> | 23 | #include <sound/core.h> |
23 | #include <sound/info.h> | 24 | #include <sound/info.h> |
24 | #include <sound/pcm.h> | 25 | #include <sound/pcm.h> |
@@ -263,6 +264,20 @@ static int create_uaxx_quirk(struct snd_usb_audio *chip, | |||
263 | } | 264 | } |
264 | 265 | ||
265 | /* | 266 | /* |
267 | * Create a standard mixer for the specified interface. | ||
268 | */ | ||
269 | static int create_standard_mixer_quirk(struct snd_usb_audio *chip, | ||
270 | struct usb_interface *iface, | ||
271 | struct usb_driver *driver, | ||
272 | const struct snd_usb_audio_quirk *quirk) | ||
273 | { | ||
274 | if (quirk->ifnum < 0) | ||
275 | return 0; | ||
276 | |||
277 | return snd_usb_create_mixer(chip, quirk->ifnum, 0); | ||
278 | } | ||
279 | |||
280 | /* | ||
266 | * audio-interface quirks | 281 | * audio-interface quirks |
267 | * | 282 | * |
268 | * returns zero if no standard audio/MIDI parsing is needed. | 283 | * returns zero if no standard audio/MIDI parsing is needed. |
@@ -294,7 +309,8 @@ int snd_usb_create_quirk(struct snd_usb_audio *chip, | |||
294 | [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk, | 309 | [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk, |
295 | [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk, | 310 | [QUIRK_AUDIO_FIXED_ENDPOINT] = create_fixed_stream_quirk, |
296 | [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk, | 311 | [QUIRK_AUDIO_EDIROL_UAXX] = create_uaxx_quirk, |
297 | [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk | 312 | [QUIRK_AUDIO_ALIGN_TRANSFER] = create_align_transfer_quirk, |
313 | [QUIRK_AUDIO_STANDARD_MIXER] = create_standard_mixer_quirk, | ||
298 | }; | 314 | }; |
299 | 315 | ||
300 | if (quirk->type < QUIRK_TYPE_COUNT) { | 316 | if (quirk->type < QUIRK_TYPE_COUNT) { |
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h index 32f2a97f2f14..1e79986b5777 100644 --- a/sound/usb/usbaudio.h +++ b/sound/usb/usbaudio.h | |||
@@ -84,6 +84,7 @@ enum quirk_type { | |||
84 | QUIRK_AUDIO_FIXED_ENDPOINT, | 84 | QUIRK_AUDIO_FIXED_ENDPOINT, |
85 | QUIRK_AUDIO_EDIROL_UAXX, | 85 | QUIRK_AUDIO_EDIROL_UAXX, |
86 | QUIRK_AUDIO_ALIGN_TRANSFER, | 86 | QUIRK_AUDIO_ALIGN_TRANSFER, |
87 | QUIRK_AUDIO_STANDARD_MIXER, | ||
87 | 88 | ||
88 | QUIRK_TYPE_COUNT | 89 | QUIRK_TYPE_COUNT |
89 | }; | 90 | }; |
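The quirks.c and usbaudio.h hunks above add QUIRK_AUDIO_STANDARD_MIXER as one more entry in the type-indexed handler table, mapping it to a handler that simply builds the standard mixer for the quirked interface (which is what the two quirks-table.h entries now request instead of ignoring interface 0). A standalone model of that dispatch pattern, with the handlers reduced to stubs:

#include <stdio.h>

enum quirk_type {
	QUIRK_IGNORE_INTERFACE,
	QUIRK_AUDIO_STANDARD_MIXER,
	QUIRK_TYPE_COUNT
};

typedef int (*quirk_func_t)(int ifnum);

static int ignore_interface_quirk(int ifnum)
{
	(void)ifnum;
	return 0;	/* nothing created for this interface */
}

static int standard_mixer_quirk(int ifnum)
{
	if (ifnum < 0)
		return 0;
	printf("creating standard mixer on interface %d\n", ifnum);
	return 0;
}

/* quirk->type indexes the handler table, as in snd_usb_create_quirk() */
static const quirk_func_t quirk_funcs[QUIRK_TYPE_COUNT] = {
	[QUIRK_IGNORE_INTERFACE]     = ignore_interface_quirk,
	[QUIRK_AUDIO_STANDARD_MIXER] = standard_mixer_quirk,
};

int main(void)
{
	return quirk_funcs[QUIRK_AUDIO_STANDARD_MIXER](0);
}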