author     Nigel Cunningham <nigel@tuxonice.net>    2009-12-06 10:15:53 -0500
committer  Rafael J. Wysocki <rjw@sisk.pl>          2009-12-06 10:15:53 -0500
commit     0414f2ec03d72dc4e569627e6112fa6dafc99a79
tree       679fe5c0b89cc58194d187e25787f79bb7da3441 /kernel
parent     64357ed468025614d48daa6cc87674ae5616f8fb
PM / Hibernate: Move swap functions to kernel/power/swap.c.
Move the hibernation code's functions for allocating and freeing swap
from swsusp.c to swap.c, which is where one would expect to find them.
Signed-off-by: Nigel Cunningham <nigel@tuxonice.net>
Signed-off-by: Rafael J. Wysocki <rjw@sisk.pl>
Diffstat (limited to 'kernel')
 kernel/power/swap.c   | 101 +
 kernel/power/swsusp.c | 101 -
 2 files changed, 101 insertions(+), 101 deletions(-)
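For context, the moved functions form a small allocate-and-roll-back API: alloc_swapdev_block() hands out one swap sector at a time while recording it in the swsusp_extents tree, and free_all_swap_pages() releases everything recorded so far. A minimal sketch of the calling pattern this supports; save_image_data() and its nr_pages loop are hypothetical, only alloc_swapdev_block() and free_all_swap_pages() come from this commit:

/*
 * Hypothetical caller, sketching the error-handling pattern the moved
 * API enables. Only alloc_swapdev_block() and free_all_swap_pages()
 * are real functions from this commit; the wrapper is illustrative.
 */
static int save_image_data(int swap, unsigned int nr_pages)
{
	sector_t sector;

	while (nr_pages--) {
		sector = alloc_swapdev_block(swap);	/* returns 0 on failure */
		if (!sector) {
			/* Roll back every page recorded in swsusp_extents */
			free_all_swap_pages(swap);
			return -ENOSPC;
		}
		/* ... write one page of the image to 'sector' ... */
	}
	return 0;
}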
diff --git a/kernel/power/swap.c b/kernel/power/swap.c
index 890f6b11b1d3..0ce9b00f5d33 100644
--- a/kernel/power/swap.c
+++ b/kernel/power/swap.c
@@ -38,6 +38,107 @@ struct swsusp_header {
 
 static struct swsusp_header *swsusp_header;
 
+/**
+ * The following functions are used for tracing the allocated
+ * swap pages, so that they can be freed in case of an error.
+ */
+
+struct swsusp_extent {
+	struct rb_node node;
+	unsigned long start;
+	unsigned long end;
+};
+
+static struct rb_root swsusp_extents = RB_ROOT;
+
+static int swsusp_extents_insert(unsigned long swap_offset)
+{
+	struct rb_node **new = &(swsusp_extents.rb_node);
+	struct rb_node *parent = NULL;
+	struct swsusp_extent *ext;
+
+	/* Figure out where to put the new node */
+	while (*new) {
+		ext = container_of(*new, struct swsusp_extent, node);
+		parent = *new;
+		if (swap_offset < ext->start) {
+			/* Try to merge */
+			if (swap_offset == ext->start - 1) {
+				ext->start--;
+				return 0;
+			}
+			new = &((*new)->rb_left);
+		} else if (swap_offset > ext->end) {
+			/* Try to merge */
+			if (swap_offset == ext->end + 1) {
+				ext->end++;
+				return 0;
+			}
+			new = &((*new)->rb_right);
+		} else {
+			/* It already is in the tree */
+			return -EINVAL;
+		}
+	}
+	/* Add the new node and rebalance the tree. */
+	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
+	if (!ext)
+		return -ENOMEM;
+
+	ext->start = swap_offset;
+	ext->end = swap_offset;
+	rb_link_node(&ext->node, parent, new);
+	rb_insert_color(&ext->node, &swsusp_extents);
+	return 0;
+}
+
+/**
+ * alloc_swapdev_block - allocate a swap page and register that it has
+ * been allocated, so that it can be freed in case of an error.
+ */
+
+sector_t alloc_swapdev_block(int swap)
+{
+	unsigned long offset;
+
+	offset = swp_offset(get_swap_page_of_type(swap));
+	if (offset) {
+		if (swsusp_extents_insert(offset))
+			swap_free(swp_entry(swap, offset));
+		else
+			return swapdev_block(swap, offset);
+	}
+	return 0;
+}
+
+/**
+ * free_all_swap_pages - free swap pages allocated for saving image data.
+ * It also frees the extents used to register which swap entries had been
+ * allocated.
+ */
+
+void free_all_swap_pages(int swap)
+{
+	struct rb_node *node;
+
+	while ((node = swsusp_extents.rb_node)) {
+		struct swsusp_extent *ext;
+		unsigned long offset;
+
+		ext = container_of(node, struct swsusp_extent, node);
+		rb_erase(node, &swsusp_extents);
+		for (offset = ext->start; offset <= ext->end; offset++)
+			swap_free(swp_entry(swap, offset));
+
+		kfree(ext);
+	}
+}
+
+int swsusp_swap_in_use(void)
+{
+	return (swsusp_extents.rb_node != NULL);
+}
+
 /*
  * General things
  */
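A note on the data structure being moved: swsusp_extents_insert() does not store each allocated offset as its own node. Adjacent offsets are coalesced into [start, end] extents kept in an rbtree, so allocating N consecutive swap pages costs a single node. The standalone userspace sketch below demonstrates the same merge rule with a plain linked list instead of an rbtree; it is purely illustrative and shares no code with the kernel:

#include <stdio.h>
#include <stdlib.h>

/* Simplified extent with the same merge rule as swsusp_extents_insert(),
 * minus the rbtree: a linear scan over a singly linked list. */
struct extent {
	unsigned long start, end;
	struct extent *next;
};

static struct extent *extents;

static int extent_insert(unsigned long off)
{
	struct extent *e;

	for (e = extents; e; e = e->next) {
		if (off + 1 == e->start) {	/* merge on the left */
			e->start--;
			return 0;
		}
		if (off == e->end + 1) {	/* merge on the right */
			e->end++;
			return 0;
		}
		if (off >= e->start && off <= e->end)
			return -1;		/* already recorded */
	}
	/* No adjacent extent found: start a new single-page extent */
	e = calloc(1, sizeof(*e));
	if (!e)
		return -1;
	e->start = e->end = off;
	e->next = extents;
	extents = e;
	return 0;
}

int main(void)
{
	/* Three consecutive offsets collapse into the single extent [10,12] */
	extent_insert(10);
	extent_insert(11);
	extent_insert(12);
	printf("[%lu,%lu]\n", extents->start, extents->end);
	return 0;
}

Like the kernel version, this merges a new offset into one neighbouring extent but never fuses two existing extents together; for the mostly consecutive offsets that swap allocation tends to produce, that is enough to keep the structure small.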
diff --git a/kernel/power/swsusp.c b/kernel/power/swsusp.c
index 6a07f4dbf2f8..57222d2089b8 100644
--- a/kernel/power/swsusp.c
+++ b/kernel/power/swsusp.c
@@ -58,107 +58,6 @@
 int in_suspend __nosavedata = 0;
 
 /**
- * The following functions are used for tracing the allocated
- * swap pages, so that they can be freed in case of an error.
- */
-
-struct swsusp_extent {
-	struct rb_node node;
-	unsigned long start;
-	unsigned long end;
-};
-
-static struct rb_root swsusp_extents = RB_ROOT;
-
-static int swsusp_extents_insert(unsigned long swap_offset)
-{
-	struct rb_node **new = &(swsusp_extents.rb_node);
-	struct rb_node *parent = NULL;
-	struct swsusp_extent *ext;
-
-	/* Figure out where to put the new node */
-	while (*new) {
-		ext = container_of(*new, struct swsusp_extent, node);
-		parent = *new;
-		if (swap_offset < ext->start) {
-			/* Try to merge */
-			if (swap_offset == ext->start - 1) {
-				ext->start--;
-				return 0;
-			}
-			new = &((*new)->rb_left);
-		} else if (swap_offset > ext->end) {
-			/* Try to merge */
-			if (swap_offset == ext->end + 1) {
-				ext->end++;
-				return 0;
-			}
-			new = &((*new)->rb_right);
-		} else {
-			/* It already is in the tree */
-			return -EINVAL;
-		}
-	}
-	/* Add the new node and rebalance the tree. */
-	ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL);
-	if (!ext)
-		return -ENOMEM;
-
-	ext->start = swap_offset;
-	ext->end = swap_offset;
-	rb_link_node(&ext->node, parent, new);
-	rb_insert_color(&ext->node, &swsusp_extents);
-	return 0;
-}
-
-/**
- * alloc_swapdev_block - allocate a swap page and register that it has
- * been allocated, so that it can be freed in case of an error.
- */
-
-sector_t alloc_swapdev_block(int swap)
-{
-	unsigned long offset;
-
-	offset = swp_offset(get_swap_page_of_type(swap));
-	if (offset) {
-		if (swsusp_extents_insert(offset))
-			swap_free(swp_entry(swap, offset));
-		else
-			return swapdev_block(swap, offset);
-	}
-	return 0;
-}
-
-/**
- * free_all_swap_pages - free swap pages allocated for saving image data.
- * It also frees the extents used to register which swap entries had been
- * allocated.
- */
-
-void free_all_swap_pages(int swap)
-{
-	struct rb_node *node;
-
-	while ((node = swsusp_extents.rb_node)) {
-		struct swsusp_extent *ext;
-		unsigned long offset;
-
-		ext = container_of(node, struct swsusp_extent, node);
-		rb_erase(node, &swsusp_extents);
-		for (offset = ext->start; offset <= ext->end; offset++)
-			swap_free(swp_entry(swap, offset));
-
-		kfree(ext);
-	}
-}
-
-int swsusp_swap_in_use(void)
-{
-	return (swsusp_extents.rb_node != NULL);
-}
-
-/**
  * swsusp_show_speed - print the time elapsed between two events represented by
  * @start and @stop
  *
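Finally, swsusp_swap_in_use() gives the rest of the kernel a way to ask whether hibernation currently owns swap pages. A hedged sketch of the kind of guard this enables; the wrapper shown is hypothetical, not a call site introduced by this commit:

/* Hypothetical guard: refuse to tear down swap while the hibernation
 * image still has pages recorded in swsusp_extents. Only
 * swsusp_swap_in_use() is real; the wrapper is illustrative. */
static int may_release_swap(void)
{
	if (swsusp_swap_in_use())
		return -EBUSY;	/* hibernation still holds swap pages */
	return 0;
}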