author     Ganesh Mahendran <opensource.ganesh@gmail.com>  2014-12-18 19:17:40 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-12-18 22:08:11 -0500
commit     66cdef663cd7a97aff6bbbf41a81a0205dc81ba2 (patch)
tree       f41ba2279a16cd072461e4e91cdac878a3cfd5ad /mm
parent     136f49b9171074872f2a14ad0ab10486d1ba13ca (diff)
mm/zsmalloc: adjust order of functions
Currently the functions in zsmalloc.c are not arranged in a readable and
reasonable order. As more and more functions are added, we may run into the
following inconvenience. For example:
Current functions:
void zs_init()
{
}
static void get_maxobj_per_zspage()
{
}
Then I want to add a func_1() that is called from zs_init(), and this newly
added func_1() uses get_maxobj_per_zspage(), which is defined below zs_init():
void func_1()
{
        get_maxobj_per_zspage();
}

void zs_init()
{
        func_1();
}
static void get_maxobj_per_zspage()
{
}
This will cause a compilation error, so we must add a forward declaration:
static void get_maxobj_per_zspage();
before func_1() if we do not move get_maxobj_per_zspage() above func_1(), as
sketched below.
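For illustration only, here is a minimal compilable sketch of the
forward-declaration workaround described above (the names follow the
simplified example in this message, not the real zsmalloc code):

        /* forward declaration, needed only because of the ordering */
        static void get_maxobj_per_zspage(void);

        void func_1(void)
        {
                get_maxobj_per_zspage();
        }

        void zs_init(void)
        {
                func_1();
        }

        static void get_maxobj_per_zspage(void)
        {
        }

Reordering the definitions, as this patch does, removes the need for such
extra declarations.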
In addition, putting the module_[init|exit] functions at the bottom of the
file follows the usual convention.

So this patch adjusts the function order as follows:
/* helper functions */
...
obj_location_to_handle()
...
/* Some exported functions */
...
zs_map_object()
zs_unmap_object()
zs_malloc()
zs_free()
zs_init()
zs_exit()
Signed-off-by: Ganesh Mahendran <opensource.ganesh@gmail.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Acked-by: Minchan Kim <minchan@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--  mm/zsmalloc.c  374
1 file changed, 187 insertions(+), 187 deletions(-)
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index 4d0a063145ec..b72403927aa4 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -884,19 +884,6 @@ static struct notifier_block zs_cpu_nb = {
         .notifier_call = zs_cpu_notifier
 };
 
-static void zs_unregister_cpu_notifier(void)
-{
-        int cpu;
-
-        cpu_notifier_register_begin();
-
-        for_each_online_cpu(cpu)
-                zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
-        __unregister_cpu_notifier(&zs_cpu_nb);
-
-        cpu_notifier_register_done();
-}
-
 static int zs_register_cpu_notifier(void)
 {
         int cpu, uninitialized_var(ret);
@@ -914,40 +901,28 @@ static int zs_register_cpu_notifier(void)
         return notifier_to_errno(ret);
 }
 
-static void init_zs_size_classes(void)
+static void zs_unregister_cpu_notifier(void)
 {
-        int nr;
+        int cpu;
 
-        nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;
-        if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
-                nr += 1;
+        cpu_notifier_register_begin();
 
-        zs_size_classes = nr;
-}
+        for_each_online_cpu(cpu)
+                zs_cpu_notifier(NULL, CPU_DEAD, (void *)(long)cpu);
+        __unregister_cpu_notifier(&zs_cpu_nb);
 
-static void __exit zs_exit(void)
-{
-#ifdef CONFIG_ZPOOL
-        zpool_unregister_driver(&zs_zpool_driver);
-#endif
-        zs_unregister_cpu_notifier();
+        cpu_notifier_register_done();
 }
 
-static int __init zs_init(void)
+static void init_zs_size_classes(void)
 {
-        int ret = zs_register_cpu_notifier();
-
-        if (ret) {
-                zs_unregister_cpu_notifier();
-                return ret;
-        }
+        int nr;
 
-        init_zs_size_classes();
+        nr = (ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) / ZS_SIZE_CLASS_DELTA + 1;
+        if ((ZS_MAX_ALLOC_SIZE - ZS_MIN_ALLOC_SIZE) % ZS_SIZE_CLASS_DELTA)
+                nr += 1;
 
-#ifdef CONFIG_ZPOOL
-        zpool_register_driver(&zs_zpool_driver);
-#endif
-        return 0;
+        zs_size_classes = nr;
 }
 
 static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
@@ -967,113 +942,101 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
         return true;
 }
 
+unsigned long zs_get_total_pages(struct zs_pool *pool)
+{
+        return atomic_long_read(&pool->pages_allocated);
+}
+EXPORT_SYMBOL_GPL(zs_get_total_pages);
+
 /**
- * zs_create_pool - Creates an allocation pool to work from.
- * @flags: allocation flags used to allocate pool metadata
+ * zs_map_object - get address of allocated object from handle.
+ * @pool: pool from which the object was allocated
+ * @handle: handle returned from zs_malloc
  *
- * This function must be called before anything when using
- * the zsmalloc allocator.
+ * Before using an object allocated from zs_malloc, it must be mapped using
+ * this function. When done with the object, it must be unmapped using
+ * zs_unmap_object.
  *
- * On success, a pointer to the newly created pool is returned,
- * otherwise NULL.
+ * Only one object can be mapped per cpu at a time. There is no protection
+ * against nested mappings.
+ *
+ * This function returns with preemption and page faults disabled.
  */
-struct zs_pool *zs_create_pool(gfp_t flags)
+void *zs_map_object(struct zs_pool *pool, unsigned long handle,
+                        enum zs_mapmode mm)
 {
-        int i;
-        struct zs_pool *pool;
-        struct size_class *prev_class = NULL;
+        struct page *page;
+        unsigned long obj_idx, off;
 
-        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
-        if (!pool)
-                return NULL;
+        unsigned int class_idx;
+        enum fullness_group fg;
+        struct size_class *class;
+        struct mapping_area *area;
+        struct page *pages[2];
 
-        pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
-                        GFP_KERNEL);
-        if (!pool->size_class) {
-                kfree(pool);
-                return NULL;
-        }
+        BUG_ON(!handle);
 
         /*
-         * Iterate reversly, because, size of size_class that we want to use
-         * for merging should be larger or equal to current size.
+         * Because we use per-cpu mapping areas shared among the
+         * pools/users, we can't allow mapping in interrupt context
+         * because it can corrupt another users mappings.
          */
-        for (i = zs_size_classes - 1; i >= 0; i--) {
-                int size;
-                int pages_per_zspage;
-                struct size_class *class;
-
-                size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
-                if (size > ZS_MAX_ALLOC_SIZE)
-                        size = ZS_MAX_ALLOC_SIZE;
-                pages_per_zspage = get_pages_per_zspage(size);
-
-                /*
-                 * size_class is used for normal zsmalloc operation such
-                 * as alloc/free for that size. Although it is natural that we
-                 * have one size_class for each size, there is a chance that we
-                 * can get more memory utilization if we use one size_class for
-                 * many different sizes whose size_class have same
-                 * characteristics. So, we makes size_class point to
-                 * previous size_class if possible.
-                 */
-                if (prev_class) {
-                        if (can_merge(prev_class, size, pages_per_zspage)) {
-                                pool->size_class[i] = prev_class;
-                                continue;
-                        }
-                }
-
-                class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
-                if (!class)
-                        goto err;
+        BUG_ON(in_interrupt());
 
-                class->size = size;
-                class->index = i;
-                class->pages_per_zspage = pages_per_zspage;
-                spin_lock_init(&class->lock);
-                pool->size_class[i] = class;
+        obj_handle_to_location(handle, &page, &obj_idx);
+        get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+        class = pool->size_class[class_idx];
+        off = obj_idx_to_offset(page, obj_idx, class->size);
 
-                prev_class = class;
+        area = &get_cpu_var(zs_map_area);
+        area->vm_mm = mm;
+        if (off + class->size <= PAGE_SIZE) {
+                /* this object is contained entirely within a page */
+                area->vm_addr = kmap_atomic(page);
+                return area->vm_addr + off;
         }
 
-        pool->flags = flags;
-
-        return pool;
+        /* this object spans two pages */
+        pages[0] = page;
+        pages[1] = get_next_page(page);
+        BUG_ON(!pages[1]);
 
-err:
-        zs_destroy_pool(pool);
-        return NULL;
+        return __zs_map_object(area, pages, off, class->size);
 }
-EXPORT_SYMBOL_GPL(zs_create_pool);
+EXPORT_SYMBOL_GPL(zs_map_object);
 
-void zs_destroy_pool(struct zs_pool *pool)
+void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
 {
-        int i;
+        struct page *page;
+        unsigned long obj_idx, off;
 
-        for (i = 0; i < zs_size_classes; i++) {
-                int fg;
-                struct size_class *class = pool->size_class[i];
+        unsigned int class_idx;
+        enum fullness_group fg;
+        struct size_class *class;
+        struct mapping_area *area;
 
-                if (!class)
-                        continue;
+        BUG_ON(!handle);
 
-                if (class->index != i)
-                        continue;
+        obj_handle_to_location(handle, &page, &obj_idx);
+        get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+        class = pool->size_class[class_idx];
+        off = obj_idx_to_offset(page, obj_idx, class->size);
 
-                for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
-                        if (class->fullness_list[fg]) {
-                                pr_info("Freeing non-empty class with size %db, fullness group %d\n",
-                                        class->size, fg);
-                        }
-                }
-                kfree(class);
-        }
+        area = this_cpu_ptr(&zs_map_area);
+        if (off + class->size <= PAGE_SIZE)
+                kunmap_atomic(area->vm_addr);
+        else {
+                struct page *pages[2];
 
-        kfree(pool->size_class);
-        kfree(pool);
+                pages[0] = page;
+                pages[1] = get_next_page(page);
+                BUG_ON(!pages[1]);
+
+                __zs_unmap_object(area, pages, off, class->size);
+        }
+        put_cpu_var(zs_map_area);
 }
-EXPORT_SYMBOL_GPL(zs_destroy_pool);
+EXPORT_SYMBOL_GPL(zs_unmap_object);
 
 /**
  * zs_malloc - Allocate block of given size from pool.
@@ -1176,100 +1139,137 @@ void zs_free(struct zs_pool *pool, unsigned long obj)
 EXPORT_SYMBOL_GPL(zs_free);
 
 /**
- * zs_map_object - get address of allocated object from handle.
- * @pool: pool from which the object was allocated
- * @handle: handle returned from zs_malloc
- *
- * Before using an object allocated from zs_malloc, it must be mapped using
- * this function. When done with the object, it must be unmapped using
- * zs_unmap_object.
+ * zs_create_pool - Creates an allocation pool to work from.
+ * @flags: allocation flags used to allocate pool metadata
  *
- * Only one object can be mapped per cpu at a time. There is no protection
- * against nested mappings.
+ * This function must be called before anything when using
+ * the zsmalloc allocator.
  *
- * This function returns with preemption and page faults disabled.
+ * On success, a pointer to the newly created pool is returned,
+ * otherwise NULL.
  */
-void *zs_map_object(struct zs_pool *pool, unsigned long handle,
-                        enum zs_mapmode mm)
+struct zs_pool *zs_create_pool(gfp_t flags)
 {
-        struct page *page;
-        unsigned long obj_idx, off;
+        int i;
+        struct zs_pool *pool;
+        struct size_class *prev_class = NULL;
 
-        unsigned int class_idx;
-        enum fullness_group fg;
-        struct size_class *class;
-        struct mapping_area *area;
-        struct page *pages[2];
+        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
+        if (!pool)
+                return NULL;
 
-        BUG_ON(!handle);
+        pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
+                        GFP_KERNEL);
+        if (!pool->size_class) {
+                kfree(pool);
+                return NULL;
+        }
 
         /*
-         * Because we use per-cpu mapping areas shared among the
-         * pools/users, we can't allow mapping in interrupt context
-         * because it can corrupt another users mappings.
+         * Iterate reversly, because, size of size_class that we want to use
+         * for merging should be larger or equal to current size.
          */
-        BUG_ON(in_interrupt());
+        for (i = zs_size_classes - 1; i >= 0; i--) {
+                int size;
+                int pages_per_zspage;
+                struct size_class *class;
 
-        obj_handle_to_location(handle, &page, &obj_idx);
-        get_zspage_mapping(get_first_page(page), &class_idx, &fg);
-        class = pool->size_class[class_idx];
-        off = obj_idx_to_offset(page, obj_idx, class->size);
+                size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
+                if (size > ZS_MAX_ALLOC_SIZE)
+                        size = ZS_MAX_ALLOC_SIZE;
+                pages_per_zspage = get_pages_per_zspage(size);
 
-        area = &get_cpu_var(zs_map_area);
-        area->vm_mm = mm;
-        if (off + class->size <= PAGE_SIZE) {
-                /* this object is contained entirely within a page */
-                area->vm_addr = kmap_atomic(page);
-                return area->vm_addr + off;
+                /*
+                 * size_class is used for normal zsmalloc operation such
+                 * as alloc/free for that size. Although it is natural that we
+                 * have one size_class for each size, there is a chance that we
+                 * can get more memory utilization if we use one size_class for
+                 * many different sizes whose size_class have same
+                 * characteristics. So, we makes size_class point to
+                 * previous size_class if possible.
+                 */
+                if (prev_class) {
+                        if (can_merge(prev_class, size, pages_per_zspage)) {
+                                pool->size_class[i] = prev_class;
+                                continue;
+                        }
+                }
+
+                class = kzalloc(sizeof(struct size_class), GFP_KERNEL);
+                if (!class)
+                        goto err;
+
+                class->size = size;
+                class->index = i;
+                class->pages_per_zspage = pages_per_zspage;
+                spin_lock_init(&class->lock);
+                pool->size_class[i] = class;
+
+                prev_class = class;
         }
 
-        /* this object spans two pages */
-        pages[0] = page;
-        pages[1] = get_next_page(page);
-        BUG_ON(!pages[1]);
+        pool->flags = flags;
 
-        return __zs_map_object(area, pages, off, class->size);
+        return pool;
+
+err:
+        zs_destroy_pool(pool);
+        return NULL;
 }
-EXPORT_SYMBOL_GPL(zs_map_object);
+EXPORT_SYMBOL_GPL(zs_create_pool);
 
-void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
+void zs_destroy_pool(struct zs_pool *pool)
 {
-        struct page *page;
-        unsigned long obj_idx, off;
+        int i;
 
-        unsigned int class_idx;
-        enum fullness_group fg;
-        struct size_class *class;
-        struct mapping_area *area;
+        for (i = 0; i < zs_size_classes; i++) {
+                int fg;
+                struct size_class *class = pool->size_class[i];
 
-        BUG_ON(!handle);
+                if (!class)
+                        continue;
 
-        obj_handle_to_location(handle, &page, &obj_idx);
-        get_zspage_mapping(get_first_page(page), &class_idx, &fg);
-        class = pool->size_class[class_idx];
-        off = obj_idx_to_offset(page, obj_idx, class->size);
+                if (class->index != i)
+                        continue;
 
-        area = this_cpu_ptr(&zs_map_area);
-        if (off + class->size <= PAGE_SIZE)
-                kunmap_atomic(area->vm_addr);
-        else {
-                struct page *pages[2];
+                for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
+                        if (class->fullness_list[fg]) {
+                                pr_info("Freeing non-empty class with size %db, fullness group %d\n",
+                                        class->size, fg);
+                        }
+                }
+                kfree(class);
+        }
 
-                pages[0] = page;
-                pages[1] = get_next_page(page);
-                BUG_ON(!pages[1]);
+        kfree(pool->size_class);
+        kfree(pool);
+}
+EXPORT_SYMBOL_GPL(zs_destroy_pool);
 
-                __zs_unmap_object(area, pages, off, class->size);
+static int __init zs_init(void)
+{
+        int ret = zs_register_cpu_notifier();
+
+        if (ret) {
+                zs_unregister_cpu_notifier();
+                return ret;
         }
-        put_cpu_var(zs_map_area);
+
+        init_zs_size_classes();
+
+#ifdef CONFIG_ZPOOL
+        zpool_register_driver(&zs_zpool_driver);
+#endif
+        return 0;
 }
-EXPORT_SYMBOL_GPL(zs_unmap_object);
 
-unsigned long zs_get_total_pages(struct zs_pool *pool)
+static void __exit zs_exit(void)
 {
-        return atomic_long_read(&pool->pages_allocated);
+#ifdef CONFIG_ZPOOL
+        zpool_unregister_driver(&zs_zpool_driver);
+#endif
+        zs_unregister_cpu_notifier();
 }
-EXPORT_SYMBOL_GPL(zs_get_total_pages);
 
 module_init(zs_init);
 module_exit(zs_exit);