summaryrefslogtreecommitdiffstats
path: root/drivers/gpu/nvgpu/gk20a/gk20a_allocator.c
diff options
context:
space:
mode:
authorTerje Bergstrom <tbergstrom@nvidia.com>2014-06-27 07:49:36 -0400
committerDan Willemsen <dwillemsen@nvidia.com>2015-03-18 15:10:40 -0400
commit55295c6087ed975be12e92f9be799269aef94678 (patch)
tree1a900a9ba03412a1df413ede9e682aac796e70f7 /drivers/gpu/nvgpu/gk20a/gk20a_allocator.c
parent7812a11903a9c0906370dad00fc092539e2219b9 (diff)
gpu: nvgpu: Remove unused code in allocator
Remove functions that are not used in gk20a allocator. Bug 1523403 Change-Id: I36b2b236258d61602cb3283b59c43b40f237d514 Signed-off-by: Terje Bergstrom <tbergstrom@nvidia.com> Reviewed-on: http://git-master/r/432174
Diffstat (limited to 'drivers/gpu/nvgpu/gk20a/gk20a_allocator.c')
-rw-r--r--drivers/gpu/nvgpu/gk20a/gk20a_allocator.c431
1 file changed, 0 insertions, 431 deletions
diff --git a/drivers/gpu/nvgpu/gk20a/gk20a_allocator.c b/drivers/gpu/nvgpu/gk20a/gk20a_allocator.c
index d836dbcf..8ad3c63f 100644
--- a/drivers/gpu/nvgpu/gk20a/gk20a_allocator.c
+++ b/drivers/gpu/nvgpu/gk20a/gk20a_allocator.c
@@ -51,14 +51,11 @@ static struct gk20a_alloc_block *find_block_prepare(
51 struct gk20a_alloc_block **pprev, struct rb_node ***rb_link, 51 struct gk20a_alloc_block **pprev, struct rb_node ***rb_link,
52 struct rb_node **rb_parent); 52 struct rb_node **rb_parent);
53 53
54static u32 check_free_space(u32 addr, u32 limit, u32 len, u32 align);
55static void update_free_addr_cache(struct gk20a_allocator *allocator, 54static void update_free_addr_cache(struct gk20a_allocator *allocator,
56 struct gk20a_alloc_block *block, 55 struct gk20a_alloc_block *block,
57 u32 addr, u32 len, bool free); 56 u32 addr, u32 len, bool free);
58static int find_free_area(struct gk20a_allocator *allocator, 57static int find_free_area(struct gk20a_allocator *allocator,
59 u32 *addr, u32 len); 58 u32 *addr, u32 len);
60static int find_free_area_nc(struct gk20a_allocator *allocator,
61 u32 *addr, u32 *len);
62 59
63static void adjust_block(struct gk20a_alloc_block *block, 60static void adjust_block(struct gk20a_alloc_block *block,
64 u32 start, u32 end, 61 u32 start, u32 end,
@@ -72,13 +69,8 @@ static int split_block(struct gk20a_allocator *allocator,
72 69
73static int block_alloc_single_locked(struct gk20a_allocator *allocator, 70static int block_alloc_single_locked(struct gk20a_allocator *allocator,
74 u32 *addr, u32 len); 71 u32 *addr, u32 len);
75static int block_alloc_list_locked(struct gk20a_allocator *allocator,
76 u32 *addr, u32 len,
77 struct gk20a_alloc_block **pblock);
78static int block_free_locked(struct gk20a_allocator *allocator, 72static int block_free_locked(struct gk20a_allocator *allocator,
79 u32 addr, u32 len); 73 u32 addr, u32 len);
80static void block_free_list_locked(struct gk20a_allocator *allocator,
81 struct gk20a_alloc_block *list);
82 74
83/* link a block into allocator block list */ 75/* link a block into allocator block list */
84static inline void link_block_list(struct gk20a_allocator *allocator, 76static inline void link_block_list(struct gk20a_allocator *allocator,
@@ -337,16 +329,6 @@ find_block_prepare(struct gk20a_allocator *allocator, u32 addr,
337 return block; 329 return block;
338} 330}
339 331
340/* return available space */
341static u32 check_free_space(u32 addr, u32 limit, u32 len, u32 align)
342{
343 if (addr >= limit)
344 return 0;
345 if (addr + len <= limit)
346 return len;
347 return (limit - addr) & ~(align - 1);
348}
349
350/* update first_free_addr/last_free_addr based on new free addr 332/* update first_free_addr/last_free_addr based on new free addr
351 called when free block(s) and allocate block(s) */ 333 called when free block(s) and allocate block(s) */
352static void update_free_addr_cache(struct gk20a_allocator *allocator, 334static void update_free_addr_cache(struct gk20a_allocator *allocator,
@@ -438,74 +420,6 @@ full_search:
438 } 420 }
439} 421}
440 422
441/* find a free address range for as long as it meets alignment or meet len */
442static int find_free_area_nc(struct gk20a_allocator *allocator,
443 u32 *addr, u32 *len)
444{
445 struct gk20a_alloc_block *block;
446 u32 start_addr;
447 u32 avail_len;
448
449 /* fixed addr allocation */
450 if (*addr) {
451 block = find_block(allocator, *addr);
452 if (allocator->limit - *len >= *addr) {
453 if (!block)
454 return 0;
455
456 avail_len = check_free_space(*addr, block->start,
457 *len, allocator->align);
458 if (avail_len != 0) {
459 update_free_addr_cache(allocator, block,
460 *addr, avail_len, false);
461 allocator_dbg(allocator,
462 "free space between %d, %d, len %d",
463 *addr, block->start, avail_len);
464 allocator_dbg(allocator, "next free addr: %d",
465 allocator->last_free_addr);
466 *len = avail_len;
467 return 0;
468 } else
469 return -ENOMEM;
470 } else
471 return -ENOMEM;
472 }
473
474 start_addr = *addr = allocator->first_free_addr;
475
476 allocator_dbg(allocator, "start search addr : %d", start_addr);
477
478 for (block = find_block(allocator, *addr);; block = block->next) {
479 if (allocator->limit - *len < *addr)
480 return -ENOMEM;
481 if (!block) {
482 update_free_addr_cache(allocator, block,
483 *addr, *len, false);
484 allocator_dbg(allocator, "free space from %d, len %d",
485 *addr, *len);
486 allocator_dbg(allocator, "next free addr: %d",
487 allocator->first_free_addr);
488 return 0;
489 }
490
491 avail_len = check_free_space(*addr, block->start,
492 *len, allocator->align);
493 if (avail_len != 0) {
494 update_free_addr_cache(allocator, block,
495 *addr, avail_len, false);
496 allocator_dbg(allocator, "free space between %d, %d, len %d",
497 *addr, block->start, avail_len);
498 allocator_dbg(allocator, "next free addr: %d",
499 allocator->first_free_addr);
500 *len = avail_len;
501 return 0;
502 }
503 if (*addr + allocator->cached_hole_size < block->start)
504 allocator->cached_hole_size = block->start - *addr;
505 *addr = block->end;
506 }
507}
508
509/* expand/shrink a block with new start and new end 423/* expand/shrink a block with new start and new end
510 split_block function provides insert block for shrink */ 424 split_block function provides insert block for shrink */
511static void adjust_block(struct gk20a_alloc_block *block, 425static void adjust_block(struct gk20a_alloc_block *block,
@@ -670,73 +584,6 @@ static int block_alloc_single_locked(struct gk20a_allocator *allocator,
670 return 0; 584 return 0;
671} 585}
672 586
673static int block_alloc_list_locked(struct gk20a_allocator *allocator,
674 u32 *addr_req, u32 nc_len, struct gk20a_alloc_block **pblock)
675{
676 struct gk20a_alloc_block *block;
677 struct gk20a_alloc_block *nc_head = NULL, *nc_prev = NULL;
678 u32 addr = *addr_req, len = nc_len;
679 int err = 0;
680
681 *addr_req = ~0;
682
683 while (nc_len > 0) {
684 err = find_free_area_nc(allocator, &addr, &len);
685 if (err) {
686 allocator_dbg(allocator, "not enough free space");
687 goto clean_up;
688 }
689
690 /* never merge non-contiguous allocation block,
691 just create a new block */
692 block = kmem_cache_zalloc(allocator->block_cache,
693 GFP_KERNEL);
694 if (!block) {
695 err = -ENOMEM;
696 goto clean_up;
697 }
698
699 block->allocator = allocator;
700 block->start = addr;
701 block->end = addr + len;
702
703 insert_block(allocator, block);
704
705 block->nc_prev = nc_prev;
706 if (nc_prev)
707 nc_prev->nc_next = block;
708 nc_prev = block;
709 block->nc_block = true;
710
711 if (!nc_head)
712 nc_head = block;
713
714 if (*addr_req == ~0)
715 *addr_req = addr;
716
717 addr = 0;
718 nc_len -= len;
719 len = nc_len;
720 allocator_dbg(allocator, "remaining length %d", nc_len);
721 }
722
723clean_up:
724 if (err) {
725 while (nc_head) {
726 unlink_block(allocator, nc_head, nc_head->prev);
727 nc_prev = nc_head;
728 nc_head = nc_head->nc_next;
729 kmem_cache_free(allocator->block_cache, nc_prev);
730 }
731 *pblock = NULL;
732 *addr_req = ~0;
733 } else {
734 *pblock = nc_head;
735 }
736
737 return err;
738}
739
740/* called with rw_sema acquired */ 587/* called with rw_sema acquired */
741static int block_free_locked(struct gk20a_allocator *allocator, 588static int block_free_locked(struct gk20a_allocator *allocator,
742 u32 addr, u32 len) 589 u32 addr, u32 len)
@@ -792,52 +639,6 @@ static int block_free_locked(struct gk20a_allocator *allocator,
792 return 0; 639 return 0;
793} 640}
794 641
795/* called with rw_sema acquired */
796static void block_free_list_locked(struct gk20a_allocator *allocator,
797 struct gk20a_alloc_block *list)
798{
799 struct gk20a_alloc_block *block;
800 u32 len;
801
802 update_free_addr_cache(allocator, NULL,
803 list->start, list->end - list->start, true);
804
805 while (list) {
806 block = list;
807 unlink_block(allocator, block, block->prev);
808
809 len = block->end - block->start;
810 if (allocator->cached_hole_size < len)
811 allocator->cached_hole_size = len;
812
813 list = block->nc_next;
814 kmem_cache_free(allocator->block_cache, block);
815 }
816}
817
818static int
819gk20a_allocator_constrain(struct gk20a_allocator *a,
820 bool enable, u32 base, u32 limit)
821{
822 if (enable) {
823 a->constraint.enable = (base >= a->base &&
824 limit <= a->limit);
825 if (!a->constraint.enable)
826 return -EINVAL;
827 a->constraint.base = base;
828 a->constraint.limit = limit;
829 a->first_free_addr = a->last_free_addr = base;
830
831 } else {
832 a->constraint.enable = false;
833 a->first_free_addr = a->last_free_addr = a->base;
834 }
835
836 a->cached_hole_size = 0;
837
838 return 0;
839}
840
841/* init allocator struct */ 642/* init allocator struct */
842int gk20a_allocator_init(struct gk20a_allocator *allocator, 643int gk20a_allocator_init(struct gk20a_allocator *allocator,
843 const char *name, u32 start, u32 len, u32 align) 644 const char *name, u32 start, u32 len, u32 align)
@@ -869,10 +670,7 @@ int gk20a_allocator_init(struct gk20a_allocator *allocator,
869 init_rwsem(&allocator->rw_sema); 670 init_rwsem(&allocator->rw_sema);
870 671
871 allocator->alloc = gk20a_allocator_block_alloc; 672 allocator->alloc = gk20a_allocator_block_alloc;
872 allocator->alloc_nc = gk20a_allocator_block_alloc_nc;
873 allocator->free = gk20a_allocator_block_free; 673 allocator->free = gk20a_allocator_block_free;
874 allocator->free_nc = gk20a_allocator_block_free_nc;
875 allocator->constrain = gk20a_allocator_constrain;
876 674
877 return 0; 675 return 0;
878} 676}
@@ -979,56 +777,6 @@ int gk20a_allocator_block_alloc(struct gk20a_allocator *allocator,
979 return ret; 777 return ret;
980} 778}
981 779
982/*
983 * *addr != ~0 for fixed address allocation. if *addr == 0, base addr is
984 * returned to caller in *addr.
985 *
986 * non-contiguous allocation, which returns a list of blocks with aggregated
987 * size == len. Individual block size must meet alignment requirement.
988 */
989int gk20a_allocator_block_alloc_nc(struct gk20a_allocator *allocator,
990 u32 *addr, u32 len, struct gk20a_alloc_block **pblock)
991{
992 int ret;
993
994 allocator_dbg(allocator, "[in] addr %d, len %d", *addr, len);
995
996 BUG_ON(pblock == NULL);
997 *pblock = NULL;
998
999 if (*addr + len > allocator->limit || /* check addr range */
1000 *addr & (allocator->align - 1) || /* check addr alignment */
1001 len == 0) /* check len */
1002 return -EINVAL;
1003
1004 len = ALIGN(len, allocator->align);
1005 if (!len)
1006 return -ENOMEM;
1007
1008 down_write(&allocator->rw_sema);
1009
1010 ret = block_alloc_list_locked(allocator, addr, len, pblock);
1011
1012#if defined(ALLOCATOR_DEBUG)
1013 if (!ret) {
1014 struct gk20a_alloc_block *block = *pblock;
1015 BUG_ON(!block);
1016 BUG_ON(block->start < allocator->base);
1017 while (block->nc_next) {
1018 BUG_ON(block->end > block->nc_next->start);
1019 block = block->nc_next;
1020 }
1021 BUG_ON(block->end > allocator->limit);
1022 }
1023#endif
1024
1025 up_write(&allocator->rw_sema);
1026
1027 allocator_dbg(allocator, "[out] addr %d, len %d", *addr, len);
1028
1029 return ret;
1030}
1031
1032/* free all blocks between start and end */ 780/* free all blocks between start and end */
1033int gk20a_allocator_block_free(struct gk20a_allocator *allocator, 781int gk20a_allocator_block_free(struct gk20a_allocator *allocator,
1034 u32 addr, u32 len) 782 u32 addr, u32 len)
@@ -1067,182 +815,3 @@ int gk20a_allocator_block_free(struct gk20a_allocator *allocator,
1067 815
1068 return ret; 816 return ret;
1069} 817}
1070
1071/* free non-contiguous allocation block list */
1072void gk20a_allocator_block_free_nc(struct gk20a_allocator *allocator,
1073 struct gk20a_alloc_block *block)
1074{
1075 /* nothing to free */
1076 if (!block)
1077 return;
1078
1079 down_write(&allocator->rw_sema);
1080 block_free_list_locked(allocator, block);
1081 up_write(&allocator->rw_sema);
1082}
1083
1084#if defined(ALLOCATOR_DEBUG)
1085
1086#include <linux/random.h>
1087
1088/* test suite */
1089void gk20a_allocator_test(void)
1090{
1091 struct gk20a_allocator allocator;
1092 struct gk20a_alloc_block *list[5];
1093 u32 addr, len;
1094 u32 count;
1095 int n;
1096
1097 gk20a_allocator_init(&allocator, "test", 0, 10, 1);
1098
1099 /* alloc/free a single block in the beginning */
1100 addr = 0;
1101 gk20a_allocator_block_alloc(&allocator, &addr, 2);
1102 gk20a_allocator_dump(&allocator);
1103 gk20a_allocator_block_free(&allocator, addr, 2);
1104 gk20a_allocator_dump(&allocator);
1105 /* alloc/free a single block in the middle */
1106 addr = 4;
1107 gk20a_allocator_block_alloc(&allocator, &addr, 2);
1108 gk20a_allocator_dump(&allocator);
1109 gk20a_allocator_block_free(&allocator, addr, 2);
1110 gk20a_allocator_dump(&allocator);
1111 /* alloc/free a single block in the end */
1112 addr = 8;
1113 gk20a_allocator_block_alloc(&allocator, &addr, 2);
1114 gk20a_allocator_dump(&allocator);
1115 gk20a_allocator_block_free(&allocator, addr, 2);
1116 gk20a_allocator_dump(&allocator);
1117
1118 /* allocate contiguous blocks */
1119 addr = 0;
1120 gk20a_allocator_block_alloc(&allocator, &addr, 2);
1121 gk20a_allocator_dump(&allocator);
1122 addr = 0;
1123 gk20a_allocator_block_alloc(&allocator, &addr, 4);
1124 gk20a_allocator_dump(&allocator);
1125 addr = 0;
1126 gk20a_allocator_block_alloc(&allocator, &addr, 4);
1127 gk20a_allocator_dump(&allocator);
1128
1129 /* no free space */
1130 addr = 0;
1131 gk20a_allocator_block_alloc(&allocator, &addr, 2);
1132 gk20a_allocator_dump(&allocator);
1133
1134 /* free in the end */
1135 gk20a_allocator_block_free(&allocator, 8, 2);
1136 gk20a_allocator_dump(&allocator);
1137 /* free in the beginning */
1138 gk20a_allocator_block_free(&allocator, 0, 2);
1139 gk20a_allocator_dump(&allocator);
1140 /* free in the middle */
1141 gk20a_allocator_block_free(&allocator, 4, 2);
1142 gk20a_allocator_dump(&allocator);
1143
1144 /* merge case PPPPAAAANNNN */
1145 addr = 4;
1146 gk20a_allocator_block_alloc(&allocator, &addr, 2);
1147 gk20a_allocator_dump(&allocator);
1148 /* merge case ....AAAANNNN */
1149 addr = 0;
1150 gk20a_allocator_block_alloc(&allocator, &addr, 2);
1151 gk20a_allocator_dump(&allocator);
1152 /* merge case PPPPAAAA.... */
1153 addr = 8;
1154 gk20a_allocator_block_alloc(&allocator, &addr, 2);
1155 gk20a_allocator_dump(&allocator);
1156
1157 /* test free across multiple blocks and split */
1158 gk20a_allocator_block_free(&allocator, 2, 2);
1159 gk20a_allocator_dump(&allocator);
1160 gk20a_allocator_block_free(&allocator, 6, 2);
1161 gk20a_allocator_dump(&allocator);
1162 gk20a_allocator_block_free(&allocator, 1, 8);
1163 gk20a_allocator_dump(&allocator);
1164
1165 /* test non-contiguous allocation */
1166 addr = 4;
1167 gk20a_allocator_block_alloc(&allocator, &addr, 2);
1168 gk20a_allocator_dump(&allocator);
1169 addr = 0;
1170 gk20a_allocator_block_alloc_nc(&allocator, &addr, 5, &list[0]);
1171 gk20a_allocator_dump(&allocator);
1172 gk20a_allocator_dump_nc_list(&allocator, list[0]);
1173
1174 /* test free a range overlaping non-contiguous blocks */
1175 gk20a_allocator_block_free(&allocator, 2, 6);
1176 gk20a_allocator_dump(&allocator);
1177
1178 /* test non-contiguous free */
1179 gk20a_allocator_block_free_nc(&allocator, list[0]);
1180 gk20a_allocator_dump(&allocator);
1181
1182 gk20a_allocator_destroy(&allocator);
1183
1184 /* random stress test */
1185 gk20a_allocator_init(&allocator, "test", 4096, 4096 * 1024, 4096);
1186 for (;;) {
1187 pr_debug("alloc tests...\n");
1188 for (count = 0; count < 50; count++) {
1189 addr = 0;
1190 len = random32() % (4096 * 1024 / 16);
1191 gk20a_allocator_block_alloc(&allocator, &addr, len);
1192 gk20a_allocator_dump(&allocator);
1193 }
1194
1195 pr_debug("free tests...\n");
1196 for (count = 0; count < 30; count++) {
1197 addr = (random32() % (4096 * 1024)) & ~(4096 - 1);
1198 len = random32() % (4096 * 1024 / 16);
1199 gk20a_allocator_block_free(&allocator, addr, len);
1200 gk20a_allocator_dump(&allocator);
1201 }
1202
1203 pr_debug("non-contiguous alloc tests...\n");
1204 for (n = 0; n < 5; n++) {
1205 addr = 0;
1206 len = random32() % (4096 * 1024 / 8);
1207 gk20a_allocator_block_alloc_nc(&allocator, &addr,
1208 len, &list[n]);
1209 gk20a_allocator_dump(&allocator);
1210 gk20a_allocator_dump_nc_list(&allocator, list[n]);
1211 }
1212
1213 pr_debug("free tests...\n");
1214 for (count = 0; count < 10; count++) {
1215 addr = (random32() % (4096 * 1024)) & ~(4096 - 1);
1216 len = random32() % (4096 * 1024 / 16);
1217 gk20a_allocator_block_free(&allocator, addr, len);
1218 gk20a_allocator_dump(&allocator);
1219 }
1220
1221 pr_debug("non-contiguous free tests...\n");
1222 for (n = 4; n >= 0; n--) {
1223 gk20a_allocator_dump_nc_list(&allocator, list[n]);
1224 gk20a_allocator_block_free_nc(&allocator, list[n]);
1225 gk20a_allocator_dump(&allocator);
1226 }
1227
1228 pr_debug("fixed addr alloc tests...\n");
1229 for (count = 0; count < 10; count++) {
1230 addr = (random32() % (4096 * 1024)) & ~(4096 - 1);
1231 len = random32() % (4096 * 1024 / 32);
1232 gk20a_allocator_block_alloc(&allocator, &addr, len);
1233 gk20a_allocator_dump(&allocator);
1234 }
1235
1236 pr_debug("free tests...\n");
1237 for (count = 0; count < 10; count++) {
1238 addr = (random32() % (4096 * 1024)) & ~(4096 - 1);
1239 len = random32() % (4096 * 1024 / 16);
1240 gk20a_allocator_block_free(&allocator, addr, len);
1241 gk20a_allocator_dump(&allocator);
1242 }
1243 }
1244 gk20a_allocator_destroy(&allocator);
1245}
1246
1247#endif /* ALLOCATOR_DEBUG */
1248