diff options
author | Jean Delvare <jdelvare@suse.de> | 2007-04-26 03:44:22 -0400 |
---|---|---|
committer | David S. Miller <davem@davemloft.net> | 2007-04-26 03:44:22 -0400 |
commit | eefa3906283a2b60a6d02a2cda593a7d7d7946c5 (patch) | |
tree | a4e1f3b8dca04b8dff3cd99dc43f771f798558fb /net/core/skbuff.c | |
parent | 28d8909bc790d936ce33f4402adf7577533bbd4b (diff) |
[NET]: Clean up sk_buff walkers.
I noticed recently that, in skb_checksum(), "offset" and "start" are
essentially the same thing and have the same value throughout the
function, despite being computed differently. Using a single variable
allows some cleanups and makes the skb_checksum() function smaller,
more readable, and presumably marginally faster.
We appear to have many other "sk_buff walker" functions built on the
exact same model, so the cleanup applies to them, too. Here is a list
of the functions I found to be affected:
net/appletalk/ddp.c:atalk_sum_skb()
net/core/datagram.c:skb_copy_datagram_iovec()
net/core/datagram.c:skb_copy_and_csum_datagram()
net/core/skbuff.c:skb_copy_bits()
net/core/skbuff.c:skb_store_bits()
net/core/skbuff.c:skb_checksum()
net/core/skbuff.c:skb_copy_and_csum_bits()
net/core/user_dma.c:dma_skb_copy_datagram_iovec()
net/xfrm/xfrm_algo.c:skb_icv_walk()
net/xfrm/xfrm_algo.c:skb_to_sgvec()
OTOH, I admit I'm a bit surprised, the cleanup is rather obvious so I'm
really wondering if I am missing something. Can anyone please comment
on this?
Signed-off-by: Jean Delvare <jdelvare@suse.de>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core/skbuff.c')
-rw-r--r-- | net/core/skbuff.c | 122 |
1 file changed, 43 insertions, 79 deletions
diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 142257307fa2..32f087b5233e 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c | |||
@@ -1045,13 +1045,13 @@ pull_pages: | |||
1045 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) | 1045 | int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) |
1046 | { | 1046 | { |
1047 | int i, copy; | 1047 | int i, copy; |
1048 | int start = skb_headlen(skb); | 1048 | int end = skb_headlen(skb); |
1049 | 1049 | ||
1050 | if (offset > (int)skb->len - len) | 1050 | if (offset > (int)skb->len - len) |
1051 | goto fault; | 1051 | goto fault; |
1052 | 1052 | ||
1053 | /* Copy header. */ | 1053 | /* Copy header. */ |
1054 | if ((copy = start - offset) > 0) { | 1054 | if ((copy = end - offset) > 0) { |
1055 | if (copy > len) | 1055 | if (copy > len) |
1056 | copy = len; | 1056 | copy = len; |
1057 | skb_copy_from_linear_data_offset(skb, offset, to, copy); | 1057 | skb_copy_from_linear_data_offset(skb, offset, to, copy); |
@@ -1062,11 +1062,9 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) | |||
1062 | } | 1062 | } |
1063 | 1063 | ||
1064 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 1064 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
1065 | int end; | 1065 | BUG_TRAP(len >= 0); |
1066 | 1066 | ||
1067 | BUG_TRAP(start <= offset + len); | 1067 | end = offset + skb_shinfo(skb)->frags[i].size; |
1068 | |||
1069 | end = start + skb_shinfo(skb)->frags[i].size; | ||
1070 | if ((copy = end - offset) > 0) { | 1068 | if ((copy = end - offset) > 0) { |
1071 | u8 *vaddr; | 1069 | u8 *vaddr; |
1072 | 1070 | ||
@@ -1075,8 +1073,8 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) | |||
1075 | 1073 | ||
1076 | vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); | 1074 | vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]); |
1077 | memcpy(to, | 1075 | memcpy(to, |
1078 | vaddr + skb_shinfo(skb)->frags[i].page_offset+ | 1076 | vaddr + skb_shinfo(skb)->frags[i].page_offset, |
1079 | offset - start, copy); | 1077 | copy); |
1080 | kunmap_skb_frag(vaddr); | 1078 | kunmap_skb_frag(vaddr); |
1081 | 1079 | ||
1082 | if ((len -= copy) == 0) | 1080 | if ((len -= copy) == 0) |
@@ -1084,30 +1082,25 @@ int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len) | |||
1084 | offset += copy; | 1082 | offset += copy; |
1085 | to += copy; | 1083 | to += copy; |
1086 | } | 1084 | } |
1087 | start = end; | ||
1088 | } | 1085 | } |
1089 | 1086 | ||
1090 | if (skb_shinfo(skb)->frag_list) { | 1087 | if (skb_shinfo(skb)->frag_list) { |
1091 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1088 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
1092 | 1089 | ||
1093 | for (; list; list = list->next) { | 1090 | for (; list; list = list->next) { |
1094 | int end; | 1091 | BUG_TRAP(len >= 0); |
1095 | |||
1096 | BUG_TRAP(start <= offset + len); | ||
1097 | 1092 | ||
1098 | end = start + list->len; | 1093 | end = offset + list->len; |
1099 | if ((copy = end - offset) > 0) { | 1094 | if ((copy = end - offset) > 0) { |
1100 | if (copy > len) | 1095 | if (copy > len) |
1101 | copy = len; | 1096 | copy = len; |
1102 | if (skb_copy_bits(list, offset - start, | 1097 | if (skb_copy_bits(list, 0, to, copy)) |
1103 | to, copy)) | ||
1104 | goto fault; | 1098 | goto fault; |
1105 | if ((len -= copy) == 0) | 1099 | if ((len -= copy) == 0) |
1106 | return 0; | 1100 | return 0; |
1107 | offset += copy; | 1101 | offset += copy; |
1108 | to += copy; | 1102 | to += copy; |
1109 | } | 1103 | } |
1110 | start = end; | ||
1111 | } | 1104 | } |
1112 | } | 1105 | } |
1113 | if (!len) | 1106 | if (!len) |
@@ -1132,12 +1125,12 @@ fault: | |||
1132 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) | 1125 | int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) |
1133 | { | 1126 | { |
1134 | int i, copy; | 1127 | int i, copy; |
1135 | int start = skb_headlen(skb); | 1128 | int end = skb_headlen(skb); |
1136 | 1129 | ||
1137 | if (offset > (int)skb->len - len) | 1130 | if (offset > (int)skb->len - len) |
1138 | goto fault; | 1131 | goto fault; |
1139 | 1132 | ||
1140 | if ((copy = start - offset) > 0) { | 1133 | if ((copy = end - offset) > 0) { |
1141 | if (copy > len) | 1134 | if (copy > len) |
1142 | copy = len; | 1135 | copy = len; |
1143 | skb_copy_to_linear_data_offset(skb, offset, from, copy); | 1136 | skb_copy_to_linear_data_offset(skb, offset, from, copy); |
@@ -1149,11 +1142,9 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) | |||
1149 | 1142 | ||
1150 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 1143 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
1151 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 1144 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
1152 | int end; | 1145 | BUG_TRAP(len >= 0); |
1153 | |||
1154 | BUG_TRAP(start <= offset + len); | ||
1155 | 1146 | ||
1156 | end = start + frag->size; | 1147 | end = offset + frag->size; |
1157 | if ((copy = end - offset) > 0) { | 1148 | if ((copy = end - offset) > 0) { |
1158 | u8 *vaddr; | 1149 | u8 *vaddr; |
1159 | 1150 | ||
@@ -1161,8 +1152,7 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) | |||
1161 | copy = len; | 1152 | copy = len; |
1162 | 1153 | ||
1163 | vaddr = kmap_skb_frag(frag); | 1154 | vaddr = kmap_skb_frag(frag); |
1164 | memcpy(vaddr + frag->page_offset + offset - start, | 1155 | memcpy(vaddr + frag->page_offset, from, copy); |
1165 | from, copy); | ||
1166 | kunmap_skb_frag(vaddr); | 1156 | kunmap_skb_frag(vaddr); |
1167 | 1157 | ||
1168 | if ((len -= copy) == 0) | 1158 | if ((len -= copy) == 0) |
@@ -1170,30 +1160,25 @@ int skb_store_bits(struct sk_buff *skb, int offset, const void *from, int len) | |||
1170 | offset += copy; | 1160 | offset += copy; |
1171 | from += copy; | 1161 | from += copy; |
1172 | } | 1162 | } |
1173 | start = end; | ||
1174 | } | 1163 | } |
1175 | 1164 | ||
1176 | if (skb_shinfo(skb)->frag_list) { | 1165 | if (skb_shinfo(skb)->frag_list) { |
1177 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1166 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
1178 | 1167 | ||
1179 | for (; list; list = list->next) { | 1168 | for (; list; list = list->next) { |
1180 | int end; | 1169 | BUG_TRAP(len >= 0); |
1181 | 1170 | ||
1182 | BUG_TRAP(start <= offset + len); | 1171 | end = offset + list->len; |
1183 | |||
1184 | end = start + list->len; | ||
1185 | if ((copy = end - offset) > 0) { | 1172 | if ((copy = end - offset) > 0) { |
1186 | if (copy > len) | 1173 | if (copy > len) |
1187 | copy = len; | 1174 | copy = len; |
1188 | if (skb_store_bits(list, offset - start, | 1175 | if (skb_store_bits(list, 0, from, copy)) |
1189 | from, copy)) | ||
1190 | goto fault; | 1176 | goto fault; |
1191 | if ((len -= copy) == 0) | 1177 | if ((len -= copy) == 0) |
1192 | return 0; | 1178 | return 0; |
1193 | offset += copy; | 1179 | offset += copy; |
1194 | from += copy; | 1180 | from += copy; |
1195 | } | 1181 | } |
1196 | start = end; | ||
1197 | } | 1182 | } |
1198 | } | 1183 | } |
1199 | if (!len) | 1184 | if (!len) |
@@ -1210,8 +1195,8 @@ EXPORT_SYMBOL(skb_store_bits); | |||
1210 | __wsum skb_checksum(const struct sk_buff *skb, int offset, | 1195 | __wsum skb_checksum(const struct sk_buff *skb, int offset, |
1211 | int len, __wsum csum) | 1196 | int len, __wsum csum) |
1212 | { | 1197 | { |
1213 | int start = skb_headlen(skb); | 1198 | int end = skb_headlen(skb); |
1214 | int i, copy = start - offset; | 1199 | int i, copy = end - offset; |
1215 | int pos = 0; | 1200 | int pos = 0; |
1216 | 1201 | ||
1217 | /* Checksum header. */ | 1202 | /* Checksum header. */ |
@@ -1226,11 +1211,9 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, | |||
1226 | } | 1211 | } |
1227 | 1212 | ||
1228 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 1213 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
1229 | int end; | 1214 | BUG_TRAP(len >= 0); |
1230 | |||
1231 | BUG_TRAP(start <= offset + len); | ||
1232 | 1215 | ||
1233 | end = start + skb_shinfo(skb)->frags[i].size; | 1216 | end = offset + skb_shinfo(skb)->frags[i].size; |
1234 | if ((copy = end - offset) > 0) { | 1217 | if ((copy = end - offset) > 0) { |
1235 | __wsum csum2; | 1218 | __wsum csum2; |
1236 | u8 *vaddr; | 1219 | u8 *vaddr; |
@@ -1239,8 +1222,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, | |||
1239 | if (copy > len) | 1222 | if (copy > len) |
1240 | copy = len; | 1223 | copy = len; |
1241 | vaddr = kmap_skb_frag(frag); | 1224 | vaddr = kmap_skb_frag(frag); |
1242 | csum2 = csum_partial(vaddr + frag->page_offset + | 1225 | csum2 = csum_partial(vaddr + frag->page_offset, |
1243 | offset - start, copy, 0); | 1226 | copy, 0); |
1244 | kunmap_skb_frag(vaddr); | 1227 | kunmap_skb_frag(vaddr); |
1245 | csum = csum_block_add(csum, csum2, pos); | 1228 | csum = csum_block_add(csum, csum2, pos); |
1246 | if (!(len -= copy)) | 1229 | if (!(len -= copy)) |
@@ -1248,31 +1231,26 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, | |||
1248 | offset += copy; | 1231 | offset += copy; |
1249 | pos += copy; | 1232 | pos += copy; |
1250 | } | 1233 | } |
1251 | start = end; | ||
1252 | } | 1234 | } |
1253 | 1235 | ||
1254 | if (skb_shinfo(skb)->frag_list) { | 1236 | if (skb_shinfo(skb)->frag_list) { |
1255 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 1237 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
1256 | 1238 | ||
1257 | for (; list; list = list->next) { | 1239 | for (; list; list = list->next) { |
1258 | int end; | 1240 | BUG_TRAP(len >= 0); |
1259 | 1241 | ||
1260 | BUG_TRAP(start <= offset + len); | 1242 | end = offset + list->len; |
1261 | |||
1262 | end = start + list->len; | ||
1263 | if ((copy = end - offset) > 0) { | 1243 | if ((copy = end - offset) > 0) { |
1264 | __wsum csum2; | 1244 | __wsum csum2; |
1265 | if (copy > len) | 1245 | if (copy > len) |
1266 | copy = len; | 1246 | copy = len; |
1267 | csum2 = skb_checksum(list, offset - start, | 1247 | csum2 = skb_checksum(list, 0, copy, 0); |
1268 | copy, 0); | ||
1269 | csum = csum_block_add(csum, csum2, pos); | 1248 | csum = csum_block_add(csum, csum2, pos); |
1270 | if ((len -= copy) == 0) | 1249 | if ((len -= copy) == 0) |
1271 | return csum; | 1250 | return csum; |
1272 | offset += copy; | 1251 | offset += copy; |
1273 | pos += copy; | 1252 | pos += copy; |
1274 | } | 1253 | } |
1275 | start = end; | ||
1276 | } | 1254 | } |
1277 | } | 1255 | } |
1278 | BUG_ON(len); | 1256 | BUG_ON(len); |
@@ -1285,8 +1263,8 @@ __wsum skb_checksum(const struct sk_buff *skb, int offset, | |||
1285 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | 1263 | __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, |
1286 | u8 *to, int len, __wsum csum) | 1264 | u8 *to, int len, __wsum csum) |
1287 | { | 1265 | { |
1288 | int start = skb_headlen(skb); | 1266 | int end = skb_headlen(skb); |
1289 | int i, copy = start - offset; | 1267 | int i, copy = end - offset; |
1290 | int pos = 0; | 1268 | int pos = 0; |
1291 | 1269 | ||
1292 | /* Copy header. */ | 1270 | /* Copy header. */ |
@@ -1303,11 +1281,9 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1303 | } | 1281 | } |
1304 | 1282 | ||
1305 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 1283 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
1306 | int end; | 1284 | BUG_TRAP(len >= 0); |
1307 | |||
1308 | BUG_TRAP(start <= offset + len); | ||
1309 | 1285 | ||
1310 | end = start + skb_shinfo(skb)->frags[i].size; | 1286 | end = offset + skb_shinfo(skb)->frags[i].size; |
1311 | if ((copy = end - offset) > 0) { | 1287 | if ((copy = end - offset) > 0) { |
1312 | __wsum csum2; | 1288 | __wsum csum2; |
1313 | u8 *vaddr; | 1289 | u8 *vaddr; |
@@ -1317,9 +1293,8 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1317 | copy = len; | 1293 | copy = len; |
1318 | vaddr = kmap_skb_frag(frag); | 1294 | vaddr = kmap_skb_frag(frag); |
1319 | csum2 = csum_partial_copy_nocheck(vaddr + | 1295 | csum2 = csum_partial_copy_nocheck(vaddr + |
1320 | frag->page_offset + | 1296 | frag->page_offset, |
1321 | offset - start, to, | 1297 | to, copy, 0); |
1322 | copy, 0); | ||
1323 | kunmap_skb_frag(vaddr); | 1298 | kunmap_skb_frag(vaddr); |
1324 | csum = csum_block_add(csum, csum2, pos); | 1299 | csum = csum_block_add(csum, csum2, pos); |
1325 | if (!(len -= copy)) | 1300 | if (!(len -= copy)) |
@@ -1328,7 +1303,6 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1328 | to += copy; | 1303 | to += copy; |
1329 | pos += copy; | 1304 | pos += copy; |
1330 | } | 1305 | } |
1331 | start = end; | ||
1332 | } | 1306 | } |
1333 | 1307 | ||
1334 | if (skb_shinfo(skb)->frag_list) { | 1308 | if (skb_shinfo(skb)->frag_list) { |
@@ -1336,16 +1310,13 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1336 | 1310 | ||
1337 | for (; list; list = list->next) { | 1311 | for (; list; list = list->next) { |
1338 | __wsum csum2; | 1312 | __wsum csum2; |
1339 | int end; | 1313 | BUG_TRAP(len >= 0); |
1340 | |||
1341 | BUG_TRAP(start <= offset + len); | ||
1342 | 1314 | ||
1343 | end = start + list->len; | 1315 | end = offset + list->len; |
1344 | if ((copy = end - offset) > 0) { | 1316 | if ((copy = end - offset) > 0) { |
1345 | if (copy > len) | 1317 | if (copy > len) |
1346 | copy = len; | 1318 | copy = len; |
1347 | csum2 = skb_copy_and_csum_bits(list, | 1319 | csum2 = skb_copy_and_csum_bits(list, 0, |
1348 | offset - start, | ||
1349 | to, copy, 0); | 1320 | to, copy, 0); |
1350 | csum = csum_block_add(csum, csum2, pos); | 1321 | csum = csum_block_add(csum, csum2, pos); |
1351 | if ((len -= copy) == 0) | 1322 | if ((len -= copy) == 0) |
@@ -1354,7 +1325,6 @@ __wsum skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, | |||
1354 | to += copy; | 1325 | to += copy; |
1355 | pos += copy; | 1326 | pos += copy; |
1356 | } | 1327 | } |
1357 | start = end; | ||
1358 | } | 1328 | } |
1359 | } | 1329 | } |
1360 | BUG_ON(len); | 1330 | BUG_ON(len); |
@@ -2026,8 +1996,8 @@ void __init skb_init(void) | |||
2026 | int | 1996 | int |
2027 | skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | 1997 | skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) |
2028 | { | 1998 | { |
2029 | int start = skb_headlen(skb); | 1999 | int end = skb_headlen(skb); |
2030 | int i, copy = start - offset; | 2000 | int i, copy = end - offset; |
2031 | int elt = 0; | 2001 | int elt = 0; |
2032 | 2002 | ||
2033 | if (copy > 0) { | 2003 | if (copy > 0) { |
@@ -2043,45 +2013,39 @@ skb_to_sgvec(struct sk_buff *skb, struct scatterlist *sg, int offset, int len) | |||
2043 | } | 2013 | } |
2044 | 2014 | ||
2045 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { | 2015 | for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { |
2046 | int end; | 2016 | BUG_TRAP(len >= 0); |
2047 | 2017 | ||
2048 | BUG_TRAP(start <= offset + len); | 2018 | end = offset + skb_shinfo(skb)->frags[i].size; |
2049 | |||
2050 | end = start + skb_shinfo(skb)->frags[i].size; | ||
2051 | if ((copy = end - offset) > 0) { | 2019 | if ((copy = end - offset) > 0) { |
2052 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | 2020 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; |
2053 | 2021 | ||
2054 | if (copy > len) | 2022 | if (copy > len) |
2055 | copy = len; | 2023 | copy = len; |
2056 | sg[elt].page = frag->page; | 2024 | sg[elt].page = frag->page; |
2057 | sg[elt].offset = frag->page_offset+offset-start; | 2025 | sg[elt].offset = frag->page_offset; |
2058 | sg[elt].length = copy; | 2026 | sg[elt].length = copy; |
2059 | elt++; | 2027 | elt++; |
2060 | if (!(len -= copy)) | 2028 | if (!(len -= copy)) |
2061 | return elt; | 2029 | return elt; |
2062 | offset += copy; | 2030 | offset += copy; |
2063 | } | 2031 | } |
2064 | start = end; | ||
2065 | } | 2032 | } |
2066 | 2033 | ||
2067 | if (skb_shinfo(skb)->frag_list) { | 2034 | if (skb_shinfo(skb)->frag_list) { |
2068 | struct sk_buff *list = skb_shinfo(skb)->frag_list; | 2035 | struct sk_buff *list = skb_shinfo(skb)->frag_list; |
2069 | 2036 | ||
2070 | for (; list; list = list->next) { | 2037 | for (; list; list = list->next) { |
2071 | int end; | 2038 | BUG_TRAP(len >= 0); |
2072 | |||
2073 | BUG_TRAP(start <= offset + len); | ||
2074 | 2039 | ||
2075 | end = start + list->len; | 2040 | end = offset + list->len; |
2076 | if ((copy = end - offset) > 0) { | 2041 | if ((copy = end - offset) > 0) { |
2077 | if (copy > len) | 2042 | if (copy > len) |
2078 | copy = len; | 2043 | copy = len; |
2079 | elt += skb_to_sgvec(list, sg+elt, offset - start, copy); | 2044 | elt += skb_to_sgvec(list, sg+elt, 0, copy); |
2080 | if ((len -= copy) == 0) | 2045 | if ((len -= copy) == 0) |
2081 | return elt; | 2046 | return elt; |
2082 | offset += copy; | 2047 | offset += copy; |
2083 | } | 2048 | } |
2084 | start = end; | ||
2085 | } | 2049 | } |
2086 | } | 2050 | } |
2087 | BUG_ON(len); | 2051 | BUG_ON(len); |