 include/net/ip_vs.h              | 14 ++++++++++++++
 net/netfilter/ipvs/ip_vs_lblc.c  | 13 +++----------
 net/netfilter/ipvs/ip_vs_lblcr.c | 25 +++++--------------------
 net/netfilter/ipvs/ip_vs_lc.c    | 18 +-----------------
 net/netfilter/ipvs/ip_vs_wlc.c   | 20 ++------------------
 5 files changed, 27 insertions(+), 63 deletions(-)
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 17b01b2d48f9..e74da41ebd1b 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1243,6 +1243,20 @@ static inline void ip_vs_conn_drop_conntrack(struct ip_vs_conn *cp)
 /* CONFIG_IP_VS_NFCT */
 #endif
 
+static inline unsigned int
+ip_vs_dest_conn_overhead(struct ip_vs_dest *dest)
+{
+	/*
+	 * We think the overhead of processing active connections is 256
+	 * times higher than that of inactive connections in average. (This
+	 * 256 times might not be accurate, we will change it later) We
+	 * use the following formula to estimate the overhead now:
+	 *		  dest->activeconns*256 + dest->inactconns
+	 */
+	return (atomic_read(&dest->activeconns) << 8) +
+		atomic_read(&dest->inactconns);
+}
+
 #endif	/* __KERNEL__ */
 
 #endif	/* _NET_IP_VS_H */
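
The helper above weighs one active connection as 256 inactive ones, so (activeconns << 8) + inactconns becomes the single estimate every scheduler shares. The same arithmetic can be exercised in plain userspace C; in the sketch below, mock_dest and conn_overhead are hypothetical stand-ins for struct ip_vs_dest and ip_vs_dest_conn_overhead(), with plain ints in place of the kernel's atomic_t counters.

    /* Userspace sketch of the unified estimate.  mock_dest is a stand-in
     * for struct ip_vs_dest; plain ints replace the atomic_t counters. */
    #include <stdio.h>

    struct mock_dest {
            int activeconns;
            int inactconns;
    };

    static unsigned int conn_overhead(const struct mock_dest *dest)
    {
            /* dest->activeconns*256 + dest->inactconns; << 8 is the *256 */
            return ((unsigned int)dest->activeconns << 8) +
                    (unsigned int)dest->inactconns;
    }

    int main(void)
    {
            struct mock_dest a = { .activeconns = 2, .inactconns = 100 };
            struct mock_dest b = { .activeconns = 3, .inactconns = 0 };

            /* a: 2*256+100 = 612, b: 3*256+0 = 768, so a looks lighter */
            printf("a=%u b=%u\n", conn_overhead(&a), conn_overhead(&b));
            return 0;
    }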
diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
index 4a9c8cd19690..6bf7a807649c 100644
--- a/net/netfilter/ipvs/ip_vs_lblc.c
+++ b/net/netfilter/ipvs/ip_vs_lblc.c
@@ -389,12 +389,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
 	int loh, doh;
 
 	/*
-	 * We think the overhead of processing active connections is fifty
-	 * times higher than that of inactive connections in average. (This
-	 * fifty times might not be accurate, we will change it later.) We
-	 * use the following formula to estimate the overhead:
-	 *		  dest->activeconns*50 + dest->inactconns
-	 * and the load:
+	 * We use the following formula to estimate the load:
 	 *		  (dest overhead) / dest->weight
 	 *
 	 * Remember -- no floats in kernel mode!!!
@@ -410,8 +405,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
 			continue;
 		if (atomic_read(&dest->weight) > 0) {
 			least = dest;
-			loh = atomic_read(&least->activeconns) * 50
-				+ atomic_read(&least->inactconns);
+			loh = ip_vs_dest_conn_overhead(least);
 			goto nextstage;
 		}
 	}
@@ -425,8 +419,7 @@ __ip_vs_lblc_schedule(struct ip_vs_service *svc)
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
 
-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if (loh * atomic_read(&dest->weight) >
 		    doh * atomic_read(&least->weight)) {
 			least = dest;
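
The "no floats in kernel mode" reminder is why the scheduler never divides by a weight: loh/least->weight > doh/dest->weight is evaluated as loh * dest->weight > doh * least->weight, which holds only while both weights are positive. A small userspace sketch of that comparison; candidate_is_lighter is an illustrative name, not a kernel function.

    /* Cross-multiplied load comparison: loh/lw > doh/dw is evaluated as
     * loh*dw > doh*lw, valid because both weights are positive. */
    #include <stdio.h>

    static int candidate_is_lighter(unsigned long long loh, int lw,
                                    unsigned long long doh, int dw)
    {
            return loh * dw > doh * lw;
    }

    int main(void)
    {
            /* least: overhead 1280 at weight 1 (1280 per weight unit);
             * candidate: overhead 2048 at weight 4 (512 per weight unit);
             * 1280*4 = 5120 > 2048*1 = 2048, so the candidate is lighter */
            printf("%d\n", candidate_is_lighter(1280, 1, 2048, 4));
            return 0;
    }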
diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
index bd329b1e9589..00631765b92a 100644
--- a/net/netfilter/ipvs/ip_vs_lblcr.c
+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
@@ -178,8 +178,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
 
 		if ((atomic_read(&least->weight) > 0)
 		    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
-			loh = atomic_read(&least->activeconns) * 50
-				+ atomic_read(&least->inactconns);
+			loh = ip_vs_dest_conn_overhead(least);
 			goto nextstage;
 		}
 	}
@@ -192,8 +191,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
 
-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if ((loh * atomic_read(&dest->weight) >
 		     doh * atomic_read(&least->weight))
 		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
@@ -228,8 +226,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
 	list_for_each_entry(e, &set->list, list) {
 		most = e->dest;
 		if (atomic_read(&most->weight) > 0) {
-			moh = atomic_read(&most->activeconns) * 50
-				+ atomic_read(&most->inactconns);
+			moh = ip_vs_dest_conn_overhead(most);
 			goto nextstage;
 		}
 	}
@@ -239,8 +236,7 @@ static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
   nextstage:
 	list_for_each_entry(e, &set->list, list) {
 		dest = e->dest;
-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
 		if ((moh * atomic_read(&dest->weight) <
 		     doh * atomic_read(&most->weight))
@@ -563,12 +559,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
 	int loh, doh;
 
 	/*
-	 * We think the overhead of processing active connections is fifty
-	 * times higher than that of inactive connections in average. (This
-	 * fifty times might not be accurate, we will change it later.) We
-	 * use the following formula to estimate the overhead:
-	 *		  dest->activeconns*50 + dest->inactconns
-	 * and the load:
+	 * We use the following formula to estimate the load:
 	 *		  (dest overhead) / dest->weight
 	 *
 	 * Remember -- no floats in kernel mode!!!
@@ -585,8 +576,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
 
 		if (atomic_read(&dest->weight) > 0) {
 			least = dest;
-			loh = atomic_read(&least->activeconns) * 50
-				+ atomic_read(&least->inactconns);
+			loh = ip_vs_dest_conn_overhead(least);
 			goto nextstage;
 		}
 	}
@@ -600,8 +590,7 @@ __ip_vs_lblcr_schedule(struct ip_vs_service *svc)
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
 
-		doh = atomic_read(&dest->activeconns) * 50
-			+ atomic_read(&dest->inactconns);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if (loh * atomic_read(&dest->weight) >
 		    doh * atomic_read(&least->weight)) {
 			least = dest;
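
The ip_vs_dest_set_max() hunks run the mirrored inequality spelled out in the comment, moh/mw < doh/dw ==> moh*dw < doh*mw, to find the set member carrying the heaviest per-weight load. Below is a self-contained sketch of such a max scan over an array; the mock_dest and set_max names are illustrative stand-ins, not the kernel's list-based types.

    /* Max-load scan mirroring ip_vs_dest_set_max(): keep the entry whose
     * overhead per weight unit is highest, via moh*dw < doh*mw.  The
     * mock_dest/set_max names are illustrative, not kernel API. */
    #include <stdio.h>

    struct mock_dest {
            unsigned long long overhead;    /* ip_vs_dest_conn_overhead() */
            int weight;                     /* assumed > 0 */
    };

    static struct mock_dest *set_max(struct mock_dest *set, int n)
    {
            struct mock_dest *most = &set[0];
            unsigned long long moh = most->overhead;

            for (int i = 1; i < n; i++) {
                    unsigned long long doh = set[i].overhead;
                    /* moh/mw < doh/dw  <=>  moh*dw < doh*mw */
                    if (moh * set[i].weight < doh * most->weight) {
                            most = &set[i];
                            moh = doh;
                    }
            }
            return most;
    }

    int main(void)
    {
            struct mock_dest set[] = {
                    { .overhead = 612,  .weight = 1 },
                    { .overhead = 768,  .weight = 2 },
                    { .overhead = 2048, .weight = 1 },
            };

            /* 2048/1 is the heaviest per-weight load, so set[2] is picked */
            printf("max overhead: %llu\n", set_max(set, 3)->overhead);
            return 0;
    }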
diff --git a/net/netfilter/ipvs/ip_vs_lc.c b/net/netfilter/ipvs/ip_vs_lc.c
index 60638007c6c7..f391819c0cca 100644
--- a/net/netfilter/ipvs/ip_vs_lc.c
+++ b/net/netfilter/ipvs/ip_vs_lc.c
@@ -22,22 +22,6 @@
 
 #include <net/ip_vs.h>
 
-
-static inline unsigned int
-ip_vs_lc_dest_overhead(struct ip_vs_dest *dest)
-{
-	/*
-	 * We think the overhead of processing active connections is 256
-	 * times higher than that of inactive connections in average. (This
-	 * 256 times might not be accurate, we will change it later) We
-	 * use the following formula to estimate the overhead now:
-	 *		  dest->activeconns*256 + dest->inactconns
-	 */
-	return (atomic_read(&dest->activeconns) << 8) +
-		atomic_read(&dest->inactconns);
-}
-
-
 /*
  *	Least Connection scheduling
  */
@@ -62,7 +46,7 @@ ip_vs_lc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		if ((dest->flags & IP_VS_DEST_F_OVERLOAD) ||
 		    atomic_read(&dest->weight) == 0)
 			continue;
-		doh = ip_vs_lc_dest_overhead(dest);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if (!least || doh < loh) {
 			least = dest;
 			loh = doh;
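
With the helper centralized in ip_vs.h, plain least-connection scheduling reduces to picking the smallest overhead while skipping overloaded or zero-weight servers. A userspace sketch of that loop under the same assumptions as before, with mock fields in place of the atomic counters and the IP_VS_DEST_F_OVERLOAD flag:

    /* Least-connection pick: no weights, smallest overhead wins; the
     * overloaded field is a stand-in for IP_VS_DEST_F_OVERLOAD. */
    #include <stdio.h>

    struct mock_dest {
            int activeconns;
            int inactconns;
            int overloaded;
    };

    static unsigned int conn_overhead(const struct mock_dest *dest)
    {
            return ((unsigned int)dest->activeconns << 8) +
                    (unsigned int)dest->inactconns;
    }

    int main(void)
    {
            struct mock_dest dests[] = {
                    { .activeconns = 4, .inactconns = 10 },
                    { .activeconns = 1, .inactconns = 900 },
                    { .activeconns = 9, .inactconns = 0, .overloaded = 1 },
            };
            struct mock_dest *least = NULL;
            unsigned int loh = 0, doh;

            for (int i = 0; i < 3; i++) {
                    if (dests[i].overloaded)
                            continue;
                    doh = conn_overhead(&dests[i]);
                    if (!least || doh < loh) {
                            least = &dests[i];
                            loh = doh;
                    }
            }
            /* 4*256+10 = 1034 beats 1*256+900 = 1156; the overloaded
             * server is skipped entirely, so dests[0] is chosen */
            printf("least overhead: %u\n", loh);
            return 0;
    }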
diff --git a/net/netfilter/ipvs/ip_vs_wlc.c b/net/netfilter/ipvs/ip_vs_wlc.c
index fdf0f58962a4..bc1bfc48a17f 100644
--- a/net/netfilter/ipvs/ip_vs_wlc.c
+++ b/net/netfilter/ipvs/ip_vs_wlc.c
@@ -27,22 +27,6 @@
 
 #include <net/ip_vs.h>
 
-
-static inline unsigned int
-ip_vs_wlc_dest_overhead(struct ip_vs_dest *dest)
-{
-	/*
-	 * We think the overhead of processing active connections is 256
-	 * times higher than that of inactive connections in average. (This
-	 * 256 times might not be accurate, we will change it later) We
-	 * use the following formula to estimate the overhead now:
-	 *		  dest->activeconns*256 + dest->inactconns
-	 */
-	return (atomic_read(&dest->activeconns) << 8) +
-		atomic_read(&dest->inactconns);
-}
-
-
 /*
  *	Weighted Least Connection scheduling
  */
@@ -71,7 +55,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 		if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) &&
 		    atomic_read(&dest->weight) > 0) {
 			least = dest;
-			loh = ip_vs_wlc_dest_overhead(least);
+			loh = ip_vs_dest_conn_overhead(least);
 			goto nextstage;
 		}
 	}
@@ -85,7 +69,7 @@ ip_vs_wlc_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
 	list_for_each_entry_continue(dest, &svc->destinations, n_list) {
 		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
 			continue;
-		doh = ip_vs_wlc_dest_overhead(dest);
+		doh = ip_vs_dest_conn_overhead(dest);
 		if (loh * atomic_read(&dest->weight) >
 		    doh * atomic_read(&least->weight)) {
 			least = dest;
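
To see the WLC comparison above with concrete (illustrative) numbers: if least currently has overhead loh = 900 at weight 1 and a candidate dest has overhead doh = 1600 at weight 2, then loh * dest->weight = 1800 exceeds doh * least->weight = 1600, so dest becomes the new least. That agrees with the per-weight loads, 1600/2 = 800 versus 900/1 = 900, and needs only integer multiplications, never a floating-point division.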