Diffstat (limited to 'net/ipv4/ipvs/ip_vs_sed.c')
-rw-r--r-- | net/ipv4/ipvs/ip_vs_sed.c | 140
1 file changed, 0 insertions, 140 deletions
diff --git a/net/ipv4/ipvs/ip_vs_sed.c b/net/ipv4/ipvs/ip_vs_sed.c
deleted file mode 100644
index 7d2f22f04b83..000000000000
--- a/net/ipv4/ipvs/ip_vs_sed.c
+++ /dev/null
@@ -1,140 +0,0 @@
/*
 * IPVS: Shortest Expected Delay scheduling module
 *
 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Changes:
 *
 */

/*
 * The SED algorithm attempts to minimize each job's expected delay until
 * completion.  The expected delay that a job will experience is
 * (Ci + 1) / Ui if it is sent to the ith server, where Ci is the number
 * of jobs on the ith server and Ui is the fixed service rate (weight)
 * of the ith server.  The SED algorithm adopts a greedy policy: each
 * job does what is in its own best interest, i.e. it joins the queue
 * that would minimize its expected delay of completion.
 *
 * See the following paper for more information:
 * A. Weinrib and S. Shenker, Greed is not enough: Adaptive load sharing
 * in large heterogeneous systems. In Proceedings IEEE INFOCOM'88,
 * pages 986-994, 1988.
 *
 * Thanks must go to Marko Buuri <marko@buuri.name> for talking SED to me.
 *
 * The difference between SED and WLC is that SED includes the incoming
 * job in the cost function (the increment of 1).  SED may outperform
 * WLC when scheduling big jobs in highly heterogeneous systems (where
 * server weights vary a lot).
 *
 */
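
/*
 * Worked example (editor's note; the figures below are hypothetical and
 * not taken from the original source): consider server A with weight 5
 * and 3 active connections, and server B with weight 1 and no connections.
 *
 *   WLC cost:  A = 3/5 = 0.6          B = 0/1 = 0.0       -> B is chosen
 *   SED cost:  A = (3+1)/5 = 0.8      B = (0+1)/1 = 1.0   -> A is chosen
 *
 * Because SED charges the incoming job to each candidate, the heavily
 * weighted server A still wins even though it already holds three
 * connections, which is the behaviour described above.
 */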
37 | |||
38 | #include <linux/module.h> | ||
39 | #include <linux/kernel.h> | ||
40 | |||
41 | #include <net/ip_vs.h> | ||
42 | |||
43 | |||
static inline unsigned int
ip_vs_sed_dest_overhead(struct ip_vs_dest *dest)
{
	/*
	 * We only use the active connection number in the cost
	 * calculation here.
	 */
	return atomic_read(&dest->activeconns) + 1;
}
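
/*
 * Editor's note, for comparison: the WLC scheduler's overhead helper of
 * the same era also counted inactive connections (roughly
 * (activeconns << 8) + inactconns), whereas SED deliberately charges
 * only the active connections plus the incoming job.
 */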
53 | |||
54 | |||
55 | /* | ||
56 | * Weighted Least Connection scheduling | ||
57 | */ | ||
static struct ip_vs_dest *
ip_vs_sed_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
	struct ip_vs_dest *dest, *least;
	unsigned int loh, doh;

	IP_VS_DBG(6, "ip_vs_sed_schedule(): Scheduling...\n");

	/*
	 * We calculate the load of each dest server as follows:
	 *	(server expected overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 *	h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connections.
	 */
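
	/*
	 * Editor's note, a hypothetical numeric check of the integer
	 * comparison described above: with loh = 5, least->weight = 4
	 * (cost 1.25) and doh = 3, dest->weight = 2 (cost 1.5), the test
	 * loh * dest->weight > doh * least->weight becomes 10 > 12,
	 * which is false, so the cheaper "least" is correctly kept.
	 */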
78 | |||
79 | list_for_each_entry(dest, &svc->destinations, n_list) { | ||
80 | if (!(dest->flags & IP_VS_DEST_F_OVERLOAD) && | ||
81 | atomic_read(&dest->weight) > 0) { | ||
82 | least = dest; | ||
83 | loh = ip_vs_sed_dest_overhead(least); | ||
84 | goto nextstage; | ||
85 | } | ||
86 | } | ||
87 | return NULL; | ||
88 | |||
89 | /* | ||
90 | * Find the destination with the least load. | ||
91 | */ | ||
92 | nextstage: | ||
	list_for_each_entry_continue(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;
		doh = ip_vs_sed_dest_overhead(dest);
		if (loh * atomic_read(&dest->weight) >
		    doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "SED: server %s:%u "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      IP_VS_DBG_ADDR(svc->af, &least->addr), ntohs(least->port),
		      atomic_read(&least->activeconns),
		      atomic_read(&least->refcnt),
		      atomic_read(&least->weight), loh);

	return least;
}
113 | |||
114 | |||
115 | static struct ip_vs_scheduler ip_vs_sed_scheduler = | ||
116 | { | ||
117 | .name = "sed", | ||
118 | .refcnt = ATOMIC_INIT(0), | ||
119 | .module = THIS_MODULE, | ||
120 | .n_list = LIST_HEAD_INIT(ip_vs_sed_scheduler.n_list), | ||
121 | #ifdef CONFIG_IP_VS_IPV6 | ||
122 | .supports_ipv6 = 1, | ||
123 | #endif | ||
124 | .schedule = ip_vs_sed_schedule, | ||
125 | }; | ||
126 | |||
127 | |||
128 | static int __init ip_vs_sed_init(void) | ||
129 | { | ||
130 | return register_ip_vs_scheduler(&ip_vs_sed_scheduler); | ||
131 | } | ||
132 | |||
133 | static void __exit ip_vs_sed_cleanup(void) | ||
134 | { | ||
135 | unregister_ip_vs_scheduler(&ip_vs_sed_scheduler); | ||
136 | } | ||
137 | |||
138 | module_init(ip_vs_sed_init); | ||
139 | module_exit(ip_vs_sed_cleanup); | ||
140 | MODULE_LICENSE("GPL"); | ||