author		Ming Lei <ming.lei@redhat.com>		2018-03-08 05:53:57 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2018-04-06 06:19:51 -0400
commit		1a2d0914e23aab386f5d5acb689777e24151c2c8 (patch)
tree		3190eb1cc626910ab9b5bc1aac883b5ad330ee6f
parent		b3e6aaa8d94d618e685c4df08bef991a4fb43923 (diff)
genirq/affinity: Allow irq spreading from a given starting point
To support two-stage irq vector spreading, it's required to add a
starting point to the spreading function. No functional change, just
preparatory work for the actual two-stage change.

[ tglx: Renamed variables, tidied up the code and massaged changelog ]

Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: linux-block@vger.kernel.org
Cc: Laurence Oberman <loberman@redhat.com>
Cc: Christoph Hellwig <hch@infradead.org>
Link: https://lkml.kernel.org/r/20180308105358.1506-4-ming.lei@redhat.com
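In concrete terms, the patch gives irq_build_affinity_masks() a caller-supplied
starting vector (startvec) and an explicit vector count (numvecs), and wraps the
running index back to affd->pre_vectors when it reaches the end of the
affinity-managed range. The standalone C sketch below illustrates just that
wrap-around bookkeeping; the constants and slot numbers are invented for the
example, and it is not the kernel code itself.

#include <stdio.h>

#define PRE_VECTORS	2	/* stands in for affd->pre_vectors */
#define NUMVECS		4	/* affinity-managed vectors to spread */

int main(void)
{
	int last_affv = PRE_VECTORS + NUMVECS;	/* one past the managed range */
	int startvec = 4;			/* caller-chosen starting point */
	int curvec = startvec;
	int done = 0;

	while (done < NUMVECS) {
		printf("assign mask slot %d\n", curvec);
		done++;
		/* wrap back to the first affinity-managed slot */
		if (++curvec == last_affv)
			curvec = PRE_VECTORS;
	}
	return 0;
}

Starting at slot 4, this assigns slots 4, 5, 2, 3: all four managed slots get a
mask exactly once, regardless of where the spreading began.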
-rw-r--r--	kernel/irq/affinity.c	35
1 file changed, 20 insertions(+), 15 deletions(-)
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index a9c36904500c..213695a27ddb 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -94,17 +94,17 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
 	return nodes;
 }
 
-static int irq_build_affinity_masks(int nvecs, const struct irq_affinity *affd,
+static int irq_build_affinity_masks(const struct irq_affinity *affd,
+				    int startvec, int numvecs,
 				    cpumask_var_t *node_to_cpumask,
 				    const struct cpumask *cpu_mask,
 				    struct cpumask *nmsk,
 				    struct cpumask *masks)
 {
-	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
-	int last_affv = affv + affd->pre_vectors;
-	int curvec = affd->pre_vectors;
+	int n, nodes, cpus_per_vec, extra_vecs, done = 0;
+	int last_affv = affd->pre_vectors + numvecs;
+	int curvec = startvec;
 	nodemask_t nodemsk = NODE_MASK_NONE;
-	int n, nodes, cpus_per_vec, extra_vecs;
 
 	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);
 
@@ -112,12 +112,13 @@ static int irq_build_affinity_masks(int nvecs, const struct irq_affinity *affd,
 	 * If the number of nodes in the mask is greater than or equal the
 	 * number of vectors we just spread the vectors across the nodes.
 	 */
-	if (affv <= nodes) {
+	if (numvecs <= nodes) {
 		for_each_node_mask(n, nodemsk) {
-			cpumask_copy(masks + curvec,
-				     node_to_cpumask[n]);
-			if (++curvec == last_affv)
+			cpumask_copy(masks + curvec, node_to_cpumask[n]);
+			if (++done == numvecs)
 				break;
+			if (++curvec == last_affv)
+				curvec = affd->pre_vectors;
 		}
 		goto out;
 	}
@@ -126,7 +127,7 @@ static int irq_build_affinity_masks(int nvecs, const struct irq_affinity *affd,
 		int ncpus, v, vecs_to_assign, vecs_per_node;
 
 		/* Spread the vectors per node */
-		vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
+		vecs_per_node = (numvecs - (curvec - affd->pre_vectors)) / nodes;
 
 		/* Get the cpus on this node which are in the mask */
 		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
@@ -150,13 +151,16 @@ static int irq_build_affinity_masks(int nvecs, const struct irq_affinity *affd,
 			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
 		}
 
-		if (curvec >= last_affv)
+		done += v;
+		if (done >= numvecs)
 			break;
+		if (curvec >= last_affv)
+			curvec = affd->pre_vectors;
 		--nodes;
 	}
 
 out:
-	return curvec - affd->pre_vectors;
+	return done;
 }
 
 /**
@@ -169,9 +173,9 @@ out:
 struct cpumask *
 irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
+	int curvec, affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
 	cpumask_var_t nmsk, *node_to_cpumask;
 	struct cpumask *masks = NULL;
-	int curvec;
 
 	/*
 	 * If there aren't any vectors left after applying the pre/post
@@ -198,8 +202,9 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	/* Stabilize the cpumasks */
 	get_online_cpus();
 	build_node_to_cpumask(node_to_cpumask);
-	curvec += irq_build_affinity_masks(nvecs, affd, node_to_cpumask,
-					   cpu_possible_mask, nmsk, masks);
+	curvec += irq_build_affinity_masks(affd, curvec, affvecs,
+					   node_to_cpumask, cpu_possible_mask,
+					   nmsk, masks);
 	put_online_cpus();
 
 	/* Fill out vectors at the end that don't need affinity */
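The changelog calls this preparatory work for the actual two-stage change.
Presumably the follow-up invokes irq_build_affinity_masks() twice, with the
second pass starting where the first one stopped. The standalone sketch below
simulates that calling pattern; build_masks() is an invented stand-in for
irq_build_affinity_masks(affd, startvec, numvecs, ...), and the stage sizes
and slot numbers are illustrative only, not taken from the follow-up patch.

#include <stdio.h>

#define PRE	2	/* stands in for affd->pre_vectors */
#define AFFVECS	6	/* total affinity-managed vectors */

/*
 * Assign 'want' slots starting at 'startvec', wrapping inside the
 * affinity-managed range, and report how many were assigned.
 */
static int build_masks(int startvec, int want, const char *stage)
{
	int curvec = startvec, done = 0;

	while (done < want) {
		printf("%s: assign slot %d\n", stage, curvec);
		done++;
		if (++curvec == PRE + AFFVECS)
			curvec = PRE;
	}
	return done;
}

int main(void)
{
	int curvec = PRE;
	/* stage 1 manages to place only 4 of the 6 vectors */
	int done = build_masks(curvec, 4, "stage1");

	if (done < AFFVECS) {
		curvec += done;		/* continue where stage 1 stopped */
		if (curvec >= PRE + AFFVECS)
			curvec = PRE;
		build_masks(curvec, AFFVECS - done, "stage2");
	}
	return 0;
}

Because the function now returns the number of vectors actually assigned
(done) rather than an offset from affd->pre_vectors, a second call can pick
up exactly where the first left off: stage 1 fills slots 2-5 and stage 2
fills slots 6-7.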