-rw-r--r--	drivers/irqchip/irq-imx-gpcv2.c |  2 ++
-rw-r--r--	kernel/irq/affinity.c           | 20 +++++++++++---------
2 files changed, 13 insertions, 9 deletions
diff --git a/drivers/irqchip/irq-imx-gpcv2.c b/drivers/irqchip/irq-imx-gpcv2.c
index 15af9a9753e5..2d203b422129 100644
--- a/drivers/irqchip/irq-imx-gpcv2.c
+++ b/drivers/irqchip/irq-imx-gpcv2.c
@@ -230,6 +230,8 @@ static int __init imx_gpcv2_irqchip_init(struct device_node *node,
 		return -ENOMEM;
 	}
 
+	raw_spin_lock_init(&cd->rlock);
+
 	cd->gpc_base = of_iomap(node, 0);
 	if (!cd->gpc_base) {
 		pr_err("fsl-gpcv2: unable to map gpc registers\n");
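
Note on the irq-imx-gpcv2.c hunk: the chip data's raw_spinlock_t is now initialized right after the structure is allocated, before any code path that takes it can run. Below is a minimal sketch of that init-before-use pattern, not the driver's actual code; example_chip_data, example_chip_data_alloc and example_reg_update are hypothetical names used only for illustration.

/*
 * Sketch only: initialize a raw_spinlock_t as soon as its container is
 * allocated, before any accessor that takes the lock can be reached.
 * All names here are hypothetical stand-ins for the driver's own.
 */
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct example_chip_data {
	raw_spinlock_t rlock;
	void __iomem *base;
};

static struct example_chip_data *example_chip_data_alloc(void)
{
	struct example_chip_data *cd = kzalloc(sizeof(*cd), GFP_KERNEL);

	if (!cd)
		return NULL;

	/* Init the lock before anything can call example_reg_update() */
	raw_spin_lock_init(&cd->rlock);
	return cd;
}

static void example_reg_update(struct example_chip_data *cd,
			       unsigned int offset, u32 set)
{
	unsigned long flags;
	u32 val;

	/* Serialize read-modify-write of the mapped register */
	raw_spin_lock_irqsave(&cd->rlock, flags);
	val = readl_relaxed(cd->base + offset);
	writel_relaxed(val | set, cd->base + offset);
	raw_spin_unlock_irqrestore(&cd->rlock, flags);
}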
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index 4544b115f5eb..d052947fe785 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -59,7 +59,7 @@ static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
 struct cpumask *
 irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 {
-	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec;
+	int n, nodes, cpus_per_vec, extra_vecs, curvec;
 	int affv = nvecs - affd->pre_vectors - affd->post_vectors;
 	int last_affv = affv + affd->pre_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
@@ -94,19 +94,21 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 		goto done;
 	}
 
-	/* Spread the vectors per node */
-	vecs_per_node = affv / nodes;
-	/* Account for rounding errors */
-	extra_vecs = affv - (nodes * vecs_per_node);
-
 	for_each_node_mask(n, nodemsk) {
-		int ncpus, v, vecs_to_assign = vecs_per_node;
+		int ncpus, v, vecs_to_assign, vecs_per_node;
+
+		/* Spread the vectors per node */
+		vecs_per_node = (affv - curvec) / nodes;
 
 		/* Get the cpus on this node which are in the mask */
 		cpumask_and(nmsk, cpu_online_mask, cpumask_of_node(n));
 
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
+		vecs_to_assign = min(vecs_per_node, ncpus);
+
+		/* Account for rounding errors */
+		extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);
 
 		for (v = 0; curvec < last_affv && v < vecs_to_assign;
 		     curvec++, v++) {
@@ -115,14 +117,14 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 			/* Account for extra vectors to compensate rounding errors */
 			if (extra_vecs) {
 				cpus_per_vec++;
-				if (!--extra_vecs)
-					vecs_per_node++;
+				--extra_vecs;
 			}
 			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
 		}
 
 		if (curvec >= last_affv)
 			break;
+		--nodes;
 	}
 
 done:
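
For reference, the kernel/irq/affinity.c change recomputes vecs_per_node on each node from the vectors still unassigned and the nodes still to visit, and derives the rounding surplus per node from that node's CPU count instead of one global value. Below is a minimal user-space sketch of the resulting arithmetic; the node CPU counts and vector count are made up for illustration, and the cpus_per_vec division assumes it mirrors the unchanged context just above the last hunk.

/*
 * User-space sketch of the spreading arithmetic after this change.
 * Hypothetical topology: 3 nodes with 2, 4 and 16 online CPUs, and 8
 * vectors to spread (pre_vectors/post_vectors omitted for simplicity).
 */
#include <stdio.h>

#define MIN(a, b)	((a) < (b) ? (a) : (b))

int main(void)
{
	int node_cpus[] = { 2, 4, 16 };	/* hypothetical unbalanced nodes */
	int nodes = 3;
	int affv = 8, curvec = 0, last_affv = 8;

	for (int n = 0; n < 3 && curvec < last_affv; n++, nodes--) {
		/* Spread the remaining vectors over the remaining nodes */
		int vecs_per_node = (affv - curvec) / nodes;
		int ncpus = node_cpus[n];
		int vecs_to_assign = MIN(vecs_per_node, ncpus);
		/* Account for rounding errors within this node */
		int extra_vecs = ncpus - vecs_to_assign * (ncpus / vecs_to_assign);

		for (int v = 0; curvec < last_affv && v < vecs_to_assign;
		     curvec++, v++) {
			int cpus_per_vec = ncpus / vecs_to_assign;

			if (extra_vecs) {
				cpus_per_vec++;
				--extra_vecs;
			}
			printf("vector %2d -> node %d, %2d CPUs\n",
			       curvec, n, cpus_per_vec);
		}
	}
	return 0;
}

With these numbers the sketch assigns all eight vectors: the two-CPU node absorbs only two of them, and recomputing vecs_per_node from the remaining count lets the surplus flow to the larger nodes instead of being lost to a fixed affv / nodes split.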