author     Matt Redfearn <matt.redfearn@imgtec.com>  2016-04-25 03:14:24 -0400
committer  Thomas Gleixner <tglx@linutronix.de>      2016-05-02 07:42:50 -0400
commit     7cec18a3906b52e855c9386650c0226bbe594a4c
tree       5cf713d9d41b200cffd6be97da73dee70011f45f
parent     01292cea0df86ed4a1eb6450d6eda375ef925716
genirq: Add error code reporting to irq_{reserve,destroy}_ipi
Make these functions return appropriate error codes when something goes wrong.

Previously irq_destroy_ipi returned void, making it impossible to notify the
caller if the request could not be fulfilled. Patch 1 in the series added
another condition in which this could fail, in addition to the existing ones.
irq_reserve_ipi returned an unsigned int, meaning it could only return 0 on
failure and give the caller no indication as to why the request failed.

As time goes on there are likely to be further conditions added in which these
functions can fail. These APIs and the IPI IRQ domain are new in 4.6 and the
number of existing call sites is low, so changing the API now has little
impact on the code, while making it easier for these functions to grow over
time.

Signed-off-by: Matt Redfearn <matt.redfearn@imgtec.com>
Cc: linux-mips@linux-mips.org
Cc: jason@lakedaemon.net
Cc: marc.zyngier@arm.com
Cc: ralf@linux-mips.org
Cc: Qais Yousef <qsyousef@gmail.com>
Cc: lisa.parratt@imgtec.com
Cc: jiang.liu@linux.intel.com
Link: http://lkml.kernel.org/r/1461568464-31701-2-git-send-email-matt.redfearn@imgtec.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
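For illustration, a minimal caller-side sketch of the new calling convention
follows. It is not part of this patch; the function name and the ipi_domain
argument are hypothetical, and only the return-value handling reflects the
new signatures.

/* Hypothetical caller, showing how the new return values might be consumed. */
#include <linux/cpumask.h>
#include <linux/irqdomain.h>
#include <linux/printk.h>

static int example_ipi_setup(struct irq_domain *ipi_domain)
{
        int virq, ret;

        /* irq_reserve_ipi() now returns a virq on success or a negative errno. */
        virq = irq_reserve_ipi(ipi_domain, cpu_possible_mask);
        if (virq < 0) {
                pr_err("IPI reservation failed: %d\n", virq);
                return virq;
        }

        /* ... request handlers and use the IPI ... */

        /* irq_destroy_ipi() now reports why teardown failed instead of returning void. */
        ret = irq_destroy_ipi(virq, cpu_possible_mask);
        if (ret)
                pr_err("IPI destroy failed: %d\n", ret);

        return ret;
}

Returning a negative errno rather than 0 lets a caller distinguish an invalid
request (-EINVAL) from an allocation failure (-ENOMEM) and propagate the
error, which is the point of the change.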
-rw-r--r--  include/linux/irqdomain.h |  5
-rw-r--r--  kernel/irq/ipi.c          | 31
2 files changed, 19 insertions(+), 17 deletions(-)
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index e1b81d35e7a3..736abd74c135 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -346,9 +346,8 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr,
 			irq_hw_number_t *out_hwirq, unsigned int *out_type);
 
 /* IPI functions */
-unsigned int irq_reserve_ipi(struct irq_domain *domain,
-			     const struct cpumask *dest);
-void irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
+int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
+int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
 
 /* V2 interfaces to support hierarchy IRQ domains. */
 extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain,
diff --git a/kernel/irq/ipi.c b/kernel/irq/ipi.c
index bedc995ae214..c42742208e5e 100644
--- a/kernel/irq/ipi.c
+++ b/kernel/irq/ipi.c
@@ -19,9 +19,9 @@
  *
  * Allocate a virq that can be used to send IPI to any CPU in dest mask.
  *
- * On success it'll return linux irq number and 0 on failure
+ * On success it'll return linux irq number and error code on failure
  */
-unsigned int irq_reserve_ipi(struct irq_domain *domain,
+int irq_reserve_ipi(struct irq_domain *domain,
 			     const struct cpumask *dest)
 {
 	unsigned int nr_irqs, offset;
@@ -30,18 +30,18 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
 
 	if (!domain || !irq_domain_is_ipi(domain)) {
 		pr_warn("Reservation on a non IPI domain\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	if (!cpumask_subset(dest, cpu_possible_mask)) {
 		pr_warn("Reservation is not in possible_cpu_mask\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	nr_irqs = cpumask_weight(dest);
 	if (!nr_irqs) {
 		pr_warn("Reservation for empty destination mask\n");
-		return 0;
+		return -EINVAL;
 	}
 
 	if (irq_domain_is_ipi_single(domain)) {
@@ -72,14 +72,14 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
 		next = cpumask_next(next, dest);
 		if (next < nr_cpu_ids) {
 			pr_warn("Destination mask has holes\n");
-			return 0;
+			return -EINVAL;
 		}
 	}
 
 	virq = irq_domain_alloc_descs(-1, nr_irqs, 0, NUMA_NO_NODE);
 	if (virq <= 0) {
 		pr_warn("Can't reserve IPI, failed to alloc descs\n");
-		return 0;
+		return -ENOMEM;
 	}
 
 	virq = __irq_domain_alloc_irqs(domain, virq, nr_irqs, NUMA_NO_NODE,
@@ -100,7 +100,7 @@ unsigned int irq_reserve_ipi(struct irq_domain *domain,
 
 free_descs:
 	irq_free_descs(virq, nr_irqs);
-	return 0;
+	return -EBUSY;
 }
 
 /**
@@ -108,10 +108,12 @@ free_descs:
  * @irq: linux irq number to be destroyed
  * @dest: cpumask of cpus which should have the IPI removed
  *
- * Return the IPIs allocated with irq_reserve_ipi() to the system destroying
- * all virqs associated with them.
+ * The IPIs allocated with irq_reserve_ipi() are returned to the system
+ * destroying all virqs associated with them.
+ *
+ * Return 0 on success or error code on failure.
  */
-void irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
+int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 {
 	struct irq_data *data = irq_get_irq_data(irq);
 	struct cpumask *ipimask = data ? irq_data_get_affinity_mask(data) : NULL;
@@ -119,7 +121,7 @@ void irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 	unsigned int nr_irqs;
 
 	if (!irq || !data || !ipimask)
-		return;
+		return -EINVAL;
 
 	domain = data->domain;
 	if (WARN_ON(domain == NULL))
@@ -127,7 +129,7 @@ void irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 
 	if (!irq_domain_is_ipi(domain)) {
 		pr_warn("Trying to destroy a non IPI domain!\n");
-		return;
+		return -EINVAL;
 	}
 
 	if (WARN_ON(!cpumask_subset(dest, ipimask)))
@@ -135,7 +137,7 @@ void irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 		 * Must be destroying a subset of CPUs to which this IPI
 		 * was set up to target
 		 */
-		return;
+		return -EINVAL;
 
 	if (irq_domain_is_ipi_per_cpu(domain)) {
 		irq = irq + cpumask_first(dest) - data->common->ipi_offset;
@@ -145,6 +147,7 @@ void irq_destroy_ipi(unsigned int irq, const struct cpumask *dest)
 	}
 
 	irq_domain_free_irqs(irq, nr_irqs);
+	return 0;
 }
 
 /**