author     Bjorn Helgaas <bjorn.helgaas@hp.com>        2008-04-28 18:34:18 -0400
committer  Len Brown <len.brown@intel.com>             2008-04-29 03:22:24 -0400
commit     28ccffcf028777e830cbdc30bc54ba8a37e2fc23
tree       d71301fd3b0b5fa652f7ad18361a86a8d904a6c0 /drivers/pnp
parent     ecfa935a2f7ef89543608f3ca05340c158c9a236
PNP: reduce redundancy in pnp_assign_port() and others
Use a temporary "res" pointer to replace repeated lookups in
the pnp resource tables.
Signed-off-by: Bjorn Helgaas <bjorn.helgaas@hp.com>
Acked-By: Rene Herman <rene.herman@gmail.com>
Signed-off-by: Len Brown <len.brown@intel.com>
Diffstat (limited to 'drivers/pnp')
-rw-r--r--   drivers/pnp/manager.c   123
1 files changed, 56 insertions, 67 deletions
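
Before the patch itself, here is a minimal standalone C sketch of the pattern the commit message describes: take the address of the table entry once in a temporary "res" pointer instead of re-indexing the resource array for every field access. The struct and function names below (res_entry, res_table, assign_repeated, assign_with_pointer) are illustrative stand-ins, not code from drivers/pnp.

#include <stdio.h>

/* Stand-in for the three struct resource fields the patch touches. */
struct res_entry {
        unsigned long long start;
        unsigned long long end;
        unsigned long flags;
};

struct res_table {
        struct res_entry port[8];
};

/* Before: every access re-indexes the same table entry. */
static void assign_repeated(struct res_table *t, int idx)
{
        t->port[idx].flags |= 0x1;
        t->port[idx].start = 0x100;
        t->port[idx].end = t->port[idx].start + 8 - 1;
}

/* After: a temporary "res" pointer replaces the repeated lookups. */
static void assign_with_pointer(struct res_table *t, int idx)
{
        struct res_entry *res = &t->port[idx];

        res->flags |= 0x1;
        res->start = 0x100;
        res->end = res->start + 8 - 1;
}

int main(void)
{
        struct res_table t = { 0 };

        assign_repeated(&t, 0);
        assign_with_pointer(&t, 1);
        printf("port 1: %#llx-%#llx flags %#lx\n",
               t.port[1].start, t.port[1].end, t.port[1].flags);
        return 0;
}

Both versions do the same work; the pointer form is shorter and easier to read, and it is exactly what the hunks below apply to the port, mem, irq, and dma resource tables.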
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index 8267efd679a1..be21dec539d9 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -19,8 +19,7 @@ DEFINE_MUTEX(pnp_res_mutex);
 
 static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
 {
-        resource_size_t *start, *end;
-        unsigned long *flags;
+        struct resource *res;
 
         if (idx >= PNP_MAX_PORT) {
                 dev_err(&dev->dev, "too many I/O port resources\n");
@@ -28,49 +27,46 @@ static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
                 return 1;
         }
 
-        start = &dev->res.port_resource[idx].start;
-        end = &dev->res.port_resource[idx].end;
-        flags = &dev->res.port_resource[idx].flags;
+        res = &dev->res.port_resource[idx];
 
         /* check if this resource has been manually set, if so skip */
-        if (!(dev->res.port_resource[idx].flags & IORESOURCE_AUTO)) {
+        if (!(res->flags & IORESOURCE_AUTO)) {
                 dev_dbg(&dev->dev, " io %d already set to %#llx-%#llx "
-                        "flags %#lx\n", idx, (unsigned long long) *start,
-                        (unsigned long long) *end, *flags);
+                        "flags %#lx\n", idx, (unsigned long long) res->start,
+                        (unsigned long long) res->end, res->flags);
                 return 1;
         }
 
         /* set the initial values */
-        *flags |= rule->flags | IORESOURCE_IO;
-        *flags &= ~IORESOURCE_UNSET;
+        res->flags |= rule->flags | IORESOURCE_IO;
+        res->flags &= ~IORESOURCE_UNSET;
 
         if (!rule->size) {
-                *flags |= IORESOURCE_DISABLED;
+                res->flags |= IORESOURCE_DISABLED;
                 dev_dbg(&dev->dev, " io %d disabled\n", idx);
                 return 1; /* skip disabled resource requests */
         }
 
-        *start = rule->min;
-        *end = *start + rule->size - 1;
+        res->start = rule->min;
+        res->end = res->start + rule->size - 1;
 
         /* run through until pnp_check_port is happy */
         while (!pnp_check_port(dev, idx)) {
-                *start += rule->align;
-                *end = *start + rule->size - 1;
-                if (*start > rule->max || !rule->align) {
+                res->start += rule->align;
+                res->end = res->start + rule->size - 1;
+                if (res->start > rule->max || !rule->align) {
                         dev_dbg(&dev->dev, " couldn't assign io %d\n", idx);
                         return 0;
                 }
         }
         dev_dbg(&dev->dev, " assign io %d %#llx-%#llx\n", idx,
-                (unsigned long long) *start, (unsigned long long) *end);
+                (unsigned long long) res->start, (unsigned long long) res->end);
         return 1;
 }
 
 static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
 {
-        resource_size_t *start, *end;
-        unsigned long *flags;
+        struct resource *res;
 
         if (idx >= PNP_MAX_MEM) {
                 dev_err(&dev->dev, "too many memory resources\n");
@@ -78,59 +74,56 @@ static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
                 return 1;
         }
 
-        start = &dev->res.mem_resource[idx].start;
-        end = &dev->res.mem_resource[idx].end;
-        flags = &dev->res.mem_resource[idx].flags;
+        res = &dev->res.mem_resource[idx];
 
         /* check if this resource has been manually set, if so skip */
-        if (!(dev->res.mem_resource[idx].flags & IORESOURCE_AUTO)) {
+        if (!(res->flags & IORESOURCE_AUTO)) {
                 dev_dbg(&dev->dev, " mem %d already set to %#llx-%#llx "
-                        "flags %#lx\n", idx, (unsigned long long) *start,
-                        (unsigned long long) *end, *flags);
+                        "flags %#lx\n", idx, (unsigned long long) res->start,
+                        (unsigned long long) res->end, res->flags);
                 return 1;
         }
 
         /* set the initial values */
-        *flags |= rule->flags | IORESOURCE_MEM;
-        *flags &= ~IORESOURCE_UNSET;
+        res->flags |= rule->flags | IORESOURCE_MEM;
+        res->flags &= ~IORESOURCE_UNSET;
 
         /* convert pnp flags to standard Linux flags */
         if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
-                *flags |= IORESOURCE_READONLY;
+                res->flags |= IORESOURCE_READONLY;
         if (rule->flags & IORESOURCE_MEM_CACHEABLE)
-                *flags |= IORESOURCE_CACHEABLE;
+                res->flags |= IORESOURCE_CACHEABLE;
         if (rule->flags & IORESOURCE_MEM_RANGELENGTH)
-                *flags |= IORESOURCE_RANGELENGTH;
+                res->flags |= IORESOURCE_RANGELENGTH;
         if (rule->flags & IORESOURCE_MEM_SHADOWABLE)
-                *flags |= IORESOURCE_SHADOWABLE;
+                res->flags |= IORESOURCE_SHADOWABLE;
 
         if (!rule->size) {
-                *flags |= IORESOURCE_DISABLED;
+                res->flags |= IORESOURCE_DISABLED;
                 dev_dbg(&dev->dev, " mem %d disabled\n", idx);
                 return 1; /* skip disabled resource requests */
         }
 
-        *start = rule->min;
-        *end = *start + rule->size - 1;
+        res->start = rule->min;
+        res->end = res->start + rule->size - 1;
 
         /* run through until pnp_check_mem is happy */
         while (!pnp_check_mem(dev, idx)) {
-                *start += rule->align;
-                *end = *start + rule->size - 1;
-                if (*start > rule->max || !rule->align) {
+                res->start += rule->align;
+                res->end = res->start + rule->size - 1;
+                if (res->start > rule->max || !rule->align) {
                         dev_dbg(&dev->dev, " couldn't assign mem %d\n", idx);
                         return 0;
                 }
         }
         dev_dbg(&dev->dev, " assign mem %d %#llx-%#llx\n", idx,
-                (unsigned long long) *start, (unsigned long long) *end);
+                (unsigned long long) res->start, (unsigned long long) res->end);
         return 1;
 }
 
 static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
 {
-        resource_size_t *start, *end;
-        unsigned long *flags;
+        struct resource *res;
         int i;
 
         /* IRQ priority: this table is good for i386 */
@@ -144,40 +137,39 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
                 return 1;
         }
 
-        start = &dev->res.irq_resource[idx].start;
-        end = &dev->res.irq_resource[idx].end;
-        flags = &dev->res.irq_resource[idx].flags;
+        res = &dev->res.irq_resource[idx];
 
         /* check if this resource has been manually set, if so skip */
-        if (!(dev->res.irq_resource[idx].flags & IORESOURCE_AUTO)) {
+        if (!(res->flags & IORESOURCE_AUTO)) {
                 dev_dbg(&dev->dev, " irq %d already set to %d flags %#lx\n",
-                        idx, (int) *start, *flags);
+                        idx, (int) res->start, res->flags);
                 return 1;
         }
 
         /* set the initial values */
-        *flags |= rule->flags | IORESOURCE_IRQ;
-        *flags &= ~IORESOURCE_UNSET;
+        res->flags |= rule->flags | IORESOURCE_IRQ;
+        res->flags &= ~IORESOURCE_UNSET;
 
         if (bitmap_empty(rule->map, PNP_IRQ_NR)) {
-                *flags |= IORESOURCE_DISABLED;
+                res->flags |= IORESOURCE_DISABLED;
                 dev_dbg(&dev->dev, " irq %d disabled\n", idx);
                 return 1; /* skip disabled resource requests */
         }
 
         /* TBD: need check for >16 IRQ */
-        *start = find_next_bit(rule->map, PNP_IRQ_NR, 16);
-        if (*start < PNP_IRQ_NR) {
-                *end = *start;
-                dev_dbg(&dev->dev, " assign irq %d %d\n", idx, (int) *start);
+        res->start = find_next_bit(rule->map, PNP_IRQ_NR, 16);
+        if (res->start < PNP_IRQ_NR) {
+                res->end = res->start;
+                dev_dbg(&dev->dev, " assign irq %d %d\n", idx,
+                        (int) res->start);
                 return 1;
         }
         for (i = 0; i < 16; i++) {
                 if (test_bit(xtab[i], rule->map)) {
-                        *start = *end = xtab[i];
+                        res->start = res->end = xtab[i];
                         if (pnp_check_irq(dev, idx)) {
                                 dev_dbg(&dev->dev, " assign irq %d %d\n", idx,
-                                        (int) *start);
+                                        (int) res->start);
                                 return 1;
                         }
                 }
@@ -188,8 +180,7 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
 
 static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
 {
-        resource_size_t *start, *end;
-        unsigned long *flags;
+        struct resource *res;
         int i;
 
         /* DMA priority: this table is good for i386 */
@@ -202,35 +193,33 @@ static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
                 return;
         }
 
-        start = &dev->res.dma_resource[idx].start;
-        end = &dev->res.dma_resource[idx].end;
-        flags = &dev->res.dma_resource[idx].flags;
+        res = &dev->res.dma_resource[idx];
 
         /* check if this resource has been manually set, if so skip */
-        if (!(dev->res.dma_resource[idx].flags & IORESOURCE_AUTO)) {
+        if (!(res->flags & IORESOURCE_AUTO)) {
                 dev_dbg(&dev->dev, " dma %d already set to %d flags %#lx\n",
-                        idx, (int) *start, *flags);
+                        idx, (int) res->start, res->flags);
                 return;
         }
 
         /* set the initial values */
-        *flags |= rule->flags | IORESOURCE_DMA;
-        *flags &= ~IORESOURCE_UNSET;
+        res->flags |= rule->flags | IORESOURCE_DMA;
+        res->flags &= ~IORESOURCE_UNSET;
 
         for (i = 0; i < 8; i++) {
                 if (rule->map & (1 << xtab[i])) {
-                        *start = *end = xtab[i];
+                        res->start = res->end = xtab[i];
                         if (pnp_check_dma(dev, idx)) {
                                 dev_dbg(&dev->dev, " assign dma %d %d\n", idx,
-                                        (int) *start);
+                                        (int) res->start);
                                 return;
                         }
                 }
         }
 #ifdef MAX_DMA_CHANNELS
-        *start = *end = MAX_DMA_CHANNELS;
+        res->start = res->end = MAX_DMA_CHANNELS;
 #endif
-        *flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+        res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
         dev_dbg(&dev->dev, " disable dma %d\n", idx);
 }
 