path: root/drivers/pnp/manager.c
Diffstat (limited to 'drivers/pnp/manager.c')
-rw-r--r--  drivers/pnp/manager.c  356
1 file changed, 168 insertions(+), 188 deletions(-)
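Note: the rewrite below replaces direct indexing of the fixed pnp_resource_table arrays (port_resource[], mem_resource[], irq_resource[], dma_resource[]) with a pnp_get_pnp_resource() accessor that returns a struct pnp_resource wrapping a plain struct resource, plus a common pnp_init_resource() initializer. The accessor itself is not part of manager.c (it lives elsewhere in this series, e.g. drivers/pnp/resource.c), so the following is only a minimal illustrative sketch of how it is presumably implemented, inferred from the dev->res->irq/dma/port/mem arrays used by pnp_init_resources() further down; treat the struct layout as an assumption, not the authoritative definition.

/*
 * Hedged sketch, not code from this patch: shows the kind of bounds-checked
 * lookup manager.c relies on.  PNP_MAX_* and the dev->res arrays are taken
 * from the diff below; everything else is assumed.
 */
struct pnp_resource *pnp_get_pnp_resource(struct pnp_dev *dev,
					  unsigned int type, unsigned int num)
{
	struct pnp_resource_table *res = dev->res;

	switch (type) {
	case IORESOURCE_IO:
		return num < PNP_MAX_PORT ? &res->port[num] : NULL;
	case IORESOURCE_MEM:
		return num < PNP_MAX_MEM ? &res->mem[num] : NULL;
	case IORESOURCE_IRQ:
		return num < PNP_MAX_IRQ ? &res->irq[num] : NULL;
	case IORESOURCE_DMA:
		return num < PNP_MAX_DMA ? &res->dma[num] : NULL;
	}
	return NULL;	/* unknown resource type */
}

A NULL return here is what makes the "too many ..." branches in pnp_assign_port/mem/irq/dma() below fire instead of the old idx >= PNP_MAX_* checks.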
diff --git a/drivers/pnp/manager.c b/drivers/pnp/manager.c
index c28caf272c11..bea0914ff947 100644
--- a/drivers/pnp/manager.c
+++ b/drivers/pnp/manager.c
@@ -19,100 +19,118 @@ DEFINE_MUTEX(pnp_res_mutex);
 
 static int pnp_assign_port(struct pnp_dev *dev, struct pnp_port *rule, int idx)
 {
-	resource_size_t *start, *end;
-	unsigned long *flags;
+	struct pnp_resource *pnp_res;
+	struct resource *res;
 
-	if (idx >= PNP_MAX_PORT) {
+	pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IO, idx);
+	if (!pnp_res) {
 		dev_err(&dev->dev, "too many I/O port resources\n");
 		/* pretend we were successful so at least the manager won't try again */
 		return 1;
 	}
 
+	res = &pnp_res->res;
+
 	/* check if this resource has been manually set, if so skip */
-	if (!(dev->res.port_resource[idx].flags & IORESOURCE_AUTO))
+	if (!(res->flags & IORESOURCE_AUTO)) {
+		dev_dbg(&dev->dev, "  io %d already set to %#llx-%#llx "
+			"flags %#lx\n", idx, (unsigned long long) res->start,
+			(unsigned long long) res->end, res->flags);
 		return 1;
-
-	start = &dev->res.port_resource[idx].start;
-	end = &dev->res.port_resource[idx].end;
-	flags = &dev->res.port_resource[idx].flags;
+	}
 
 	/* set the initial values */
-	*flags |= rule->flags | IORESOURCE_IO;
-	*flags &= ~IORESOURCE_UNSET;
+	pnp_res->index = idx;
+	res->flags |= rule->flags | IORESOURCE_IO;
+	res->flags &= ~IORESOURCE_UNSET;
 
 	if (!rule->size) {
-		*flags |= IORESOURCE_DISABLED;
+		res->flags |= IORESOURCE_DISABLED;
+		dev_dbg(&dev->dev, "  io %d disabled\n", idx);
 		return 1;	/* skip disabled resource requests */
 	}
 
-	*start = rule->min;
-	*end = *start + rule->size - 1;
+	res->start = rule->min;
+	res->end = res->start + rule->size - 1;
 
 	/* run through until pnp_check_port is happy */
-	while (!pnp_check_port(dev, idx)) {
-		*start += rule->align;
-		*end = *start + rule->size - 1;
-		if (*start > rule->max || !rule->align)
+	while (!pnp_check_port(dev, res)) {
+		res->start += rule->align;
+		res->end = res->start + rule->size - 1;
+		if (res->start > rule->max || !rule->align) {
+			dev_dbg(&dev->dev, "  couldn't assign io %d\n", idx);
 			return 0;
+		}
 	}
+	dev_dbg(&dev->dev, "  assign io %d %#llx-%#llx\n", idx,
+		(unsigned long long) res->start, (unsigned long long) res->end);
 	return 1;
 }
 
 static int pnp_assign_mem(struct pnp_dev *dev, struct pnp_mem *rule, int idx)
 {
-	resource_size_t *start, *end;
-	unsigned long *flags;
+	struct pnp_resource *pnp_res;
+	struct resource *res;
 
-	if (idx >= PNP_MAX_MEM) {
+	pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_MEM, idx);
+	if (!pnp_res) {
 		dev_err(&dev->dev, "too many memory resources\n");
 		/* pretend we were successful so at least the manager won't try again */
 		return 1;
 	}
 
+	res = &pnp_res->res;
+
 	/* check if this resource has been manually set, if so skip */
-	if (!(dev->res.mem_resource[idx].flags & IORESOURCE_AUTO))
+	if (!(res->flags & IORESOURCE_AUTO)) {
+		dev_dbg(&dev->dev, "  mem %d already set to %#llx-%#llx "
+			"flags %#lx\n", idx, (unsigned long long) res->start,
+			(unsigned long long) res->end, res->flags);
 		return 1;
-
-	start = &dev->res.mem_resource[idx].start;
-	end = &dev->res.mem_resource[idx].end;
-	flags = &dev->res.mem_resource[idx].flags;
+	}
 
 	/* set the initial values */
-	*flags |= rule->flags | IORESOURCE_MEM;
-	*flags &= ~IORESOURCE_UNSET;
+	pnp_res->index = idx;
+	res->flags |= rule->flags | IORESOURCE_MEM;
+	res->flags &= ~IORESOURCE_UNSET;
 
 	/* convert pnp flags to standard Linux flags */
 	if (!(rule->flags & IORESOURCE_MEM_WRITEABLE))
-		*flags |= IORESOURCE_READONLY;
+		res->flags |= IORESOURCE_READONLY;
 	if (rule->flags & IORESOURCE_MEM_CACHEABLE)
-		*flags |= IORESOURCE_CACHEABLE;
+		res->flags |= IORESOURCE_CACHEABLE;
 	if (rule->flags & IORESOURCE_MEM_RANGELENGTH)
-		*flags |= IORESOURCE_RANGELENGTH;
+		res->flags |= IORESOURCE_RANGELENGTH;
 	if (rule->flags & IORESOURCE_MEM_SHADOWABLE)
-		*flags |= IORESOURCE_SHADOWABLE;
+		res->flags |= IORESOURCE_SHADOWABLE;
 
 	if (!rule->size) {
-		*flags |= IORESOURCE_DISABLED;
+		res->flags |= IORESOURCE_DISABLED;
+		dev_dbg(&dev->dev, "  mem %d disabled\n", idx);
 		return 1;	/* skip disabled resource requests */
 	}
 
-	*start = rule->min;
-	*end = *start + rule->size - 1;
+	res->start = rule->min;
+	res->end = res->start + rule->size - 1;
 
 	/* run through until pnp_check_mem is happy */
-	while (!pnp_check_mem(dev, idx)) {
-		*start += rule->align;
-		*end = *start + rule->size - 1;
-		if (*start > rule->max || !rule->align)
+	while (!pnp_check_mem(dev, res)) {
+		res->start += rule->align;
+		res->end = res->start + rule->size - 1;
+		if (res->start > rule->max || !rule->align) {
+			dev_dbg(&dev->dev, "  couldn't assign mem %d\n", idx);
 			return 0;
+		}
 	}
+	dev_dbg(&dev->dev, "  assign mem %d %#llx-%#llx\n", idx,
+		(unsigned long long) res->start, (unsigned long long) res->end);
 	return 1;
 }
 
 static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
 {
-	resource_size_t *start, *end;
-	unsigned long *flags;
+	struct pnp_resource *pnp_res;
+	struct resource *res;
 	int i;
 
 	/* IRQ priority: this table is good for i386 */
@@ -120,49 +138,59 @@ static int pnp_assign_irq(struct pnp_dev *dev, struct pnp_irq *rule, int idx)
 		5, 10, 11, 12, 9, 14, 15, 7, 3, 4, 13, 0, 1, 6, 8, 2
 	};
 
-	if (idx >= PNP_MAX_IRQ) {
+	pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_IRQ, idx);
+	if (!pnp_res) {
 		dev_err(&dev->dev, "too many IRQ resources\n");
 		/* pretend we were successful so at least the manager won't try again */
 		return 1;
 	}
 
+	res = &pnp_res->res;
+
 	/* check if this resource has been manually set, if so skip */
-	if (!(dev->res.irq_resource[idx].flags & IORESOURCE_AUTO))
+	if (!(res->flags & IORESOURCE_AUTO)) {
+		dev_dbg(&dev->dev, "  irq %d already set to %d flags %#lx\n",
+			idx, (int) res->start, res->flags);
 		return 1;
-
-	start = &dev->res.irq_resource[idx].start;
-	end = &dev->res.irq_resource[idx].end;
-	flags = &dev->res.irq_resource[idx].flags;
+	}
 
 	/* set the initial values */
-	*flags |= rule->flags | IORESOURCE_IRQ;
-	*flags &= ~IORESOURCE_UNSET;
+	pnp_res->index = idx;
+	res->flags |= rule->flags | IORESOURCE_IRQ;
+	res->flags &= ~IORESOURCE_UNSET;
 
 	if (bitmap_empty(rule->map, PNP_IRQ_NR)) {
-		*flags |= IORESOURCE_DISABLED;
+		res->flags |= IORESOURCE_DISABLED;
+		dev_dbg(&dev->dev, "  irq %d disabled\n", idx);
 		return 1;	/* skip disabled resource requests */
 	}
 
 	/* TBD: need check for >16 IRQ */
-	*start = find_next_bit(rule->map, PNP_IRQ_NR, 16);
-	if (*start < PNP_IRQ_NR) {
-		*end = *start;
+	res->start = find_next_bit(rule->map, PNP_IRQ_NR, 16);
+	if (res->start < PNP_IRQ_NR) {
+		res->end = res->start;
+		dev_dbg(&dev->dev, "  assign irq %d %d\n", idx,
+			(int) res->start);
 		return 1;
 	}
 	for (i = 0; i < 16; i++) {
 		if (test_bit(xtab[i], rule->map)) {
-			*start = *end = xtab[i];
-			if (pnp_check_irq(dev, idx))
+			res->start = res->end = xtab[i];
+			if (pnp_check_irq(dev, res)) {
+				dev_dbg(&dev->dev, "  assign irq %d %d\n", idx,
+					(int) res->start);
 				return 1;
+			}
 		}
 	}
+	dev_dbg(&dev->dev, "  couldn't assign irq %d\n", idx);
 	return 0;
 }
 
 static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
 {
-	resource_size_t *start, *end;
-	unsigned long *flags;
+	struct pnp_resource *pnp_res;
+	struct resource *res;
 	int i;
 
 	/* DMA priority: this table is good for i386 */
@@ -170,71 +198,89 @@ static void pnp_assign_dma(struct pnp_dev *dev, struct pnp_dma *rule, int idx)
 		1, 3, 5, 6, 7, 0, 2, 4
 	};
 
-	if (idx >= PNP_MAX_DMA) {
+	pnp_res = pnp_get_pnp_resource(dev, IORESOURCE_DMA, idx);
+	if (!pnp_res) {
 		dev_err(&dev->dev, "too many DMA resources\n");
 		return;
 	}
 
+	res = &pnp_res->res;
+
 	/* check if this resource has been manually set, if so skip */
-	if (!(dev->res.dma_resource[idx].flags & IORESOURCE_AUTO))
+	if (!(res->flags & IORESOURCE_AUTO)) {
+		dev_dbg(&dev->dev, "  dma %d already set to %d flags %#lx\n",
+			idx, (int) res->start, res->flags);
 		return;
-
-	start = &dev->res.dma_resource[idx].start;
-	end = &dev->res.dma_resource[idx].end;
-	flags = &dev->res.dma_resource[idx].flags;
+	}
 
 	/* set the initial values */
-	*flags |= rule->flags | IORESOURCE_DMA;
-	*flags &= ~IORESOURCE_UNSET;
+	pnp_res->index = idx;
+	res->flags |= rule->flags | IORESOURCE_DMA;
+	res->flags &= ~IORESOURCE_UNSET;
 
 	for (i = 0; i < 8; i++) {
 		if (rule->map & (1 << xtab[i])) {
-			*start = *end = xtab[i];
-			if (pnp_check_dma(dev, idx))
+			res->start = res->end = xtab[i];
+			if (pnp_check_dma(dev, res)) {
+				dev_dbg(&dev->dev, "  assign dma %d %d\n", idx,
+					(int) res->start);
 				return;
+			}
 		}
 	}
 #ifdef MAX_DMA_CHANNELS
-	*start = *end = MAX_DMA_CHANNELS;
+	res->start = res->end = MAX_DMA_CHANNELS;
 #endif
-	*flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+	res->flags |= IORESOURCE_UNSET | IORESOURCE_DISABLED;
+	dev_dbg(&dev->dev, "  disable dma %d\n", idx);
+}
+
+void pnp_init_resource(struct resource *res)
+{
+	unsigned long type;
+
+	type = res->flags & (IORESOURCE_IO | IORESOURCE_MEM |
+			     IORESOURCE_IRQ | IORESOURCE_DMA);
+
+	res->name = NULL;
+	res->flags = type | IORESOURCE_AUTO | IORESOURCE_UNSET;
+	if (type == IORESOURCE_IRQ || type == IORESOURCE_DMA) {
+		res->start = -1;
+		res->end = -1;
+	} else {
+		res->start = 0;
+		res->end = 0;
+	}
 }
 
 /**
  * pnp_init_resources - Resets a resource table to default values.
  * @table: pointer to the desired resource table
  */
-void pnp_init_resource_table(struct pnp_resource_table *table)
+void pnp_init_resources(struct pnp_dev *dev)
 {
+	struct resource *res;
 	int idx;
 
 	for (idx = 0; idx < PNP_MAX_IRQ; idx++) {
-		table->irq_resource[idx].name = NULL;
-		table->irq_resource[idx].start = -1;
-		table->irq_resource[idx].end = -1;
-		table->irq_resource[idx].flags =
-		    IORESOURCE_IRQ | IORESOURCE_AUTO | IORESOURCE_UNSET;
+		res = &dev->res->irq[idx].res;
+		res->flags = IORESOURCE_IRQ;
+		pnp_init_resource(res);
 	}
 	for (idx = 0; idx < PNP_MAX_DMA; idx++) {
-		table->dma_resource[idx].name = NULL;
-		table->dma_resource[idx].start = -1;
-		table->dma_resource[idx].end = -1;
-		table->dma_resource[idx].flags =
-		    IORESOURCE_DMA | IORESOURCE_AUTO | IORESOURCE_UNSET;
+		res = &dev->res->dma[idx].res;
+		res->flags = IORESOURCE_DMA;
+		pnp_init_resource(res);
 	}
 	for (idx = 0; idx < PNP_MAX_PORT; idx++) {
-		table->port_resource[idx].name = NULL;
-		table->port_resource[idx].start = 0;
-		table->port_resource[idx].end = 0;
-		table->port_resource[idx].flags =
-		    IORESOURCE_IO | IORESOURCE_AUTO | IORESOURCE_UNSET;
+		res = &dev->res->port[idx].res;
+		res->flags = IORESOURCE_IO;
+		pnp_init_resource(res);
 	}
 	for (idx = 0; idx < PNP_MAX_MEM; idx++) {
-		table->mem_resource[idx].name = NULL;
-		table->mem_resource[idx].start = 0;
-		table->mem_resource[idx].end = 0;
-		table->mem_resource[idx].flags =
-		    IORESOURCE_MEM | IORESOURCE_AUTO | IORESOURCE_UNSET;
+		res = &dev->res->mem[idx].res;
+		res->flags = IORESOURCE_MEM;
+		pnp_init_resource(res);
 	}
 }
 
@@ -242,41 +288,38 @@ void pnp_init_resource_table(struct pnp_resource_table *table)
  * pnp_clean_resources - clears resources that were not manually set
  * @res: the resources to clean
  */
-static void pnp_clean_resource_table(struct pnp_resource_table *res)
+static void pnp_clean_resource_table(struct pnp_dev *dev)
 {
+	struct resource *res;
 	int idx;
 
 	for (idx = 0; idx < PNP_MAX_IRQ; idx++) {
-		if (!(res->irq_resource[idx].flags & IORESOURCE_AUTO))
-			continue;
-		res->irq_resource[idx].start = -1;
-		res->irq_resource[idx].end = -1;
-		res->irq_resource[idx].flags =
-		    IORESOURCE_IRQ | IORESOURCE_AUTO | IORESOURCE_UNSET;
+		res = &dev->res->irq[idx].res;
+		if (res->flags & IORESOURCE_AUTO) {
+			res->flags = IORESOURCE_IRQ;
+			pnp_init_resource(res);
+		}
 	}
 	for (idx = 0; idx < PNP_MAX_DMA; idx++) {
-		if (!(res->dma_resource[idx].flags & IORESOURCE_AUTO))
-			continue;
-		res->dma_resource[idx].start = -1;
-		res->dma_resource[idx].end = -1;
-		res->dma_resource[idx].flags =
-		    IORESOURCE_DMA | IORESOURCE_AUTO | IORESOURCE_UNSET;
+		res = &dev->res->dma[idx].res;
+		if (res->flags & IORESOURCE_AUTO) {
+			res->flags = IORESOURCE_DMA;
+			pnp_init_resource(res);
+		}
 	}
 	for (idx = 0; idx < PNP_MAX_PORT; idx++) {
-		if (!(res->port_resource[idx].flags & IORESOURCE_AUTO))
-			continue;
-		res->port_resource[idx].start = 0;
-		res->port_resource[idx].end = 0;
-		res->port_resource[idx].flags =
-		    IORESOURCE_IO | IORESOURCE_AUTO | IORESOURCE_UNSET;
+		res = &dev->res->port[idx].res;
+		if (res->flags & IORESOURCE_AUTO) {
+			res->flags = IORESOURCE_IO;
+			pnp_init_resource(res);
+		}
 	}
 	for (idx = 0; idx < PNP_MAX_MEM; idx++) {
-		if (!(res->mem_resource[idx].flags & IORESOURCE_AUTO))
-			continue;
-		res->mem_resource[idx].start = 0;
-		res->mem_resource[idx].end = 0;
-		res->mem_resource[idx].flags =
-		    IORESOURCE_MEM | IORESOURCE_AUTO | IORESOURCE_UNSET;
+		res = &dev->res->mem[idx].res;
+		if (res->flags & IORESOURCE_AUTO) {
+			res->flags = IORESOURCE_MEM;
+			pnp_init_resource(res);
+		}
 	}
 }
 
@@ -298,9 +341,11 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
 	if (!pnp_can_configure(dev))
 		return -ENODEV;
 
+	dbg_pnp_show_resources(dev, "before pnp_assign_resources");
 	mutex_lock(&pnp_res_mutex);
-	pnp_clean_resource_table(&dev->res);	/* start with a fresh slate */
+	pnp_clean_resource_table(dev);
 	if (dev->independent) {
+		dev_dbg(&dev->dev, "assigning independent options\n");
 		port = dev->independent->port;
 		mem = dev->independent->mem;
 		irq = dev->independent->irq;
@@ -333,6 +378,8 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
 	if (depnum) {
 		struct pnp_option *dep;
 		int i;
+
+		dev_dbg(&dev->dev, "assigning dependent option %d\n", depnum);
 		for (i = 1, dep = dev->dependent; i < depnum;
 		     i++, dep = dep->next)
 			if (!dep)
@@ -368,68 +415,17 @@ static int pnp_assign_resources(struct pnp_dev *dev, int depnum)
 		goto fail;
 
 	mutex_unlock(&pnp_res_mutex);
+	dbg_pnp_show_resources(dev, "after pnp_assign_resources");
 	return 1;
 
 fail:
-	pnp_clean_resource_table(&dev->res);
+	pnp_clean_resource_table(dev);
 	mutex_unlock(&pnp_res_mutex);
+	dbg_pnp_show_resources(dev, "after pnp_assign_resources (failed)");
 	return 0;
 }
 
 /**
- * pnp_manual_config_dev - Disables Auto Config and Manually sets the resource table
- * @dev: pointer to the desired device
- * @res: pointer to the new resource config
- * @mode: 0 or PNP_CONFIG_FORCE
- *
- * This function can be used by drivers that want to manually set thier resources.
- */
-int pnp_manual_config_dev(struct pnp_dev *dev, struct pnp_resource_table *res,
-			  int mode)
-{
-	int i;
-	struct pnp_resource_table *bak;
-
-	if (!pnp_can_configure(dev))
-		return -ENODEV;
-	bak = pnp_alloc(sizeof(struct pnp_resource_table));
-	if (!bak)
-		return -ENOMEM;
-	*bak = dev->res;
-
-	mutex_lock(&pnp_res_mutex);
-	dev->res = *res;
-	if (!(mode & PNP_CONFIG_FORCE)) {
-		for (i = 0; i < PNP_MAX_PORT; i++) {
-			if (!pnp_check_port(dev, i))
-				goto fail;
-		}
-		for (i = 0; i < PNP_MAX_MEM; i++) {
-			if (!pnp_check_mem(dev, i))
-				goto fail;
-		}
-		for (i = 0; i < PNP_MAX_IRQ; i++) {
-			if (!pnp_check_irq(dev, i))
-				goto fail;
-		}
-		for (i = 0; i < PNP_MAX_DMA; i++) {
-			if (!pnp_check_dma(dev, i))
-				goto fail;
-		}
-	}
-	mutex_unlock(&pnp_res_mutex);
-
-	kfree(bak);
-	return 0;
-
-fail:
-	dev->res = *bak;
-	mutex_unlock(&pnp_res_mutex);
-	kfree(bak);
-	return -EINVAL;
-}
-
-/**
  * pnp_auto_config_dev - automatically assigns resources to a device
  * @dev: pointer to the desired device
  */
@@ -473,7 +469,8 @@ int pnp_start_dev(struct pnp_dev *dev)
 		return -EINVAL;
 	}
 
-	if (dev->protocol->set(dev, &dev->res) < 0) {
+	dbg_pnp_show_resources(dev, "pnp_start_dev");
+	if (dev->protocol->set(dev) < 0) {
 		dev_err(&dev->dev, "activation failed\n");
 		return -EIO;
 	}
@@ -549,30 +546,13 @@ int pnp_disable_dev(struct pnp_dev *dev)
 
 	/* release the resources so that other devices can use them */
 	mutex_lock(&pnp_res_mutex);
-	pnp_clean_resource_table(&dev->res);
+	pnp_clean_resource_table(dev);
 	mutex_unlock(&pnp_res_mutex);
 
 	return 0;
 }
 
-/**
- * pnp_resource_change - change one resource
- * @resource: pointer to resource to be changed
- * @start: start of region
- * @size: size of region
- */
-void pnp_resource_change(struct resource *resource, resource_size_t start,
-			 resource_size_t size)
-{
-	resource->flags &= ~(IORESOURCE_AUTO | IORESOURCE_UNSET);
-	resource->start = start;
-	resource->end = start + size - 1;
-}
-
-EXPORT_SYMBOL(pnp_manual_config_dev);
 EXPORT_SYMBOL(pnp_start_dev);
 EXPORT_SYMBOL(pnp_stop_dev);
 EXPORT_SYMBOL(pnp_activate_dev);
 EXPORT_SYMBOL(pnp_disable_dev);
-EXPORT_SYMBOL(pnp_resource_change);
-EXPORT_SYMBOL(pnp_init_resource_table);
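Note: the call sites above imply matching prototype changes outside this file (the declarations live in drivers/pnp/base.h and include/linux/pnp.h, which are not part of this diff). The following is a hedged sketch of the post-patch declarations as manager.c appears to assume them; names and signatures are inferred from the call sites above and should be checked against the actual headers.

/* Assumed post-patch declarations; inferred from the call sites in this diff. */
struct pnp_resource *pnp_get_pnp_resource(struct pnp_dev *dev,
					  unsigned int type, unsigned int num);
void pnp_init_resource(struct resource *res);
void pnp_init_resources(struct pnp_dev *dev);
void dbg_pnp_show_resources(struct pnp_dev *dev, char *desc);

/* The check helpers now take the candidate resource instead of a table index. */
int pnp_check_port(struct pnp_dev *dev, struct resource *res);
int pnp_check_mem(struct pnp_dev *dev, struct resource *res);
int pnp_check_irq(struct pnp_dev *dev, struct resource *res);
int pnp_check_dma(struct pnp_dev *dev, struct resource *res);

/* And in struct pnp_protocol, ->set() loses its resource-table argument:
 *	int (*set)(struct pnp_dev *dev);    (was: set(dev, &dev->res))
 */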