-rw-r--r--  arch/i386/kernel/cpu/cpufreq/gx-suspmod.c | 2
-rw-r--r--  arch/i386/pci/common.c | 1
-rw-r--r--  arch/i386/pci/i386.c | 11
-rw-r--r--  arch/sparc64/Kconfig | 18
-rw-r--r--  arch/sparc64/kernel/entry.S | 21
-rw-r--r--  arch/sparc64/kernel/irq.c | 581
-rw-r--r--  arch/sparc64/kernel/pci_psycho.c | 3
-rw-r--r--  arch/sparc64/kernel/pci_sabre.c | 46
-rw-r--r--  arch/sparc64/kernel/pci_schizo.c | 78
-rw-r--r--  arch/sparc64/kernel/time.c | 2
-rw-r--r--  drivers/char/hw_random.c | 2
-rw-r--r--  drivers/char/watchdog/i8xx_tco.c | 2
-rw-r--r--  drivers/ide/setup-pci.c | 2
-rw-r--r--  drivers/parport/parport_pc.c | 2
-rw-r--r--  drivers/pci/Makefile | 1
-rw-r--r--  drivers/pci/hotplug.c | 2
-rw-r--r--  drivers/pci/pci-driver.c | 196
-rw-r--r--  drivers/pci/pci.c | 6
-rw-r--r--  drivers/pci/pcie/portdrv.h | 5
-rw-r--r--  drivers/pci/pcie/portdrv_core.c | 8
-rw-r--r--  drivers/pci/pcie/portdrv_pci.c | 79
-rw-r--r--  drivers/pci/probe.c | 24
-rw-r--r--  drivers/pci/quirks.c | 1
-rw-r--r--  drivers/pci/setup-bus.c | 2
-rw-r--r--  drivers/sbus/char/bpp.c | 20
-rw-r--r--  include/asm-sparc64/irq.h | 49
-rw-r--r--  include/asm-sparc64/pbm.h | 3
-rw-r--r--  include/asm-sparc64/signal.h | 15
-rw-r--r--  include/linux/compat_ioctl.h | 19
-rw-r--r--  include/linux/pci-dynids.h | 18
-rw-r--r--  include/linux/pci.h | 5
-rw-r--r--  sound/pci/bt87x.c | 2
32 files changed, 554 insertions(+), 672 deletions(-)
diff --git a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
index 1a49adb1f4a6..e86ea486c311 100644
--- a/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
+++ b/arch/i386/kernel/cpu/cpufreq/gx-suspmod.c
@@ -190,7 +190,7 @@ static __init struct pci_dev *gx_detect_chipset(void)
190 190
191 /* detect which companion chip is used */ 191 /* detect which companion chip is used */
192 while ((gx_pci = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, gx_pci)) != NULL) { 192 while ((gx_pci = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, gx_pci)) != NULL) {
193 if ((pci_match_device (gx_chipset_tbl, gx_pci)) != NULL) { 193 if ((pci_match_id(gx_chipset_tbl, gx_pci)) != NULL) {
194 return gx_pci; 194 return gx_pci;
195 } 195 }
196 } 196 }
diff --git a/arch/i386/pci/common.c b/arch/i386/pci/common.c
index 87325263cd4f..70bcd53451f6 100644
--- a/arch/i386/pci/common.c
+++ b/arch/i386/pci/common.c
@@ -165,6 +165,7 @@ static int __init pcibios_init(void)
165 if ((pci_probe & PCI_BIOS_SORT) && !(pci_probe & PCI_NO_SORT)) 165 if ((pci_probe & PCI_BIOS_SORT) && !(pci_probe & PCI_NO_SORT))
166 pcibios_sort(); 166 pcibios_sort();
167#endif 167#endif
168 pci_assign_unassigned_resources();
168 return 0; 169 return 0;
169} 170}
170 171
diff --git a/arch/i386/pci/i386.c b/arch/i386/pci/i386.c
index c205ea7e233b..93a364c82150 100644
--- a/arch/i386/pci/i386.c
+++ b/arch/i386/pci/i386.c
@@ -106,11 +106,16 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
106 if ((dev = bus->self)) { 106 if ((dev = bus->self)) {
107 for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) { 107 for (idx = PCI_BRIDGE_RESOURCES; idx < PCI_NUM_RESOURCES; idx++) {
108 r = &dev->resource[idx]; 108 r = &dev->resource[idx];
109 if (!r->start) 109 if (!r->flags)
110 continue; 110 continue;
111 pr = pci_find_parent_resource(dev, r); 111 pr = pci_find_parent_resource(dev, r);
112 if (!pr || request_resource(pr, r) < 0) 112 if (!r->start || !pr || request_resource(pr, r) < 0) {
113 printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, pci_name(dev)); 113 printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, pci_name(dev));
114 /* Something is wrong with the region.
115 Invalidate the resource to prevent child
116 resource allocations in this range. */
117 r->flags = 0;
118 }
114 } 119 }
115 } 120 }
116 pcibios_allocate_bus_resources(&bus->children); 121 pcibios_allocate_bus_resources(&bus->children);
@@ -227,7 +232,7 @@ int pcibios_enable_resources(struct pci_dev *dev, int mask)
227 232
228 pci_read_config_word(dev, PCI_COMMAND, &cmd); 233 pci_read_config_word(dev, PCI_COMMAND, &cmd);
229 old_cmd = cmd; 234 old_cmd = cmd;
230 for(idx=0; idx<6; idx++) { 235 for(idx = 0; idx < PCI_NUM_RESOURCES; idx++) {
231 /* Only set up the requested stuff */ 236 /* Only set up the requested stuff */
232 if (!(mask & (1<<idx))) 237 if (!(mask & (1<<idx)))
233 continue; 238 continue;
diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig
index e2b050eb3b96..d78bc13ebbb9 100644
--- a/arch/sparc64/Kconfig
+++ b/arch/sparc64/Kconfig
@@ -444,6 +444,24 @@ config PRINTER
444 If you have more than 8 printers, you need to increase the LP_NO 444 If you have more than 8 printers, you need to increase the LP_NO
445 macro in lp.c and the PARPORT_MAX macro in parport.h. 445 macro in lp.c and the PARPORT_MAX macro in parport.h.
446 446
447config PPDEV
448 tristate "Support for user-space parallel port device drivers"
449 depends on PARPORT
450 ---help---
451 Saying Y to this adds support for /dev/parport device nodes. This
452 is needed for programs that want portable access to the parallel
453 port, for instance deviceid (which displays Plug-and-Play device
454 IDs).
455
456 This is the parallel port equivalent of SCSI generic support (sg).
457 It is safe to say N to this -- it is not needed for normal printing
458 or parallel port CD-ROM/disk support.
459
460 To compile this driver as a module, choose M here: the
461 module will be called ppdev.
462
463 If unsure, say N.
464
447config ENVCTRL 465config ENVCTRL
448 tristate "SUNW, envctrl support" 466 tristate "SUNW, envctrl support"
449 depends on PCI 467 depends on PCI
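
The PPDEV help text above describes user-space access to the parallel port through /dev/parport nodes. As a rough illustration only (not part of this patch, and assuming the ppdev module is loaded and /dev/parport0 exists), a user program claims the port via the ppdev ioctl interface before touching the data lines:

/* Hypothetical user-space sketch of the /dev/parport interface that the
 * PPDEV help text refers to; not part of this patch. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ppdev.h>

int main(void)
{
        unsigned char byte = 0xaa;
        int fd = open("/dev/parport0", O_RDWR);

        if (fd < 0) {
                perror("open /dev/parport0");
                return 1;
        }
        if (ioctl(fd, PPCLAIM) < 0) {           /* claim the port before use */
                perror("PPCLAIM");
                close(fd);
                return 1;
        }
        ioctl(fd, PPWDATA, &byte);              /* drive the data lines */
        ioctl(fd, PPRELEASE);                   /* let other drivers have it back */
        close(fd);
        return 0;
}
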
diff --git a/arch/sparc64/kernel/entry.S b/arch/sparc64/kernel/entry.S
index eee516a71c14..d3973d8a7195 100644
--- a/arch/sparc64/kernel/entry.S
+++ b/arch/sparc64/kernel/entry.S
@@ -553,13 +553,11 @@ do_ivec:
553 sllx %g3, 5, %g3 553 sllx %g3, 5, %g3
554 or %g2, %lo(ivector_table), %g2 554 or %g2, %lo(ivector_table), %g2
555 add %g2, %g3, %g3 555 add %g2, %g3, %g3
556 ldx [%g3 + 0x08], %g2 /* irq_info */
557 ldub [%g3 + 0x04], %g4 /* pil */ 556 ldub [%g3 + 0x04], %g4 /* pil */
558 brz,pn %g2, do_ivec_spurious 557 mov 1, %g2
559 mov 1, %g2
560
561 sllx %g2, %g4, %g2 558 sllx %g2, %g4, %g2
562 sllx %g4, 2, %g4 559 sllx %g4, 2, %g4
560
563 lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */ 561 lduw [%g6 + %g4], %g5 /* g5 = irq_work(cpu, pil) */
564 stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */ 562 stw %g5, [%g3 + 0x00] /* bucket->irq_chain = g5 */
565 stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */ 563 stw %g3, [%g6 + %g4] /* irq_work(cpu, pil) = bucket */
@@ -567,9 +565,9 @@ do_ivec:
567 retry 565 retry
568do_ivec_xcall: 566do_ivec_xcall:
569 mov 0x50, %g1 567 mov 0x50, %g1
570
571 ldxa [%g1 + %g0] ASI_INTR_R, %g1 568 ldxa [%g1 + %g0] ASI_INTR_R, %g1
572 srl %g3, 0, %g3 569 srl %g3, 0, %g3
570
573 mov 0x60, %g7 571 mov 0x60, %g7
574 ldxa [%g7 + %g0] ASI_INTR_R, %g7 572 ldxa [%g7 + %g0] ASI_INTR_R, %g7
575 stxa %g0, [%g0] ASI_INTR_RECEIVE 573 stxa %g0, [%g0] ASI_INTR_RECEIVE
@@ -581,19 +579,6 @@ do_ivec_xcall:
5811: jmpl %g3, %g0 5791: jmpl %g3, %g0
582 nop 580 nop
583 581
584do_ivec_spurious:
585 stw %g3, [%g6 + 0x00] /* irq_work(cpu, 0) = bucket */
586 rdpr %pstate, %g5
587
588 wrpr %g5, PSTATE_IG | PSTATE_AG, %pstate
589 sethi %hi(109f), %g7
590 ba,pt %xcc, etrap
591109: or %g7, %lo(109b), %g7
592 call catch_disabled_ivec
593 add %sp, PTREGS_OFF, %o0
594 ba,pt %xcc, rtrap
595 clr %l6
596
597 .globl save_alternate_globals 582 .globl save_alternate_globals
598save_alternate_globals: /* %o0 = save_area */ 583save_alternate_globals: /* %o0 = save_area */
599 rdpr %pstate, %o5 584 rdpr %pstate, %o5
diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
index 424712577307..74a2e0808cbc 100644
--- a/arch/sparc64/kernel/irq.c
+++ b/arch/sparc64/kernel/irq.c
@@ -71,31 +71,7 @@ struct irq_work_struct {
71struct irq_work_struct __irq_work[NR_CPUS]; 71struct irq_work_struct __irq_work[NR_CPUS];
72#define irq_work(__cpu, __pil) &(__irq_work[(__cpu)].irq_worklists[(__pil)]) 72#define irq_work(__cpu, __pil) &(__irq_work[(__cpu)].irq_worklists[(__pil)])
73 73
74#ifdef CONFIG_PCI 74static struct irqaction *irq_action[NR_IRQS+1];
75/* This is a table of physical addresses used to deal with IBF_DMA_SYNC.
76 * It is used for PCI only to synchronize DMA transfers with IRQ delivery
77 * for devices behind busses other than APB on Sabre systems.
78 *
79 * Currently these physical addresses are just config space accesses
80 * to the command register for that device.
81 */
82unsigned long pci_dma_wsync;
83unsigned long dma_sync_reg_table[256];
84unsigned char dma_sync_reg_table_entry = 0;
85#endif
86
87/* This is based upon code in the 32-bit Sparc kernel written mostly by
88 * David Redman (djhr@tadpole.co.uk).
89 */
90#define MAX_STATIC_ALLOC 4
91static struct irqaction static_irqaction[MAX_STATIC_ALLOC];
92static int static_irq_count;
93
94/* This is exported so that fast IRQ handlers can get at it... -DaveM */
95struct irqaction *irq_action[NR_IRQS+1] = {
96 NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL,
97 NULL, NULL, NULL, NULL, NULL, NULL , NULL, NULL
98};
99 75
100/* This only synchronizes entities which modify IRQ handler 76/* This only synchronizes entities which modify IRQ handler
101 * state and some selected user-level spots that want to 77 * state and some selected user-level spots that want to
@@ -241,17 +217,22 @@ void disable_irq(unsigned int irq)
241 * the CPU %tick register and not by some normal vectored interrupt 217 * the CPU %tick register and not by some normal vectored interrupt
242 * source. To handle this special case, we use this dummy INO bucket. 218 * source. To handle this special case, we use this dummy INO bucket.
243 */ 219 */
220static struct irq_desc pil0_dummy_desc;
244static struct ino_bucket pil0_dummy_bucket = { 221static struct ino_bucket pil0_dummy_bucket = {
245 0, /* irq_chain */ 222 .irq_info = &pil0_dummy_desc,
246 0, /* pil */
247 0, /* pending */
248 0, /* flags */
249 0, /* __unused */
250 NULL, /* irq_info */
251 0UL, /* iclr */
252 0UL, /* imap */
253}; 223};
254 224
225static void build_irq_error(const char *msg, unsigned int ino, int pil, int inofixup,
226 unsigned long iclr, unsigned long imap,
227 struct ino_bucket *bucket)
228{
229 prom_printf("IRQ: INO %04x (%d:%016lx:%016lx) --> "
230 "(%d:%d:%016lx:%016lx), halting...\n",
231 ino, bucket->pil, bucket->iclr, bucket->imap,
232 pil, inofixup, iclr, imap);
233 prom_halt();
234}
235
255unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap) 236unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long imap)
256{ 237{
257 struct ino_bucket *bucket; 238 struct ino_bucket *bucket;
@@ -280,28 +261,35 @@ unsigned int build_irq(int pil, int inofixup, unsigned long iclr, unsigned long
280 prom_halt(); 261 prom_halt();
281 } 262 }
282 263
283 /* Ok, looks good, set it up. Don't touch the irq_chain or
284 * the pending flag.
285 */
286 bucket = &ivector_table[ino]; 264 bucket = &ivector_table[ino];
287 if ((bucket->flags & IBF_ACTIVE) || 265 if (bucket->flags & IBF_ACTIVE)
288 (bucket->irq_info != NULL)) { 266 build_irq_error("IRQ: Trying to build active INO bucket.\n",
289 /* This is a gross fatal error if it happens here. */ 267 ino, pil, inofixup, iclr, imap, bucket);
290 prom_printf("IRQ: Trying to reinit INO bucket, fatal error.\n"); 268
291 prom_printf("IRQ: Request INO %04x (%d:%d:%016lx:%016lx)\n", 269 if (bucket->irq_info) {
292 ino, pil, inofixup, iclr, imap); 270 if (bucket->imap != imap || bucket->iclr != iclr)
293 prom_printf("IRQ: Existing (%d:%016lx:%016lx)\n", 271 build_irq_error("IRQ: Trying to reinit INO bucket.\n",
294 bucket->pil, bucket->iclr, bucket->imap); 272 ino, pil, inofixup, iclr, imap, bucket);
295 prom_printf("IRQ: Cannot continue, halting...\n"); 273
274 goto out;
275 }
276
277 bucket->irq_info = kmalloc(sizeof(struct irq_desc), GFP_ATOMIC);
278 if (!bucket->irq_info) {
279 prom_printf("IRQ: Error, kmalloc(irq_desc) failed.\n");
296 prom_halt(); 280 prom_halt();
297 } 281 }
282 memset(bucket->irq_info, 0, sizeof(struct irq_desc));
283
284 /* Ok, looks good, set it up. Don't touch the irq_chain or
285 * the pending flag.
286 */
298 bucket->imap = imap; 287 bucket->imap = imap;
299 bucket->iclr = iclr; 288 bucket->iclr = iclr;
300 bucket->pil = pil; 289 bucket->pil = pil;
301 bucket->flags = 0; 290 bucket->flags = 0;
302 291
303 bucket->irq_info = NULL; 292out:
304
305 return __irq(bucket); 293 return __irq(bucket);
306} 294}
307 295
@@ -319,26 +307,65 @@ static void atomic_bucket_insert(struct ino_bucket *bucket)
319 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate)); 307 __asm__ __volatile__("wrpr %0, 0x0, %%pstate" : : "r" (pstate));
320} 308}
321 309
310static int check_irq_sharing(int pil, unsigned long irqflags)
311{
312 struct irqaction *action, *tmp;
313
314 action = *(irq_action + pil);
315 if (action) {
316 if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) {
317 for (tmp = action; tmp->next; tmp = tmp->next)
318 ;
319 } else {
320 return -EBUSY;
321 }
322 }
323 return 0;
324}
325
326static void append_irq_action(int pil, struct irqaction *action)
327{
328 struct irqaction **pp = irq_action + pil;
329
330 while (*pp)
331 pp = &((*pp)->next);
332 *pp = action;
333}
334
335static struct irqaction *get_action_slot(struct ino_bucket *bucket)
336{
337 struct irq_desc *desc = bucket->irq_info;
338 int max_irq, i;
339
340 max_irq = 1;
341 if (bucket->flags & IBF_PCI)
342 max_irq = MAX_IRQ_DESC_ACTION;
343 for (i = 0; i < max_irq; i++) {
344 struct irqaction *p = &desc->action[i];
345 u32 mask = (1 << i);
346
347 if (desc->action_active_mask & mask)
348 continue;
349
350 desc->action_active_mask |= mask;
351 return p;
352 }
353 return NULL;
354}
355
322int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *), 356int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
323 unsigned long irqflags, const char *name, void *dev_id) 357 unsigned long irqflags, const char *name, void *dev_id)
324{ 358{
325 struct irqaction *action, *tmp = NULL; 359 struct irqaction *action;
326 struct ino_bucket *bucket = __bucket(irq); 360 struct ino_bucket *bucket = __bucket(irq);
327 unsigned long flags; 361 unsigned long flags;
328 int pending = 0; 362 int pending = 0;
329 363
330 if ((bucket != &pil0_dummy_bucket) && 364 if (unlikely(!handler))
331 (bucket < &ivector_table[0] ||
332 bucket >= &ivector_table[NUM_IVECS])) {
333 unsigned int *caller;
334
335 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller));
336 printk(KERN_CRIT "request_irq: Old style IRQ registry attempt "
337 "from %p, irq %08x.\n", caller, irq);
338 return -EINVAL; 365 return -EINVAL;
339 } 366
340 if (!handler) 367 if (unlikely(!bucket->irq_info))
341 return -EINVAL; 368 return -ENODEV;
342 369
343 if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) { 370 if ((bucket != &pil0_dummy_bucket) && (irqflags & SA_SAMPLE_RANDOM)) {
344 /* 371 /*
@@ -356,93 +383,20 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
356 383
357 spin_lock_irqsave(&irq_action_lock, flags); 384 spin_lock_irqsave(&irq_action_lock, flags);
358 385
359 action = *(bucket->pil + irq_action); 386 if (check_irq_sharing(bucket->pil, irqflags)) {
360 if (action) { 387 spin_unlock_irqrestore(&irq_action_lock, flags);
361 if ((action->flags & SA_SHIRQ) && (irqflags & SA_SHIRQ)) 388 return -EBUSY;
362 for (tmp = action; tmp->next; tmp = tmp->next)
363 ;
364 else {
365 spin_unlock_irqrestore(&irq_action_lock, flags);
366 return -EBUSY;
367 }
368 action = NULL; /* Or else! */
369 } 389 }
370 390
371 /* If this is flagged as statically allocated then we use our 391 action = get_action_slot(bucket);
372 * private struct which is never freed.
373 */
374 if (irqflags & SA_STATIC_ALLOC) {
375 if (static_irq_count < MAX_STATIC_ALLOC)
376 action = &static_irqaction[static_irq_count++];
377 else
378 printk("Request for IRQ%d (%s) SA_STATIC_ALLOC failed "
379 "using kmalloc\n", irq, name);
380 }
381 if (action == NULL)
382 action = (struct irqaction *)kmalloc(sizeof(struct irqaction),
383 GFP_ATOMIC);
384
385 if (!action) { 392 if (!action) {
386 spin_unlock_irqrestore(&irq_action_lock, flags); 393 spin_unlock_irqrestore(&irq_action_lock, flags);
387 return -ENOMEM; 394 return -ENOMEM;
388 } 395 }
389 396
390 if (bucket == &pil0_dummy_bucket) { 397 bucket->flags |= IBF_ACTIVE;
391 bucket->irq_info = action; 398 pending = 0;
392 bucket->flags |= IBF_ACTIVE; 399 if (bucket != &pil0_dummy_bucket) {
393 } else {
394 if ((bucket->flags & IBF_ACTIVE) != 0) {
395 void *orig = bucket->irq_info;
396 void **vector = NULL;
397
398 if ((bucket->flags & IBF_PCI) == 0) {
399 printk("IRQ: Trying to share non-PCI bucket.\n");
400 goto free_and_ebusy;
401 }
402 if ((bucket->flags & IBF_MULTI) == 0) {
403 vector = kmalloc(sizeof(void *) * 4, GFP_ATOMIC);
404 if (vector == NULL)
405 goto free_and_enomem;
406
407 /* We might have slept. */
408 if ((bucket->flags & IBF_MULTI) != 0) {
409 int ent;
410
411 kfree(vector);
412 vector = (void **)bucket->irq_info;
413 for(ent = 0; ent < 4; ent++) {
414 if (vector[ent] == NULL) {
415 vector[ent] = action;
416 break;
417 }
418 }
419 if (ent == 4)
420 goto free_and_ebusy;
421 } else {
422 vector[0] = orig;
423 vector[1] = action;
424 vector[2] = NULL;
425 vector[3] = NULL;
426 bucket->irq_info = vector;
427 bucket->flags |= IBF_MULTI;
428 }
429 } else {
430 int ent;
431
432 vector = (void **)orig;
433 for (ent = 0; ent < 4; ent++) {
434 if (vector[ent] == NULL) {
435 vector[ent] = action;
436 break;
437 }
438 }
439 if (ent == 4)
440 goto free_and_ebusy;
441 }
442 } else {
443 bucket->irq_info = action;
444 bucket->flags |= IBF_ACTIVE;
445 }
446 pending = bucket->pending; 400 pending = bucket->pending;
447 if (pending) 401 if (pending)
448 bucket->pending = 0; 402 bucket->pending = 0;
@@ -456,10 +410,7 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
456 put_ino_in_irqaction(action, irq); 410 put_ino_in_irqaction(action, irq);
457 put_smpaff_in_irqaction(action, CPU_MASK_NONE); 411 put_smpaff_in_irqaction(action, CPU_MASK_NONE);
458 412
459 if (tmp) 413 append_irq_action(bucket->pil, action);
460 tmp->next = action;
461 else
462 *(bucket->pil + irq_action) = action;
463 414
464 enable_irq(irq); 415 enable_irq(irq);
465 416
@@ -468,147 +419,103 @@ int request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_
468 atomic_bucket_insert(bucket); 419 atomic_bucket_insert(bucket);
469 set_softint(1 << bucket->pil); 420 set_softint(1 << bucket->pil);
470 } 421 }
422
471 spin_unlock_irqrestore(&irq_action_lock, flags); 423 spin_unlock_irqrestore(&irq_action_lock, flags);
472 if ((bucket != &pil0_dummy_bucket) && (!(irqflags & SA_STATIC_ALLOC))) 424
425 if (bucket != &pil0_dummy_bucket)
473 register_irq_proc(__irq_ino(irq)); 426 register_irq_proc(__irq_ino(irq));
474 427
475#ifdef CONFIG_SMP 428#ifdef CONFIG_SMP
476 distribute_irqs(); 429 distribute_irqs();
477#endif 430#endif
478 return 0; 431 return 0;
479
480free_and_ebusy:
481 kfree(action);
482 spin_unlock_irqrestore(&irq_action_lock, flags);
483 return -EBUSY;
484
485free_and_enomem:
486 kfree(action);
487 spin_unlock_irqrestore(&irq_action_lock, flags);
488 return -ENOMEM;
489} 432}
490 433
491EXPORT_SYMBOL(request_irq); 434EXPORT_SYMBOL(request_irq);
492 435
493void free_irq(unsigned int irq, void *dev_id) 436static struct irqaction *unlink_irq_action(unsigned int irq, void *dev_id)
494{ 437{
495 struct irqaction *action; 438 struct ino_bucket *bucket = __bucket(irq);
496 struct irqaction *tmp = NULL; 439 struct irqaction *action, **pp;
497 unsigned long flags;
498 struct ino_bucket *bucket = __bucket(irq), *bp;
499
500 if ((bucket != &pil0_dummy_bucket) &&
501 (bucket < &ivector_table[0] ||
502 bucket >= &ivector_table[NUM_IVECS])) {
503 unsigned int *caller;
504 440
505 __asm__ __volatile__("mov %%i7, %0" : "=r" (caller)); 441 pp = irq_action + bucket->pil;
506 printk(KERN_CRIT "free_irq: Old style IRQ removal attempt " 442 action = *pp;
507 "from %p, irq %08x.\n", caller, irq); 443 if (unlikely(!action))
508 return; 444 return NULL;
509 }
510
511 spin_lock_irqsave(&irq_action_lock, flags);
512 445
513 action = *(bucket->pil + irq_action); 446 if (unlikely(!action->handler)) {
514 if (!action->handler) {
515 printk("Freeing free IRQ %d\n", bucket->pil); 447 printk("Freeing free IRQ %d\n", bucket->pil);
516 return; 448 return NULL;
517 }
518 if (dev_id) {
519 for ( ; action; action = action->next) {
520 if (action->dev_id == dev_id)
521 break;
522 tmp = action;
523 }
524 if (!action) {
525 printk("Trying to free free shared IRQ %d\n", bucket->pil);
526 spin_unlock_irqrestore(&irq_action_lock, flags);
527 return;
528 }
529 } else if (action->flags & SA_SHIRQ) {
530 printk("Trying to free shared IRQ %d with NULL device ID\n", bucket->pil);
531 spin_unlock_irqrestore(&irq_action_lock, flags);
532 return;
533 } 449 }
534 450
535 if (action->flags & SA_STATIC_ALLOC) { 451 while (action && action->dev_id != dev_id) {
536 printk("Attempt to free statically allocated IRQ %d (%s)\n", 452 pp = &action->next;
537 bucket->pil, action->name); 453 action = *pp;
538 spin_unlock_irqrestore(&irq_action_lock, flags);
539 return;
540 } 454 }
541 455
542 if (action && tmp) 456 if (likely(action))
543 tmp->next = action->next; 457 *pp = action->next;
544 else 458
545 *(bucket->pil + irq_action) = action->next; 459 return action;
460}
461
462void free_irq(unsigned int irq, void *dev_id)
463{
464 struct irqaction *action;
465 struct ino_bucket *bucket;
466 unsigned long flags;
467
468 spin_lock_irqsave(&irq_action_lock, flags);
469
470 action = unlink_irq_action(irq, dev_id);
546 471
547 spin_unlock_irqrestore(&irq_action_lock, flags); 472 spin_unlock_irqrestore(&irq_action_lock, flags);
548 473
474 if (unlikely(!action))
475 return;
476
549 synchronize_irq(irq); 477 synchronize_irq(irq);
550 478
551 spin_lock_irqsave(&irq_action_lock, flags); 479 spin_lock_irqsave(&irq_action_lock, flags);
552 480
481 bucket = __bucket(irq);
553 if (bucket != &pil0_dummy_bucket) { 482 if (bucket != &pil0_dummy_bucket) {
483 struct irq_desc *desc = bucket->irq_info;
554 unsigned long imap = bucket->imap; 484 unsigned long imap = bucket->imap;
555 void **vector, *orig; 485 int ent, i;
556 int ent;
557
558 orig = bucket->irq_info;
559 vector = (void **)orig;
560
561 if ((bucket->flags & IBF_MULTI) != 0) {
562 int other = 0;
563 void *orphan = NULL;
564 for (ent = 0; ent < 4; ent++) {
565 if (vector[ent] == action)
566 vector[ent] = NULL;
567 else if (vector[ent] != NULL) {
568 orphan = vector[ent];
569 other++;
570 }
571 }
572 486
573 /* Only free when no other shared irq 487 for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
574 * uses this bucket. 488 struct irqaction *p = &desc->action[i];
575 */ 489
576 if (other) { 490 if (p == action) {
577 if (other == 1) { 491 desc->action_active_mask &= ~(1 << i);
578 /* Convert back to non-shared bucket. */ 492 break;
579 bucket->irq_info = orphan;
580 bucket->flags &= ~(IBF_MULTI);
581 kfree(vector);
582 }
583 goto out;
584 } 493 }
585 } else {
586 bucket->irq_info = NULL;
587 } 494 }
588 495
589 /* This unique interrupt source is now inactive. */ 496 if (!desc->action_active_mask) {
590 bucket->flags &= ~IBF_ACTIVE; 497 /* This unique interrupt source is now inactive. */
498 bucket->flags &= ~IBF_ACTIVE;
591 499
592 /* See if any other buckets share this bucket's IMAP 500 /* See if any other buckets share this bucket's IMAP
593 * and are still active. 501 * and are still active.
594 */ 502 */
595 for (ent = 0; ent < NUM_IVECS; ent++) { 503 for (ent = 0; ent < NUM_IVECS; ent++) {
596 bp = &ivector_table[ent]; 504 struct ino_bucket *bp = &ivector_table[ent];
597 if (bp != bucket && 505 if (bp != bucket &&
598 bp->imap == imap && 506 bp->imap == imap &&
599 (bp->flags & IBF_ACTIVE) != 0) 507 (bp->flags & IBF_ACTIVE) != 0)
600 break; 508 break;
601 } 509 }
602 510
603 /* Only disable when no other sub-irq levels of 511 /* Only disable when no other sub-irq levels of
604 * the same IMAP are active. 512 * the same IMAP are active.
605 */ 513 */
606 if (ent == NUM_IVECS) 514 if (ent == NUM_IVECS)
607 disable_irq(irq); 515 disable_irq(irq);
516 }
608 } 517 }
609 518
610out:
611 kfree(action);
612 spin_unlock_irqrestore(&irq_action_lock, flags); 519 spin_unlock_irqrestore(&irq_action_lock, flags);
613} 520}
614 521
@@ -647,99 +554,55 @@ void synchronize_irq(unsigned int irq)
647} 554}
648#endif /* CONFIG_SMP */ 555#endif /* CONFIG_SMP */
649 556
650void catch_disabled_ivec(struct pt_regs *regs) 557static void process_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
651{
652 int cpu = smp_processor_id();
653 struct ino_bucket *bucket = __bucket(*irq_work(cpu, 0));
654
655 /* We can actually see this on Ultra/PCI PCI cards, which are bridges
656 * to other devices. Here a single IMAP enabled potentially multiple
657 * unique interrupt sources (which each do have a unique ICLR register.
658 *
659 * So what we do is just register that the IVEC arrived, when registered
660 * for real the request_irq() code will check the bit and signal
661 * a local CPU interrupt for it.
662 */
663#if 0
664 printk("IVEC: Spurious interrupt vector (%x) received at (%016lx)\n",
665 bucket - &ivector_table[0], regs->tpc);
666#endif
667 *irq_work(cpu, 0) = 0;
668 bucket->pending = 1;
669}
670
671/* Tune this... */
672#define FORWARD_VOLUME 12
673
674#ifdef CONFIG_SMP
675
676static inline void redirect_intr(int cpu, struct ino_bucket *bp)
677{ 558{
678 /* Ok, here is what is going on: 559 struct irq_desc *desc = bp->irq_info;
679 * 1) Retargeting IRQs on Starfire is very 560 unsigned char flags = bp->flags;
680 * expensive so just forget about it on them. 561 u32 action_mask, i;
681 * 2) Moving around very high priority interrupts 562 int random;
682 * is a losing game.
683 * 3) If the current cpu is idle, interrupts are
684 * useful work, so keep them here. But do not
685 * pass to our neighbour if he is not very idle.
686 * 4) If sysadmin explicitly asks for directed intrs,
687 * Just Do It.
688 */
689 struct irqaction *ap = bp->irq_info;
690 cpumask_t cpu_mask;
691 unsigned int buddy, ticks;
692 563
693 cpu_mask = get_smpaff_in_irqaction(ap); 564 bp->flags |= IBF_INPROGRESS;
694 cpus_and(cpu_mask, cpu_mask, cpu_online_map);
695 if (cpus_empty(cpu_mask))
696 cpu_mask = cpu_online_map;
697 565
698 if (this_is_starfire != 0 || 566 if (unlikely(!(flags & IBF_ACTIVE))) {
699 bp->pil >= 10 || current->pid == 0) 567 bp->pending = 1;
700 goto out; 568 goto out;
701
702 /* 'cpu' is the MID (ie. UPAID), calculate the MID
703 * of our buddy.
704 */
705 buddy = cpu + 1;
706 if (buddy >= NR_CPUS)
707 buddy = 0;
708
709 ticks = 0;
710 while (!cpu_isset(buddy, cpu_mask)) {
711 if (++buddy >= NR_CPUS)
712 buddy = 0;
713 if (++ticks > NR_CPUS) {
714 put_smpaff_in_irqaction(ap, CPU_MASK_NONE);
715 goto out;
716 }
717 } 569 }
718 570
719 if (buddy == cpu) 571 if (desc->pre_handler)
720 goto out; 572 desc->pre_handler(bp,
573 desc->pre_handler_arg1,
574 desc->pre_handler_arg2);
721 575
722 /* Voo-doo programming. */ 576 action_mask = desc->action_active_mask;
723 if (cpu_data(buddy).idle_volume < FORWARD_VOLUME) 577 random = 0;
724 goto out; 578 for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
579 struct irqaction *p = &desc->action[i];
580 u32 mask = (1 << i);
725 581
726 /* This just so happens to be correct on Cheetah 582 if (!(action_mask & mask))
727 * at the moment. 583 continue;
728 */ 584
729 buddy <<= 26; 585 action_mask &= ~mask;
730 586
731 /* Push it to our buddy. */ 587 if (p->handler(__irq(bp), p->dev_id, regs) == IRQ_HANDLED)
732 upa_writel(buddy | IMAP_VALID, bp->imap); 588 random |= p->flags;
733 589
590 if (!action_mask)
591 break;
592 }
593 if (bp->pil != 0) {
594 upa_writel(ICLR_IDLE, bp->iclr);
595 /* Test and add entropy */
596 if (random & SA_SAMPLE_RANDOM)
597 add_interrupt_randomness(irq);
598 }
734out: 599out:
735 return; 600 bp->flags &= ~IBF_INPROGRESS;
736} 601}
737 602
738#endif
739
740void handler_irq(int irq, struct pt_regs *regs) 603void handler_irq(int irq, struct pt_regs *regs)
741{ 604{
742 struct ino_bucket *bp, *nbp; 605 struct ino_bucket *bp;
743 int cpu = smp_processor_id(); 606 int cpu = smp_processor_id();
744 607
745#ifndef CONFIG_SMP 608#ifndef CONFIG_SMP
@@ -757,8 +620,6 @@ void handler_irq(int irq, struct pt_regs *regs)
757 clear_softint(clr_mask); 620 clear_softint(clr_mask);
758 } 621 }
759#else 622#else
760 int should_forward = 0;
761
762 clear_softint(1 << irq); 623 clear_softint(1 << irq);
763#endif 624#endif
764 625
@@ -773,63 +634,12 @@ void handler_irq(int irq, struct pt_regs *regs)
773#else 634#else
774 bp = __bucket(xchg32(irq_work(cpu, irq), 0)); 635 bp = __bucket(xchg32(irq_work(cpu, irq), 0));
775#endif 636#endif
776 for ( ; bp != NULL; bp = nbp) { 637 while (bp) {
777 unsigned char flags = bp->flags; 638 struct ino_bucket *nbp = __bucket(bp->irq_chain);
778 unsigned char random = 0;
779 639
780 nbp = __bucket(bp->irq_chain);
781 bp->irq_chain = 0; 640 bp->irq_chain = 0;
782 641 process_bucket(irq, bp, regs);
783 bp->flags |= IBF_INPROGRESS; 642 bp = nbp;
784
785 if ((flags & IBF_ACTIVE) != 0) {
786#ifdef CONFIG_PCI
787 if ((flags & IBF_DMA_SYNC) != 0) {
788 upa_readl(dma_sync_reg_table[bp->synctab_ent]);
789 upa_readq(pci_dma_wsync);
790 }
791#endif
792 if ((flags & IBF_MULTI) == 0) {
793 struct irqaction *ap = bp->irq_info;
794 int ret;
795
796 ret = ap->handler(__irq(bp), ap->dev_id, regs);
797 if (ret == IRQ_HANDLED)
798 random |= ap->flags;
799 } else {
800 void **vector = (void **)bp->irq_info;
801 int ent;
802 for (ent = 0; ent < 4; ent++) {
803 struct irqaction *ap = vector[ent];
804 if (ap != NULL) {
805 int ret;
806
807 ret = ap->handler(__irq(bp),
808 ap->dev_id,
809 regs);
810 if (ret == IRQ_HANDLED)
811 random |= ap->flags;
812 }
813 }
814 }
815 /* Only the dummy bucket lacks IMAP/ICLR. */
816 if (bp->pil != 0) {
817#ifdef CONFIG_SMP
818 if (should_forward) {
819 redirect_intr(cpu, bp);
820 should_forward = 0;
821 }
822#endif
823 upa_writel(ICLR_IDLE, bp->iclr);
824
825 /* Test and add entropy */
826 if (random & SA_SAMPLE_RANDOM)
827 add_interrupt_randomness(irq);
828 }
829 } else
830 bp->pending = 1;
831
832 bp->flags &= ~IBF_INPROGRESS;
833 } 643 }
834 irq_exit(); 644 irq_exit();
835} 645}
@@ -959,7 +769,10 @@ static void distribute_irqs(void)
959 */ 769 */
960 for (level = 1; level < NR_IRQS; level++) { 770 for (level = 1; level < NR_IRQS; level++) {
961 struct irqaction *p = irq_action[level]; 771 struct irqaction *p = irq_action[level];
962 if (level == 12) continue; 772
773 if (level == 12)
774 continue;
775
963 while(p) { 776 while(p) {
964 cpu = retarget_one_irq(p, cpu); 777 cpu = retarget_one_irq(p, cpu);
965 p = p->next; 778 p = p->next;
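
The irq.c rework above drops the old irq_info void pointer plus IBF_MULTI vector scheme in favour of a per-bucket struct irq_desc that carries a fixed array of actions and an optional pre-handler (used below by pci_sabre.c and pci_schizo.c for DMA write-sync). The structure itself lives in include/asm-sparc64/irq.h, which this patch also changes but which is not shown in this excerpt; based on the fields referenced above it looks roughly like the following sketch. Field order and the value of MAX_IRQ_DESC_ACTION are assumptions.

/* Rough sketch of the new per-bucket descriptor, inferred from the fields
 * irq.c references; the real definition is in include/asm-sparc64/irq.h
 * and may differ in detail. */
#define MAX_IRQ_DESC_ACTION     4       /* assumed: one slot per shareable PCI action */

struct irq_desc {
        /* Optional hook run before the actions fire, e.g. the Sabre and
         * Tomatillo DMA write-sync sequences installed further below. */
        void                    (*pre_handler)(struct ino_bucket *, void *, void *);
        void                    *pre_handler_arg1;
        void                    *pre_handler_arg2;

        /* Bitmask of action[] slots in use; see get_action_slot(). */
        u32                     action_active_mask;
        struct irqaction        action[MAX_IRQ_DESC_ACTION];
};
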
diff --git a/arch/sparc64/kernel/pci_psycho.c b/arch/sparc64/kernel/pci_psycho.c
index 534320ef0db2..91ab466d6c66 100644
--- a/arch/sparc64/kernel/pci_psycho.c
+++ b/arch/sparc64/kernel/pci_psycho.c
@@ -1303,8 +1303,7 @@ static void psycho_controller_hwinit(struct pci_controller_info *p)
1303{ 1303{
1304 u64 tmp; 1304 u64 tmp;
1305 1305
1306 /* PROM sets the IRQ retry value too low, increase it. */ 1306 psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 5);
1307 psycho_write(p->pbm_A.controller_regs + PSYCHO_IRQ_RETRY, 0xff);
1308 1307
1309 /* Enable arbiter for all PCI slots. */ 1308 /* Enable arbiter for all PCI slots. */
1310 tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL); 1309 tmp = psycho_read(p->pbm_A.controller_regs + PSYCHO_PCIA_CTRL);
diff --git a/arch/sparc64/kernel/pci_sabre.c b/arch/sparc64/kernel/pci_sabre.c
index 53d333b4a4e8..52bf3431a422 100644
--- a/arch/sparc64/kernel/pci_sabre.c
+++ b/arch/sparc64/kernel/pci_sabre.c
@@ -595,6 +595,23 @@ static int __init sabre_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
595 return ret; 595 return ret;
596} 596}
597 597
598/* When a device lives behind a bridge deeper in the PCI bus topology
599 * than APB, a special sequence must run to make sure all pending DMA
600 * transfers at the time of IRQ delivery are visible in the coherency
601 * domain by the cpu. This sequence is to perform a read on the far
602 * side of the non-APB bridge, then perform a read of Sabre's DMA
603 * write-sync register.
604 */
605static void sabre_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
606{
607 struct pci_dev *pdev = _arg1;
608 unsigned long sync_reg = (unsigned long) _arg2;
609 u16 _unused;
610
611 pci_read_config_word(pdev, PCI_VENDOR_ID, &_unused);
612 sabre_read(sync_reg);
613}
614
598static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm, 615static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
599 struct pci_dev *pdev, 616 struct pci_dev *pdev,
600 unsigned int ino) 617 unsigned int ino)
@@ -639,24 +656,14 @@ static unsigned int __init sabre_irq_build(struct pci_pbm_info *pbm,
639 if (pdev) { 656 if (pdev) {
640 struct pcidev_cookie *pcp = pdev->sysdata; 657 struct pcidev_cookie *pcp = pdev->sysdata;
641 658
642 /* When a device lives behind a bridge deeper in the
643 * PCI bus topology than APB, a special sequence must
644 * run to make sure all pending DMA transfers at the
645 * time of IRQ delivery are visible in the coherency
646 * domain by the cpu. This sequence is to perform
647 * a read on the far side of the non-APB bridge, then
648 * perform a read of Sabre's DMA write-sync register.
649 *
650 * Currently, the PCI_CONFIG register for the device
651 * is used for this read from the far side of the bridge.
652 */
653 if (pdev->bus->number != pcp->pbm->pci_first_busno) { 659 if (pdev->bus->number != pcp->pbm->pci_first_busno) {
654 bucket->flags |= IBF_DMA_SYNC; 660 struct pci_controller_info *p = pcp->pbm->parent;
655 bucket->synctab_ent = dma_sync_reg_table_entry++; 661 struct irq_desc *d = bucket->irq_info;
656 dma_sync_reg_table[bucket->synctab_ent] = 662
657 (unsigned long) sabre_pci_config_mkaddr( 663 d->pre_handler = sabre_wsync_handler;
658 pcp->pbm, 664 d->pre_handler_arg1 = pdev;
659 pdev->bus->number, pdev->devfn, PCI_COMMAND); 665 d->pre_handler_arg2 = (void *)
666 p->pbm_A.controller_regs + SABRE_WRSYNC;
660 } 667 }
661 } 668 }
662 return __irq(bucket); 669 return __irq(bucket);
@@ -1626,10 +1633,9 @@ void __init sabre_init(int pnode, char *model_name)
1626 */ 1633 */
1627 p->pbm_A.controller_regs = pr_regs[0].phys_addr; 1634 p->pbm_A.controller_regs = pr_regs[0].phys_addr;
1628 p->pbm_B.controller_regs = pr_regs[0].phys_addr; 1635 p->pbm_B.controller_regs = pr_regs[0].phys_addr;
1629 pci_dma_wsync = p->pbm_A.controller_regs + SABRE_WRSYNC;
1630 1636
1631 printk("PCI: Found SABRE, main regs at %016lx, wsync at %016lx\n", 1637 printk("PCI: Found SABRE, main regs at %016lx\n",
1632 p->pbm_A.controller_regs, pci_dma_wsync); 1638 p->pbm_A.controller_regs);
1633 1639
1634 /* Clear interrupts */ 1640 /* Clear interrupts */
1635 1641
diff --git a/arch/sparc64/kernel/pci_schizo.c b/arch/sparc64/kernel/pci_schizo.c
index 5753175b94e6..6a182bb66281 100644
--- a/arch/sparc64/kernel/pci_schizo.c
+++ b/arch/sparc64/kernel/pci_schizo.c
@@ -15,6 +15,7 @@
15#include <asm/iommu.h> 15#include <asm/iommu.h>
16#include <asm/irq.h> 16#include <asm/irq.h>
17#include <asm/upa.h> 17#include <asm/upa.h>
18#include <asm/pstate.h>
18 19
19#include "pci_impl.h" 20#include "pci_impl.h"
20#include "iommu_common.h" 21#include "iommu_common.h"
@@ -326,6 +327,44 @@ static int __init schizo_ino_to_pil(struct pci_dev *pdev, unsigned int ino)
326 return ret; 327 return ret;
327} 328}
328 329
330static void tomatillo_wsync_handler(struct ino_bucket *bucket, void *_arg1, void *_arg2)
331{
332 unsigned long sync_reg = (unsigned long) _arg2;
333 u64 mask = 1 << (__irq_ino(__irq(bucket)) & IMAP_INO);
334 u64 val;
335 int limit;
336
337 schizo_write(sync_reg, mask);
338
339 limit = 100000;
340 val = 0;
341 while (--limit) {
342 val = schizo_read(sync_reg);
343 if (!(val & mask))
344 break;
345 }
346 if (limit <= 0) {
347 printk("tomatillo_wsync_handler: DMA won't sync [%lx:%lx]\n",
348 val, mask);
349 }
350
351 if (_arg1) {
352 static unsigned char cacheline[64]
353 __attribute__ ((aligned (64)));
354
355 __asm__ __volatile__("rd %%fprs, %0\n\t"
356 "or %0, %4, %1\n\t"
357 "wr %1, 0x0, %%fprs\n\t"
358 "stda %%f0, [%5] %6\n\t"
359 "wr %0, 0x0, %%fprs\n\t"
360 "membar #Sync"
361 : "=&r" (mask), "=&r" (val)
362 : "0" (mask), "1" (val),
363 "i" (FPRS_FEF), "r" (&cacheline[0]),
364 "i" (ASI_BLK_COMMIT_P));
365 }
366}
367
329static unsigned int schizo_irq_build(struct pci_pbm_info *pbm, 368static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
330 struct pci_dev *pdev, 369 struct pci_dev *pdev,
331 unsigned int ino) 370 unsigned int ino)
@@ -369,6 +408,15 @@ static unsigned int schizo_irq_build(struct pci_pbm_info *pbm,
369 bucket = __bucket(build_irq(pil, ign_fixup, iclr, imap)); 408 bucket = __bucket(build_irq(pil, ign_fixup, iclr, imap));
370 bucket->flags |= IBF_PCI; 409 bucket->flags |= IBF_PCI;
371 410
411 if (pdev && pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) {
412 struct irq_desc *p = bucket->irq_info;
413
414 p->pre_handler = tomatillo_wsync_handler;
415 p->pre_handler_arg1 = ((pbm->chip_version <= 4) ?
416 (void *) 1 : (void *) 0);
417 p->pre_handler_arg2 = (void *) pbm->sync_reg;
418 }
419
372 return __irq(bucket); 420 return __irq(bucket);
373} 421}
374 422
@@ -885,6 +933,7 @@ static irqreturn_t schizo_ce_intr(int irq, void *dev_id, struct pt_regs *regs)
885 933
886#define SCHIZO_PCI_CTRL (0x2000UL) 934#define SCHIZO_PCI_CTRL (0x2000UL)
887#define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */ 935#define SCHIZO_PCICTRL_BUS_UNUS (1UL << 63UL) /* Safari */
936#define SCHIZO_PCICTRL_DTO_INT (1UL << 61UL) /* Tomatillo */
888#define SCHIZO_PCICTRL_ARB_PRIO (0x1ff << 52UL) /* Tomatillo */ 937#define SCHIZO_PCICTRL_ARB_PRIO (0x1ff << 52UL) /* Tomatillo */
889#define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */ 938#define SCHIZO_PCICTRL_ESLCK (1UL << 51UL) /* Safari */
890#define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */ 939#define SCHIZO_PCICTRL_ERRSLOT (7UL << 48UL) /* Safari */
@@ -1887,37 +1936,27 @@ static void __init schizo_pbm_hw_init(struct pci_pbm_info *pbm)
1887{ 1936{
1888 u64 tmp; 1937 u64 tmp;
1889 1938
1890 /* Set IRQ retry to infinity. */ 1939 schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY, 5);
1891 schizo_write(pbm->pbm_regs + SCHIZO_PCI_IRQ_RETRY,
1892 SCHIZO_IRQ_RETRY_INF);
1893 1940
1894 /* Enable arbiter for all PCI slots. Also, disable PCI interval
1895 * timer so that DTO (Discard TimeOuts) are not reported because
1896 * some Schizo revisions report them erroneously.
1897 */
1898 tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_CTRL); 1941 tmp = schizo_read(pbm->pbm_regs + SCHIZO_PCI_CTRL);
1899 if (pbm->chip_type == PBM_CHIP_TYPE_SCHIZO_PLUS &&
1900 pbm->chip_version == 0x5 &&
1901 pbm->chip_revision == 0x1)
1902 tmp |= 0x0f;
1903 else
1904 tmp |= 0xff;
1905 1942
1906 tmp &= ~SCHIZO_PCICTRL_PTO; 1943 /* Enable arbiter for all PCI slots. */
1944 tmp |= 0xff;
1945
1907 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && 1946 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
1908 pbm->chip_version >= 0x2) 1947 pbm->chip_version >= 0x2)
1909 tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT; 1948 tmp |= 0x3UL << SCHIZO_PCICTRL_PTO_SHIFT;
1910 else
1911 tmp |= 0x1UL << SCHIZO_PCICTRL_PTO_SHIFT;
1912 1949
1913 if (!prom_getbool(pbm->prom_node, "no-bus-parking")) 1950 if (!prom_getbool(pbm->prom_node, "no-bus-parking"))
1914 tmp |= SCHIZO_PCICTRL_PARK; 1951 tmp |= SCHIZO_PCICTRL_PARK;
1952 else
1953 tmp &= ~SCHIZO_PCICTRL_PARK;
1915 1954
1916 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO && 1955 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO &&
1917 pbm->chip_version <= 0x1) 1956 pbm->chip_version <= 0x1)
1918 tmp |= (1UL << 61); 1957 tmp |= SCHIZO_PCICTRL_DTO_INT;
1919 else 1958 else
1920 tmp &= ~(1UL << 61); 1959 tmp &= ~SCHIZO_PCICTRL_DTO_INT;
1921 1960
1922 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO) 1961 if (pbm->chip_type == PBM_CHIP_TYPE_TOMATILLO)
1923 tmp |= (SCHIZO_PCICTRL_MRM_PREF | 1962 tmp |= (SCHIZO_PCICTRL_MRM_PREF |
@@ -2015,6 +2054,9 @@ static void __init schizo_pbm_init(struct pci_controller_info *p,
2015 pbm->pbm_regs = pr_regs[0].phys_addr; 2054 pbm->pbm_regs = pr_regs[0].phys_addr;
2016 pbm->controller_regs = pr_regs[1].phys_addr - 0x10000UL; 2055 pbm->controller_regs = pr_regs[1].phys_addr - 0x10000UL;
2017 2056
2057 if (chip_type == PBM_CHIP_TYPE_TOMATILLO)
2058 pbm->sync_reg = pr_regs[3].phys_addr + 0x1a18UL;
2059
2018 sprintf(pbm->name, 2060 sprintf(pbm->name,
2019 (chip_type == PBM_CHIP_TYPE_TOMATILLO ? 2061 (chip_type == PBM_CHIP_TYPE_TOMATILLO ?
2020 "TOMATILLO%d PBM%c" : 2062 "TOMATILLO%d PBM%c" :
diff --git a/arch/sparc64/kernel/time.c b/arch/sparc64/kernel/time.c
index 71b4e3807694..b40db389f90b 100644
--- a/arch/sparc64/kernel/time.c
+++ b/arch/sparc64/kernel/time.c
@@ -973,7 +973,7 @@ static void sparc64_start_timers(irqreturn_t (*cfunc)(int, void *, struct pt_reg
973 int err; 973 int err;
974 974
975 /* Register IRQ handler. */ 975 /* Register IRQ handler. */
976 err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, SA_STATIC_ALLOC, 976 err = request_irq(build_irq(0, 0, 0UL, 0UL), cfunc, 0,
977 "timer", NULL); 977 "timer", NULL);
978 978
979 if (err) { 979 if (err) {
diff --git a/drivers/char/hw_random.c b/drivers/char/hw_random.c
index 7e6ac14c2450..3480535a09c5 100644
--- a/drivers/char/hw_random.c
+++ b/drivers/char/hw_random.c
@@ -579,7 +579,7 @@ static int __init rng_init (void)
579 579
580 /* Probe for Intel, AMD RNGs */ 580 /* Probe for Intel, AMD RNGs */
581 for_each_pci_dev(pdev) { 581 for_each_pci_dev(pdev) {
582 ent = pci_match_device (rng_pci_tbl, pdev); 582 ent = pci_match_id(rng_pci_tbl, pdev);
583 if (ent) { 583 if (ent) {
584 rng_ops = &rng_vendor_ops[ent->driver_data]; 584 rng_ops = &rng_vendor_ops[ent->driver_data];
585 goto match; 585 goto match;
diff --git a/drivers/char/watchdog/i8xx_tco.c b/drivers/char/watchdog/i8xx_tco.c
index b14d642439ed..5d07ee59679d 100644
--- a/drivers/char/watchdog/i8xx_tco.c
+++ b/drivers/char/watchdog/i8xx_tco.c
@@ -401,7 +401,7 @@ static unsigned char __init i8xx_tco_getdevice (void)
401 */ 401 */
402 402
403 while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) { 403 while ((dev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
404 if (pci_match_device(i8xx_tco_pci_tbl, dev)) { 404 if (pci_match_id(i8xx_tco_pci_tbl, dev)) {
405 i8xx_tco_pci = dev; 405 i8xx_tco_pci = dev;
406 break; 406 break;
407 } 407 }
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index e501675ad72e..77da827b2898 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -847,7 +847,7 @@ static int __init ide_scan_pcidev(struct pci_dev *dev)
847 d = list_entry(l, struct pci_driver, node); 847 d = list_entry(l, struct pci_driver, node);
848 if(d->id_table) 848 if(d->id_table)
849 { 849 {
850 const struct pci_device_id *id = pci_match_device(d->id_table, dev); 850 const struct pci_device_id *id = pci_match_id(d->id_table, dev);
851 if(id != NULL) 851 if(id != NULL)
852 { 852 {
853 if(d->probe(dev, id) >= 0) 853 if(d->probe(dev, id) >= 0)
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 80edfa3abd29..4598c6a9212d 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -3008,7 +3008,7 @@ static int __init parport_pc_init_superio (int autoirq, int autodma)
3008 int ret = 0; 3008 int ret = 0;
3009 3009
3010 while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) { 3010 while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev)) != NULL) {
3011 id = pci_match_device (parport_pc_pci_tbl, pdev); 3011 id = pci_match_id(parport_pc_pci_tbl, pdev);
3012 if (id == NULL || id->driver_data >= last_sio) 3012 if (id == NULL || id->driver_data >= last_sio)
3013 continue; 3013 continue;
3014 3014
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 7dea494c0d7b..3657f6199c48 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
19# 19#
20# Some architectures use the generic PCI setup functions 20# Some architectures use the generic PCI setup functions
21# 21#
22obj-$(CONFIG_X86) += setup-bus.o
22obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o 23obj-$(CONFIG_ALPHA) += setup-bus.o setup-irq.o
23obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o 24obj-$(CONFIG_ARM) += setup-bus.o setup-irq.o
24obj-$(CONFIG_PARISC) += setup-bus.o 25obj-$(CONFIG_PARISC) += setup-bus.o
diff --git a/drivers/pci/hotplug.c b/drivers/pci/hotplug.c
index 3903f8c559b6..b844bc972324 100644
--- a/drivers/pci/hotplug.c
+++ b/drivers/pci/hotplug.c
@@ -54,7 +54,7 @@ int pci_hotplug (struct device *dev, char **envp, int num_envp,
54 54
55 envp[i++] = scratch; 55 envp[i++] = scratch;
56 length += scnprintf (scratch, buffer_size - length, 56 length += scnprintf (scratch, buffer_size - length,
57 "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n", 57 "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
58 pdev->vendor, pdev->device, 58 pdev->vendor, pdev->device,
59 pdev->subsystem_vendor, pdev->subsystem_device, 59 pdev->subsystem_vendor, pdev->subsystem_device,
60 (u8)(pdev->class >> 16), (u8)(pdev->class >> 8), 60 (u8)(pdev->class >> 16), (u8)(pdev->class >> 8),
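
For reference, the MODALIAS format string above (now without the stray trailing newline) produces strings of the following shape; the device values in this sketch are made up:

/* Illustration only: the MODALIAS value produced above for a hypothetical
 * 8086:1229 Ethernet device (class 0x020000). */
static void example_modalias(void)
{
        char buf[80];

        scnprintf(buf, sizeof(buf),
                  "MODALIAS=pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x",
                  0x8086, 0x1229,       /* vendor, device */
                  0x8086, 0x0001,       /* subsystem vendor, subsystem device */
                  0x02, 0x00, 0x00);    /* base class, sub-class, prog-if */
        /* buf == "MODALIAS=pci:v00008086d00001229sv00008086sd00000001bc02sc00i00" */
}
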
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index e65bf2b395aa..aac6de9568e5 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -7,7 +7,6 @@
7#include <linux/module.h> 7#include <linux/module.h>
8#include <linux/init.h> 8#include <linux/init.h>
9#include <linux/device.h> 9#include <linux/device.h>
10#include <linux/pci-dynids.h>
11#include "pci.h" 10#include "pci.h"
12 11
13/* 12/*
@@ -19,35 +18,11 @@
19 */ 18 */
20 19
21#ifdef CONFIG_HOTPLUG 20#ifdef CONFIG_HOTPLUG
22/**
23 * pci_device_probe_dynamic()
24 *
25 * Walk the dynamic ID list looking for a match.
26 * returns 0 and sets pci_dev->driver when drv claims pci_dev, else error.
27 */
28static int
29pci_device_probe_dynamic(struct pci_driver *drv, struct pci_dev *pci_dev)
30{
31 int error = -ENODEV;
32 struct list_head *pos;
33 struct dynid *dynid;
34 21
35 spin_lock(&drv->dynids.lock); 22struct pci_dynid {
36 list_for_each(pos, &drv->dynids.list) { 23 struct list_head node;
37 dynid = list_entry(pos, struct dynid, node); 24 struct pci_device_id id;
38 if (pci_match_one_device(&dynid->id, pci_dev)) { 25};
39 spin_unlock(&drv->dynids.lock);
40 error = drv->probe(pci_dev, &dynid->id);
41 if (error >= 0) {
42 pci_dev->driver = drv;
43 return 0;
44 }
45 return error;
46 }
47 }
48 spin_unlock(&drv->dynids.lock);
49 return error;
50}
51 26
52/** 27/**
53 * store_new_id 28 * store_new_id
@@ -58,8 +33,7 @@ pci_device_probe_dynamic(struct pci_driver *drv, struct pci_dev *pci_dev)
58static inline ssize_t 33static inline ssize_t
59store_new_id(struct device_driver *driver, const char *buf, size_t count) 34store_new_id(struct device_driver *driver, const char *buf, size_t count)
60{ 35{
61 struct dynid *dynid; 36 struct pci_dynid *dynid;
62 struct bus_type * bus;
63 struct pci_driver *pdrv = to_pci_driver(driver); 37 struct pci_driver *pdrv = to_pci_driver(driver);
64 __u32 vendor=PCI_ANY_ID, device=PCI_ANY_ID, subvendor=PCI_ANY_ID, 38 __u32 vendor=PCI_ANY_ID, device=PCI_ANY_ID, subvendor=PCI_ANY_ID,
65 subdevice=PCI_ANY_ID, class=0, class_mask=0; 39 subdevice=PCI_ANY_ID, class=0, class_mask=0;
@@ -91,37 +65,22 @@ store_new_id(struct device_driver *driver, const char *buf, size_t count)
91 list_add_tail(&pdrv->dynids.list, &dynid->node); 65 list_add_tail(&pdrv->dynids.list, &dynid->node);
92 spin_unlock(&pdrv->dynids.lock); 66 spin_unlock(&pdrv->dynids.lock);
93 67
94 bus = get_bus(pdrv->driver.bus); 68 if (get_driver(&pdrv->driver)) {
95 if (bus) { 69 driver_attach(&pdrv->driver);
96 if (get_driver(&pdrv->driver)) { 70 put_driver(&pdrv->driver);
97 down_write(&bus->subsys.rwsem);
98 driver_attach(&pdrv->driver);
99 up_write(&bus->subsys.rwsem);
100 put_driver(&pdrv->driver);
101 }
102 put_bus(bus);
103 } 71 }
104 72
105 return count; 73 return count;
106} 74}
107
108static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id); 75static DRIVER_ATTR(new_id, S_IWUSR, NULL, store_new_id);
109static inline void
110pci_init_dynids(struct pci_dynids *dynids)
111{
112 spin_lock_init(&dynids->lock);
113 INIT_LIST_HEAD(&dynids->list);
114}
115 76
116static void 77static void
117pci_free_dynids(struct pci_driver *drv) 78pci_free_dynids(struct pci_driver *drv)
118{ 79{
119 struct list_head *pos, *n; 80 struct pci_dynid *dynid, *n;
120 struct dynid *dynid;
121 81
122 spin_lock(&drv->dynids.lock); 82 spin_lock(&drv->dynids.lock);
123 list_for_each_safe(pos, n, &drv->dynids.list) { 83 list_for_each_entry_safe(dynid, n, &drv->dynids.list, node) {
124 dynid = list_entry(pos, struct dynid, node);
125 list_del(&dynid->node); 84 list_del(&dynid->node);
126 kfree(dynid); 85 kfree(dynid);
127 } 86 }
@@ -138,83 +97,70 @@ pci_create_newid_file(struct pci_driver *drv)
138 return error; 97 return error;
139} 98}
140 99
141static int
142pci_bus_match_dynids(const struct pci_dev *pci_dev, struct pci_driver *pci_drv)
143{
144 struct list_head *pos;
145 struct dynid *dynid;
146
147 spin_lock(&pci_drv->dynids.lock);
148 list_for_each(pos, &pci_drv->dynids.list) {
149 dynid = list_entry(pos, struct dynid, node);
150 if (pci_match_one_device(&dynid->id, pci_dev)) {
151 spin_unlock(&pci_drv->dynids.lock);
152 return 1;
153 }
154 }
155 spin_unlock(&pci_drv->dynids.lock);
156 return 0;
157}
158
159#else /* !CONFIG_HOTPLUG */ 100#else /* !CONFIG_HOTPLUG */
160static inline int pci_device_probe_dynamic(struct pci_driver *drv, struct pci_dev *pci_dev)
161{
162 return -ENODEV;
163}
164static inline void pci_init_dynids(struct pci_dynids *dynids) {}
165static inline void pci_free_dynids(struct pci_driver *drv) {} 101static inline void pci_free_dynids(struct pci_driver *drv) {}
166static inline int pci_create_newid_file(struct pci_driver *drv) 102static inline int pci_create_newid_file(struct pci_driver *drv)
167{ 103{
168 return 0; 104 return 0;
169} 105}
170static inline int pci_bus_match_dynids(const struct pci_dev *pci_dev, struct pci_driver *pci_drv)
171{
172 return 0;
173}
174#endif 106#endif
175 107
176/** 108/**
177 * pci_match_device - Tell if a PCI device structure has a matching 109 * pci_match_id - See if a pci device matches a given pci_id table
178 * PCI device id structure
179 * @ids: array of PCI device id structures to search in 110 * @ids: array of PCI device id structures to search in
180 * @dev: the PCI device structure to match against 111 * @dev: the PCI device structure to match against.
181 * 112 *
182 * Used by a driver to check whether a PCI device present in the 113 * Used by a driver to check whether a PCI device present in the
183 * system is in its list of supported devices.Returns the matching 114 * system is in its list of supported devices. Returns the matching
184 * pci_device_id structure or %NULL if there is no match. 115 * pci_device_id structure or %NULL if there is no match.
116 *
 117 * Deprecated, don't use this as it will not catch any dynamic ids
118 * that a driver might want to check for.
185 */ 119 */
186const struct pci_device_id * 120const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
187pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) 121 struct pci_dev *dev)
188{ 122{
189 while (ids->vendor || ids->subvendor || ids->class_mask) { 123 if (ids) {
190 if (pci_match_one_device(ids, dev)) 124 while (ids->vendor || ids->subvendor || ids->class_mask) {
191 return ids; 125 if (pci_match_one_device(ids, dev))
192 ids++; 126 return ids;
127 ids++;
128 }
193 } 129 }
194 return NULL; 130 return NULL;
195} 131}
196 132
197/** 133/**
198 * pci_device_probe_static() 134 * pci_match_device - Tell if a PCI device structure has a matching
199 * 135 * PCI device id structure
200 * returns 0 and sets pci_dev->driver when drv claims pci_dev, else error. 136 * @ids: array of PCI device id structures to search in
137 * @dev: the PCI device structure to match against
138 * @drv: the PCI driver to match against
139 *
140 * Used by a driver to check whether a PCI device present in the
141 * system is in its list of supported devices. Returns the matching
142 * pci_device_id structure or %NULL if there is no match.
201 */ 143 */
202static int 144const struct pci_device_id *pci_match_device(struct pci_driver *drv,
203pci_device_probe_static(struct pci_driver *drv, struct pci_dev *pci_dev) 145 struct pci_dev *dev)
204{ 146{
205 int error = -ENODEV;
206 const struct pci_device_id *id; 147 const struct pci_device_id *id;
148 struct pci_dynid *dynid;
207 149
208 if (!drv->id_table) 150 id = pci_match_id(drv->id_table, dev);
209 return error;
210 id = pci_match_device(drv->id_table, pci_dev);
211 if (id) 151 if (id)
212 error = drv->probe(pci_dev, id); 152 return id;
213 if (error >= 0) { 153
 214 pci_dev->driver = drv; 154 /* static ids didn't match, let's look at the dynamic ones */
215 error = 0; 155 spin_lock(&drv->dynids.lock);
156 list_for_each_entry(dynid, &drv->dynids.list, node) {
157 if (pci_match_one_device(&dynid->id, dev)) {
158 spin_unlock(&drv->dynids.lock);
159 return &dynid->id;
160 }
216 } 161 }
217 return error; 162 spin_unlock(&drv->dynids.lock);
163 return NULL;
218} 164}
219 165
220/** 166/**
@@ -225,13 +171,20 @@ pci_device_probe_static(struct pci_driver *drv, struct pci_dev *pci_dev)
225 */ 171 */
226static int 172static int
227__pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev) 173__pci_device_probe(struct pci_driver *drv, struct pci_dev *pci_dev)
228{ 174{
175 const struct pci_device_id *id;
229 int error = 0; 176 int error = 0;
230 177
231 if (!pci_dev->driver && drv->probe) { 178 if (!pci_dev->driver && drv->probe) {
232 error = pci_device_probe_static(drv, pci_dev); 179 error = -ENODEV;
233 if (error == -ENODEV) 180
234 error = pci_device_probe_dynamic(drv, pci_dev); 181 id = pci_match_device(drv, pci_dev);
182 if (id)
183 error = drv->probe(pci_dev, id);
184 if (error >= 0) {
185 pci_dev->driver = drv;
186 error = 0;
187 }
235 } 188 }
236 return error; 189 return error;
237} 190}
@@ -371,12 +324,6 @@ static struct kobj_type pci_driver_kobj_type = {
371 .sysfs_ops = &pci_driver_sysfs_ops, 324 .sysfs_ops = &pci_driver_sysfs_ops,
372}; 325};
373 326
374static int
375pci_populate_driver_dir(struct pci_driver *drv)
376{
377 return pci_create_newid_file(drv);
378}
379
380/** 327/**
381 * pci_register_driver - register a new pci driver 328 * pci_register_driver - register a new pci driver
382 * @drv: the driver structure to register 329 * @drv: the driver structure to register
@@ -401,13 +348,15 @@ int pci_register_driver(struct pci_driver *drv)
401 drv->driver.shutdown = pci_device_shutdown; 348 drv->driver.shutdown = pci_device_shutdown;
402 drv->driver.owner = drv->owner; 349 drv->driver.owner = drv->owner;
403 drv->driver.kobj.ktype = &pci_driver_kobj_type; 350 drv->driver.kobj.ktype = &pci_driver_kobj_type;
404 pci_init_dynids(&drv->dynids); 351
352 spin_lock_init(&drv->dynids.lock);
353 INIT_LIST_HEAD(&drv->dynids.list);
405 354
406 /* register with core */ 355 /* register with core */
407 error = driver_register(&drv->driver); 356 error = driver_register(&drv->driver);
408 357
409 if (!error) 358 if (!error)
410 pci_populate_driver_dir(drv); 359 error = pci_create_newid_file(drv);
411 360
412 return error; 361 return error;
413} 362}
@@ -463,21 +412,17 @@ pci_dev_driver(const struct pci_dev *dev)
463 * system is in its list of supported devices.Returns the matching 412 * system is in its list of supported devices.Returns the matching
464 * pci_device_id structure or %NULL if there is no match. 413 * pci_device_id structure or %NULL if there is no match.
465 */ 414 */
466static int pci_bus_match(struct device * dev, struct device_driver * drv) 415static int pci_bus_match(struct device *dev, struct device_driver *drv)
467{ 416{
468 const struct pci_dev * pci_dev = to_pci_dev(dev); 417 struct pci_dev *pci_dev = to_pci_dev(dev);
469 struct pci_driver * pci_drv = to_pci_driver(drv); 418 struct pci_driver *pci_drv = to_pci_driver(drv);
470 const struct pci_device_id * ids = pci_drv->id_table;
471 const struct pci_device_id *found_id; 419 const struct pci_device_id *found_id;
472 420
473 if (!ids) 421 found_id = pci_match_device(pci_drv, pci_dev);
474 return 0;
475
476 found_id = pci_match_device(ids, pci_dev);
477 if (found_id) 422 if (found_id)
478 return 1; 423 return 1;
479 424
480 return pci_bus_match_dynids(pci_dev, pci_drv); 425 return 0;
481} 426}
482 427
483/** 428/**
@@ -536,6 +481,7 @@ static int __init pci_driver_init(void)
536 481
537postcore_initcall(pci_driver_init); 482postcore_initcall(pci_driver_init);
538 483
484EXPORT_SYMBOL(pci_match_id);
539EXPORT_SYMBOL(pci_match_device); 485EXPORT_SYMBOL(pci_match_device);
540EXPORT_SYMBOL(pci_register_driver); 486EXPORT_SYMBOL(pci_register_driver);
541EXPORT_SYMBOL(pci_unregister_driver); 487EXPORT_SYMBOL(pci_unregister_driver);
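Read together with the prototype change in include/linux/pci.h further down, the probe path above funnels both static and dynamic IDs through a single pci_match_device(). Only the tail of that function is visible in this hunk, so the following is a rough sketch of its likely shape; the pci_dynid element type and the pci_match_one_device() helper are assumed names, not confirmed by this diff:

	#include <linux/pci.h>
	#include <linux/spinlock.h>
	#include <linux/list.h>

	/* Sketch only -- element type and pci_match_one_device() are assumptions. */
	struct pci_dynid {
		struct list_head node;
		struct pci_device_id id;
	};

	const struct pci_device_id *pci_match_id(const struct pci_device_id *ids,
						 struct pci_dev *dev)
	{
		if (ids) {
			while (ids->vendor || ids->subvendor || ids->class_mask) {
				if (pci_match_one_device(ids, dev))
					return ids;
				ids++;
			}
		}
		return NULL;
	}

	const struct pci_device_id *pci_match_device(struct pci_driver *drv,
						     struct pci_dev *dev)
	{
		const struct pci_device_id *id;
		struct pci_dynid *dynid;

		/* Static id_table first... */
		id = pci_match_id(drv->id_table, dev);
		if (id)
			return id;

		/* ...then IDs added at runtime through the driver's new_id file. */
		spin_lock(&drv->dynids.lock);
		list_for_each_entry(dynid, &drv->dynids.list, node) {
			if (pci_match_one_device(&dynid->id, dev)) {
				spin_unlock(&drv->dynids.lock);
				return &dynid->id;
			}
		}
		spin_unlock(&drv->dynids.lock);
		return NULL;
	}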
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index f04b9ffe4153..d382bdb7b560 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -334,10 +334,6 @@ EXPORT_SYMBOL(pci_choose_state);
334/** 334/**
335 * pci_save_state - save the PCI configuration space of a device before suspending 335 * pci_save_state - save the PCI configuration space of a device before suspending
336 * @dev: - PCI device that we're dealing with 336 * @dev: - PCI device that we're dealing with
337 * @buffer: - buffer to hold config space context
338 *
339 * @buffer must be large enough to hold the entire PCI 2.2 config space
340 * (>= 64 bytes).
341 */ 337 */
342int 338int
343pci_save_state(struct pci_dev *dev) 339pci_save_state(struct pci_dev *dev)
@@ -352,8 +348,6 @@ pci_save_state(struct pci_dev *dev)
352/** 348/**
353 * pci_restore_state - Restore the saved state of a PCI device 349 * pci_restore_state - Restore the saved state of a PCI device
354 * @dev: - PCI device that we're dealing with 350 * @dev: - PCI device that we're dealing with
355 * @buffer: - saved PCI config space
356 *
357 */ 351 */
358int 352int
359pci_restore_state(struct pci_dev *dev) 353pci_restore_state(struct pci_dev *dev)
diff --git a/drivers/pci/pcie/portdrv.h b/drivers/pci/pcie/portdrv.h
index 537b372dc340..a63bd8f72601 100644
--- a/drivers/pci/pcie/portdrv.h
+++ b/drivers/pci/pcie/portdrv.h
@@ -27,6 +27,11 @@
27 27
28#define get_descriptor_id(type, service) (((type - 4) << 4) | service) 28#define get_descriptor_id(type, service) (((type - 4) << 4) | service)
29 29
30struct pcie_port_device_ext {
31 int interrupt_mode; /* [0:INTx | 1:MSI | 2:MSI-X] */
32 unsigned int saved_msi_config_space[5];
33};
34
30extern struct bus_type pcie_port_bus_type; 35extern struct bus_type pcie_port_bus_type;
31extern int pcie_port_device_probe(struct pci_dev *dev); 36extern int pcie_port_device_probe(struct pci_dev *dev);
32extern int pcie_port_device_register(struct pci_dev *dev); 37extern int pcie_port_device_register(struct pci_dev *dev);
diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c
index f5c5f10a3d2f..4db69982876e 100644
--- a/drivers/pci/pcie/portdrv_core.c
+++ b/drivers/pci/pcie/portdrv_core.c
@@ -275,10 +275,17 @@ int pcie_port_device_probe(struct pci_dev *dev)
275 275
276int pcie_port_device_register(struct pci_dev *dev) 276int pcie_port_device_register(struct pci_dev *dev)
277{ 277{
278 struct pcie_port_device_ext *p_ext;
278 int status, type, capabilities, irq_mode, i; 279 int status, type, capabilities, irq_mode, i;
279 int vectors[PCIE_PORT_DEVICE_MAXSERVICES]; 280 int vectors[PCIE_PORT_DEVICE_MAXSERVICES];
280 u16 reg16; 281 u16 reg16;
281 282
283 /* Allocate port device extension */
284 if (!(p_ext = kmalloc(sizeof(struct pcie_port_device_ext), GFP_KERNEL)))
285 return -ENOMEM;
286
287 pci_set_drvdata(dev, p_ext);
288
282 /* Get port type */ 289 /* Get port type */
283 pci_read_config_word(dev, 290 pci_read_config_word(dev,
284 pci_find_capability(dev, PCI_CAP_ID_EXP) + 291 pci_find_capability(dev, PCI_CAP_ID_EXP) +
@@ -288,6 +295,7 @@ int pcie_port_device_register(struct pci_dev *dev)
288 /* Now get port services */ 295 /* Now get port services */
289 capabilities = get_port_device_capability(dev); 296 capabilities = get_port_device_capability(dev);
290 irq_mode = assign_interrupt_mode(dev, vectors, capabilities); 297 irq_mode = assign_interrupt_mode(dev, vectors, capabilities);
298 p_ext->interrupt_mode = irq_mode;
291 299
292 /* Allocate child services if any */ 300 /* Allocate child services if any */
293 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { 301 for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) {
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index e9095ee508e3..30bac7ed7c16 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -29,6 +29,78 @@ MODULE_LICENSE("GPL");
29/* global data */ 29/* global data */
30static const char device_name[] = "pcieport-driver"; 30static const char device_name[] = "pcieport-driver";
31 31
32static void pci_save_msi_state(struct pci_dev *dev)
33{
34 struct pcie_port_device_ext *p_ext = pci_get_drvdata(dev);
35 int i = 0, pos;
36 u16 control;
37
38 if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSI)) <= 0)
39 return;
40
41 pci_read_config_dword(dev, pos, &p_ext->saved_msi_config_space[i++]);
42 control = p_ext->saved_msi_config_space[0] >> 16;
43 pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
44 &p_ext->saved_msi_config_space[i++]);
45 if (control & PCI_MSI_FLAGS_64BIT) {
46 pci_read_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
47 &p_ext->saved_msi_config_space[i++]);
48 pci_read_config_dword(dev, pos + PCI_MSI_DATA_64,
49 &p_ext->saved_msi_config_space[i++]);
50 } else
51 pci_read_config_dword(dev, pos + PCI_MSI_DATA_32,
52 &p_ext->saved_msi_config_space[i++]);
53 if (control & PCI_MSI_FLAGS_MASKBIT)
54 pci_read_config_dword(dev, pos + PCI_MSI_MASK_BIT,
55 &p_ext->saved_msi_config_space[i++]);
56}
57
58static void pci_restore_msi_state(struct pci_dev *dev)
59{
60 struct pcie_port_device_ext *p_ext = pci_get_drvdata(dev);
61 int i = 0, pos;
62 u16 control;
63
64 if ((pos = pci_find_capability(dev, PCI_CAP_ID_MSI)) <= 0)
65 return;
66
67 control = p_ext->saved_msi_config_space[i++] >> 16;
68 pci_write_config_word(dev, pos + PCI_MSI_FLAGS, control);
69 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_LO,
70 p_ext->saved_msi_config_space[i++]);
71 if (control & PCI_MSI_FLAGS_64BIT) {
72 pci_write_config_dword(dev, pos + PCI_MSI_ADDRESS_HI,
73 p_ext->saved_msi_config_space[i++]);
74 pci_write_config_dword(dev, pos + PCI_MSI_DATA_64,
75 p_ext->saved_msi_config_space[i++]);
76 } else
77 pci_write_config_dword(dev, pos + PCI_MSI_DATA_32,
78 p_ext->saved_msi_config_space[i++]);
79 if (control & PCI_MSI_FLAGS_MASKBIT)
80 pci_write_config_dword(dev, pos + PCI_MSI_MASK_BIT,
81 p_ext->saved_msi_config_space[i++]);
82}
83
84static void pcie_portdrv_save_config(struct pci_dev *dev)
85{
86 struct pcie_port_device_ext *p_ext = pci_get_drvdata(dev);
87
88 pci_save_state(dev);
89 if (p_ext->interrupt_mode == PCIE_PORT_MSI_MODE)
90 pci_save_msi_state(dev);
91}
92
93static void pcie_portdrv_restore_config(struct pci_dev *dev)
94{
95 struct pcie_port_device_ext *p_ext = pci_get_drvdata(dev);
96
97 pci_restore_state(dev);
98 if (p_ext->interrupt_mode == PCIE_PORT_MSI_MODE)
99 pci_restore_msi_state(dev);
100 pci_enable_device(dev);
101 pci_set_master(dev);
102}
103
32/* 104/*
33 * pcie_portdrv_probe - Probe PCI-Express port devices 105 * pcie_portdrv_probe - Probe PCI-Express port devices
34 * @dev: PCI-Express port device being probed 106 * @dev: PCI-Express port device being probed
@@ -64,16 +136,21 @@ static int __devinit pcie_portdrv_probe (struct pci_dev *dev,
64static void pcie_portdrv_remove (struct pci_dev *dev) 136static void pcie_portdrv_remove (struct pci_dev *dev)
65{ 137{
66 pcie_port_device_remove(dev); 138 pcie_port_device_remove(dev);
139 kfree(pci_get_drvdata(dev));
67} 140}
68 141
69#ifdef CONFIG_PM 142#ifdef CONFIG_PM
70static int pcie_portdrv_suspend (struct pci_dev *dev, pm_message_t state) 143static int pcie_portdrv_suspend (struct pci_dev *dev, pm_message_t state)
71{ 144{
72 return pcie_port_device_suspend(dev, state); 145 int ret = pcie_port_device_suspend(dev, state);
146
147 pcie_portdrv_save_config(dev);
148 return ret;
73} 149}
74 150
75static int pcie_portdrv_resume (struct pci_dev *dev) 151static int pcie_portdrv_resume (struct pci_dev *dev)
76{ 152{
153 pcie_portdrv_restore_config(dev);
77 return pcie_port_device_resume(dev); 154 return pcie_port_device_resume(dev);
78} 155}
79#endif 156#endif
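The five u32 slots of saved_msi_config_space[] declared in portdrv.h are consumed in the order pci_save_msi_state() writes them above; a summary of the worst-case layout:

	/*
	 * saved_msi_config_space[] layout as written above (worst case is
	 * 64-bit MSI with per-vector masking; narrower variants use fewer slots):
	 *
	 *   [0] dword at the MSI capability offset (message control in bits 31:16)
	 *   [1] PCI_MSI_ADDRESS_LO
	 *   [2] PCI_MSI_ADDRESS_HI with PCI_MSI_FLAGS_64BIT, else PCI_MSI_DATA_32
	 *   [3] PCI_MSI_DATA_64 in the 64-bit case
	 *   [4] PCI_MSI_MASK_BIT with PCI_MSI_FLAGS_MASKBIT (one slot earlier in
	 *       the 32-bit case, since the data word only consumed one slot)
	 *
	 * pci_restore_msi_state() replays the same slots, but writes the control
	 * word back through PCI_MSI_FLAGS rather than the full dword at [0].
	 */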
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 6a0a82f0508b..df3bdae2040f 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -239,9 +239,8 @@ void __devinit pci_read_bridge_bases(struct pci_bus *child)
239 239
240 if (dev->transparent) { 240 if (dev->transparent) {
241 printk(KERN_INFO "PCI: Transparent bridge - %s\n", pci_name(dev)); 241 printk(KERN_INFO "PCI: Transparent bridge - %s\n", pci_name(dev));
242 for(i = 0; i < PCI_BUS_NUM_RESOURCES; i++) 242 for(i = 3; i < PCI_BUS_NUM_RESOURCES; i++)
243 child->resource[i] = child->parent->resource[i]; 243 child->resource[i] = child->parent->resource[i - 3];
244 return;
245 } 244 }
246 245
247 for(i=0; i<3; i++) 246 for(i=0; i<3; i++)
@@ -398,6 +397,16 @@ static void pci_enable_crs(struct pci_dev *dev)
398 pci_write_config_word(dev, rpcap + PCI_EXP_RTCTL, rpctl); 397 pci_write_config_word(dev, rpcap + PCI_EXP_RTCTL, rpctl);
399} 398}
400 399
400static void __devinit pci_fixup_parent_subordinate_busnr(struct pci_bus *child, int max)
401{
402 struct pci_bus *parent = child->parent;
403 while (parent->parent && parent->subordinate < max) {
404 parent->subordinate = max;
405 pci_write_config_byte(parent->self, PCI_SUBORDINATE_BUS, max);
406 parent = parent->parent;
407 }
408}
409
401unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus); 410unsigned int __devinit pci_scan_child_bus(struct pci_bus *bus);
402 411
403/* 412/*
@@ -499,7 +508,13 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
499 508
500 if (!is_cardbus) { 509 if (!is_cardbus) {
501 child->bridge_ctl = PCI_BRIDGE_CTL_NO_ISA; 510 child->bridge_ctl = PCI_BRIDGE_CTL_NO_ISA;
502 511 /*
512 * Adjust subordinate busnr in parent buses.
513 * We do this before scanning for children because
514 * some devices may not be detected if the bios
515 * was lazy.
516 */
517 pci_fixup_parent_subordinate_busnr(child, max);
503 /* Now we can scan all subordinate buses... */ 518 /* Now we can scan all subordinate buses... */
504 max = pci_scan_child_bus(child); 519 max = pci_scan_child_bus(child);
505 } else { 520 } else {
@@ -513,6 +528,7 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max
513 max+i+1)) 528 max+i+1))
514 break; 529 break;
515 max += i; 530 max += i;
531 pci_fixup_parent_subordinate_busnr(child, max);
516 } 532 }
517 /* 533 /*
518 * Set the subordinate bus number to its real value. 534 * Set the subordinate bus number to its real value.
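A hypothetical topology (bus numbers invented for illustration) shows why the fixup has to run before pci_scan_child_bus() descends:

	/*
	 * Hypothetical example:
	 *
	 *   root bus 00
	 *     bridge A: secondary 01, subordinate 01   <- left too small by the BIOS
	 *       bridge B on bus 01, about to get child bus 02
	 *
	 * pci_scan_bridge() picks bus 02 for the bus behind bridge B, but as long
	 * as bridge A still claims subordinate 01 it will not forward type-1
	 * configuration cycles for bus 02, so nothing behind B can be detected.
	 * pci_fixup_parent_subordinate_busnr(child, max) walks up from the new
	 * child bus, bumping each upstream bridge's PCI_SUBORDINATE_BUS register
	 * up to max (it stops at the root bus, which has no bridge to program),
	 * and only then is the bus behind B scanned.
	 */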
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 968033fd29f0..1521fd5d95cc 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -767,6 +767,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
767 if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) { 767 if (unlikely(dev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK)) {
768 if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB) 768 if (dev->device == PCI_DEVICE_ID_INTEL_82845_HB)
769 switch(dev->subsystem_device) { 769 switch(dev->subsystem_device) {
770 case 0x8025: /* P4B-LX */
770 case 0x8070: /* P4B */ 771 case 0x8070: /* P4B */
771 case 0x8088: /* P4B533 */ 772 case 0x8088: /* P4B533 */
772 case 0x1626: /* L3C notebook */ 773 case 0x1626: /* L3C notebook */
diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c
index 6b628de948af..c1bdfb424658 100644
--- a/drivers/pci/setup-bus.c
+++ b/drivers/pci/setup-bus.c
@@ -273,6 +273,8 @@ find_free_bus_resource(struct pci_bus *bus, unsigned long type)
273 273
274 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) { 274 for (i = 0; i < PCI_BUS_NUM_RESOURCES; i++) {
275 r = bus->resource[i]; 275 r = bus->resource[i];
276 if (r == &ioport_resource || r == &iomem_resource)
277 continue;
276 if (r && (r->flags & type_mask) == type && !r->parent) 278 if (r && (r->flags & type_mask) == type && !r->parent)
277 return r; 279 return r;
278 } 280 }
diff --git a/drivers/sbus/char/bpp.c b/drivers/sbus/char/bpp.c
index 8f0f46907a81..87302fb14885 100644
--- a/drivers/sbus/char/bpp.c
+++ b/drivers/sbus/char/bpp.c
@@ -79,10 +79,6 @@ struct inst {
79 79
80 unsigned char run_length; 80 unsigned char run_length;
81 unsigned char repeat_byte; 81 unsigned char repeat_byte;
82
83 /* These members manage timeouts for programmed delays */
84 wait_queue_head_t wait_queue;
85 struct timer_list timer_list;
86}; 82};
87 83
88static struct inst instances[BPP_NO]; 84static struct inst instances[BPP_NO];
@@ -297,16 +293,10 @@ static unsigned short get_pins(unsigned minor)
297 293
298#endif /* __sparc__ */ 294#endif /* __sparc__ */
299 295
300static void bpp_wake_up(unsigned long val)
301{ wake_up(&instances[val].wait_queue); }
302
303static void snooze(unsigned long snooze_time, unsigned minor) 296static void snooze(unsigned long snooze_time, unsigned minor)
304{ 297{
305 init_timer(&instances[minor].timer_list); 298 set_current_state(TASK_UNINTERRUPTIBLE);
306 instances[minor].timer_list.expires = jiffies + snooze_time + 1; 299 schedule_timeout(snooze_time + 1);
307 instances[minor].timer_list.data = minor;
308 add_timer(&instances[minor].timer_list);
309 sleep_on (&instances[minor].wait_queue);
310} 300}
311 301
312static int wait_for(unsigned short set, unsigned short clr, 302static int wait_for(unsigned short set, unsigned short clr,
@@ -880,11 +870,8 @@ static void probeLptPort(unsigned idx)
880 instances[idx].enhanced = 0; 870 instances[idx].enhanced = 0;
881 instances[idx].direction = 0; 871 instances[idx].direction = 0;
882 instances[idx].mode = COMPATIBILITY; 872 instances[idx].mode = COMPATIBILITY;
883 instances[idx].wait_queue = 0;
884 instances[idx].run_length = 0; 873 instances[idx].run_length = 0;
885 instances[idx].run_flag = 0; 874 instances[idx].run_flag = 0;
886 init_timer(&instances[idx].timer_list);
887 instances[idx].timer_list.function = bpp_wake_up;
888 if (!request_region(lpAddr,3, dev_name)) return; 875 if (!request_region(lpAddr,3, dev_name)) return;
889 876
890 /* 877 /*
@@ -977,11 +964,8 @@ static void probeLptPort(unsigned idx)
977 instances[idx].enhanced = 0; 964 instances[idx].enhanced = 0;
978 instances[idx].direction = 0; 965 instances[idx].direction = 0;
979 instances[idx].mode = COMPATIBILITY; 966 instances[idx].mode = COMPATIBILITY;
980 init_waitqueue_head(&instances[idx].wait_queue);
981 instances[idx].run_length = 0; 967 instances[idx].run_length = 0;
982 instances[idx].run_flag = 0; 968 instances[idx].run_flag = 0;
983 init_timer(&instances[idx].timer_list);
984 instances[idx].timer_list.function = bpp_wake_up;
985 969
986 if (!rp) return; 970 if (!rp) return;
987 971
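snooze() keeps its jiffies-based argument; only the mechanism changes, from a private timer plus the race-prone sleep_on() to the stock uninterruptible schedule_timeout() idiom. A caller-side sketch, with a delay value invented purely for illustration:

	static void example_delay(unsigned minor)
	{
		/* Pause roughly 40ms before re-sampling the port status lines;
		 * the figure is hypothetical, not taken from the driver. */
		snooze(msecs_to_jiffies(40), minor);
	}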
diff --git a/include/asm-sparc64/irq.h b/include/asm-sparc64/irq.h
index 018e2e46082b..8b70edcb80dc 100644
--- a/include/asm-sparc64/irq.h
+++ b/include/asm-sparc64/irq.h
@@ -16,6 +16,18 @@
16#include <asm/pil.h> 16#include <asm/pil.h>
17#include <asm/ptrace.h> 17#include <asm/ptrace.h>
18 18
19struct ino_bucket;
20
21#define MAX_IRQ_DESC_ACTION 4
22
23struct irq_desc {
24 void (*pre_handler)(struct ino_bucket *, void *, void *);
25 void *pre_handler_arg1;
26 void *pre_handler_arg2;
27 u32 action_active_mask;
28 struct irqaction action[MAX_IRQ_DESC_ACTION];
29};
30
19/* You should not mess with this directly. That's the job of irq.c. 31/* You should not mess with this directly. That's the job of irq.c.
20 * 32 *
21 * If you make changes here, please update hand coded assembler of 33 * If you make changes here, please update hand coded assembler of
@@ -42,24 +54,11 @@ struct ino_bucket {
42 /* Miscellaneous flags. */ 54 /* Miscellaneous flags. */
43/*0x06*/unsigned char flags; 55/*0x06*/unsigned char flags;
44 56
45 /* This is used to deal with IBF_DMA_SYNC on 57 /* Currently unused. */
46 * Sabre systems. 58/*0x07*/unsigned char __pad;
47 */ 59
48/*0x07*/unsigned char synctab_ent; 60 /* Reference to IRQ descriptor for this bucket. */
49 61/*0x08*/struct irq_desc *irq_info;
50 /* Reference to handler for this IRQ. If this is
51 * non-NULL this means it is active and should be
52 * serviced. Else the pending member is set to one
53 * and later registry of the interrupt checks for
54 * this condition.
55 *
56 * Normally this is just an irq_action structure.
57 * But, on PCI, if multiple interrupt sources behind
58 * a bridge have multiple interrupt sources that share
59 * the same INO bucket, this points to an array of
60 * pointers to four IRQ action structures.
61 */
62/*0x08*/void *irq_info;
63 62
64 /* Sun5 Interrupt Clear Register. */ 63 /* Sun5 Interrupt Clear Register. */
65/*0x10*/unsigned long iclr; 64/*0x10*/unsigned long iclr;
@@ -69,12 +68,6 @@ struct ino_bucket {
69 68
70}; 69};
71 70
72#ifdef CONFIG_PCI
73extern unsigned long pci_dma_wsync;
74extern unsigned long dma_sync_reg_table[256];
75extern unsigned char dma_sync_reg_table_entry;
76#endif
77
78/* IMAP/ICLR register defines */ 71/* IMAP/ICLR register defines */
79#define IMAP_VALID 0x80000000 /* IRQ Enabled */ 72#define IMAP_VALID 0x80000000 /* IRQ Enabled */
80#define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */ 73#define IMAP_TID_UPA 0x7c000000 /* UPA TargetID */
@@ -90,11 +83,9 @@ extern unsigned char dma_sync_reg_table_entry;
90#define ICLR_PENDING 0x00000003 /* Pending state */ 83#define ICLR_PENDING 0x00000003 /* Pending state */
91 84
92/* Only 8-bits are available, be careful. -DaveM */ 85/* Only 8-bits are available, be careful. -DaveM */
93#define IBF_DMA_SYNC 0x01 /* DMA synchronization behind PCI bridge needed. */ 86#define IBF_PCI 0x02 /* PSYCHO/SABRE/SCHIZO PCI interrupt. */
94#define IBF_PCI 0x02 /* Indicates PSYCHO/SABRE/SCHIZO PCI interrupt. */ 87#define IBF_ACTIVE 0x04 /* Interrupt is active and has a handler.*/
95#define IBF_ACTIVE 0x04 /* This interrupt is active and has a handler. */ 88#define IBF_INPROGRESS 0x10 /* IRQ is being serviced. */
96#define IBF_MULTI 0x08 /* On PCI, indicates shared bucket. */
97#define IBF_INPROGRESS 0x10 /* IRQ is being serviced. */
98 89
99#define NUM_IVECS (IMAP_INR + 1) 90#define NUM_IVECS (IMAP_INR + 1)
100extern struct ino_bucket ivector_table[NUM_IVECS]; 91extern struct ino_bucket ivector_table[NUM_IVECS];
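The companion rework in arch/sparc64/kernel/irq.c is not part of this hunk, so the following is only a sketch of how a bucket might be serviced through the new per-bucket irq_desc; the function name and calling convention are assumptions:

	#include <linux/interrupt.h>
	#include <asm/irq.h>

	/* Sketch only -- the real dispatch lives in arch/sparc64/kernel/irq.c. */
	static void service_bucket(int irq, struct ino_bucket *bp, struct pt_regs *regs)
	{
		struct irq_desc *desc = bp->irq_info;
		int i;

		/* Bridge-specific work (e.g. the Sabre DMA sync that used to be
		 * keyed off IBF_DMA_SYNC) hangs off the bucket's pre_handler now. */
		if (desc->pre_handler)
			desc->pre_handler(bp, desc->pre_handler_arg1,
					  desc->pre_handler_arg2);

		/* Up to MAX_IRQ_DESC_ACTION handlers may share one INO. */
		for (i = 0; i < MAX_IRQ_DESC_ACTION; i++) {
			if (desc->action_active_mask & (1 << i))
				desc->action[i].handler(irq, desc->action[i].dev_id,
							regs);
		}
	}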
diff --git a/include/asm-sparc64/pbm.h b/include/asm-sparc64/pbm.h
index 4c15610a2bac..38bbbccb4068 100644
--- a/include/asm-sparc64/pbm.h
+++ b/include/asm-sparc64/pbm.h
@@ -145,6 +145,9 @@ struct pci_pbm_info {
145 /* Physical address base of PBM registers. */ 145 /* Physical address base of PBM registers. */
146 unsigned long pbm_regs; 146 unsigned long pbm_regs;
147 147
148 /* Physical address of DMA sync register, if any. */
149 unsigned long sync_reg;
150
148 /* Opaque 32-bit system bus Port ID. */ 151 /* Opaque 32-bit system bus Port ID. */
149 u32 portid; 152 u32 portid;
150 153
diff --git a/include/asm-sparc64/signal.h b/include/asm-sparc64/signal.h
index becdf1bc5924..e3059bb4a465 100644
--- a/include/asm-sparc64/signal.h
+++ b/include/asm-sparc64/signal.h
@@ -162,21 +162,6 @@ struct sigstack {
162#define MINSIGSTKSZ 4096 162#define MINSIGSTKSZ 4096
163#define SIGSTKSZ 16384 163#define SIGSTKSZ 16384
164 164
165#ifdef __KERNEL__
166/*
167 * DJHR
168 * SA_STATIC_ALLOC is used for the SPARC system to indicate that this
169 * interrupt handler's irq structure should be statically allocated
170 * by the request_irq routine.
171 * The alternative is that arch/sparc/kernel/irq.c has carnal knowledge
172 * of interrupt usage and that sucks. Also without a flag like this
173 * it may be possible for the free_irq routine to attempt to free
174 * statically allocated data.. which is NOT GOOD.
175 *
176 */
177#define SA_STATIC_ALLOC 0x80
178#endif
179
180#include <asm-generic/signal.h> 165#include <asm-generic/signal.h>
181 166
182struct __new_sigaction { 167struct __new_sigaction {
diff --git a/include/linux/compat_ioctl.h b/include/linux/compat_ioctl.h
index 70a4ebb5d964..ecb0d39c0798 100644
--- a/include/linux/compat_ioctl.h
+++ b/include/linux/compat_ioctl.h
@@ -346,10 +346,27 @@ COMPATIBLE_IOCTL(PPPOEIOCDFWD)
346/* LP */ 346/* LP */
347COMPATIBLE_IOCTL(LPGETSTATUS) 347COMPATIBLE_IOCTL(LPGETSTATUS)
348/* ppdev */ 348/* ppdev */
349COMPATIBLE_IOCTL(PPSETMODE)
350COMPATIBLE_IOCTL(PPRSTATUS)
351COMPATIBLE_IOCTL(PPRCONTROL)
352COMPATIBLE_IOCTL(PPWCONTROL)
353COMPATIBLE_IOCTL(PPFCONTROL)
354COMPATIBLE_IOCTL(PPRDATA)
355COMPATIBLE_IOCTL(PPWDATA)
349COMPATIBLE_IOCTL(PPCLAIM) 356COMPATIBLE_IOCTL(PPCLAIM)
350COMPATIBLE_IOCTL(PPRELEASE) 357COMPATIBLE_IOCTL(PPRELEASE)
351COMPATIBLE_IOCTL(PPEXCL)
352COMPATIBLE_IOCTL(PPYIELD) 358COMPATIBLE_IOCTL(PPYIELD)
359COMPATIBLE_IOCTL(PPEXCL)
360COMPATIBLE_IOCTL(PPDATADIR)
361COMPATIBLE_IOCTL(PPNEGOT)
362COMPATIBLE_IOCTL(PPWCTLONIRQ)
363COMPATIBLE_IOCTL(PPCLRIRQ)
364COMPATIBLE_IOCTL(PPSETPHASE)
365COMPATIBLE_IOCTL(PPGETMODES)
366COMPATIBLE_IOCTL(PPGETMODE)
367COMPATIBLE_IOCTL(PPGETPHASE)
368COMPATIBLE_IOCTL(PPGETFLAGS)
369COMPATIBLE_IOCTL(PPSETFLAGS)
353/* CDROM stuff */ 370/* CDROM stuff */
354COMPATIBLE_IOCTL(CDROMPAUSE) 371COMPATIBLE_IOCTL(CDROMPAUSE)
355COMPATIBLE_IOCTL(CDROMRESUME) 372COMPATIBLE_IOCTL(CDROMRESUME)
diff --git a/include/linux/pci-dynids.h b/include/linux/pci-dynids.h
deleted file mode 100644
index 183b6b0de81c..000000000000
--- a/include/linux/pci-dynids.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * PCI defines and function prototypes
3 * Copyright 2003 Dell Inc.
4 * by Matt Domsch <Matt_Domsch@dell.com>
5 */
6
7#ifndef LINUX_PCI_DYNIDS_H
8#define LINUX_PCI_DYNIDS_H
9
10#include <linux/list.h>
11#include <linux/mod_devicetable.h>
12
13struct dynid {
14 struct list_head node;
15 struct pci_device_id id;
16};
17
18#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 66798b46f308..7ac14961ba22 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -586,7 +586,7 @@ struct pci_dev {
586#define PCI_NUM_RESOURCES 11 586#define PCI_NUM_RESOURCES 11
587 587
588#ifndef PCI_BUS_NUM_RESOURCES 588#ifndef PCI_BUS_NUM_RESOURCES
589#define PCI_BUS_NUM_RESOURCES 4 589#define PCI_BUS_NUM_RESOURCES 8
590#endif 590#endif
591 591
592#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */ 592#define PCI_REGION_FLAG_MASK 0x0fU /* These bits of resource flags tell us the PCI region flags */
@@ -860,7 +860,8 @@ int pci_register_driver(struct pci_driver *);
860void pci_unregister_driver(struct pci_driver *); 860void pci_unregister_driver(struct pci_driver *);
861void pci_remove_behind_bridge(struct pci_dev *); 861void pci_remove_behind_bridge(struct pci_dev *);
862struct pci_driver *pci_dev_driver(const struct pci_dev *); 862struct pci_driver *pci_dev_driver(const struct pci_dev *);
863const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev); 863const struct pci_device_id *pci_match_device(struct pci_driver *drv, struct pci_dev *dev);
864const struct pci_device_id *pci_match_id(const struct pci_device_id *ids, struct pci_dev *dev);
864int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass); 865int pci_scan_bridge(struct pci_bus *bus, struct pci_dev * dev, int max, int pass);
865 866
866/* kmem_cache style wrapper around pci_alloc_consistent() */ 867/* kmem_cache style wrapper around pci_alloc_consistent() */
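For callers, the old single entry point is split in two: pci_match_id() keeps the "match this device against a bare ID table" behaviour, while pci_match_device() takes the driver so it can also consult IDs added at runtime. A minimal migration sketch; my_ids, my_pci_driver, and the vendor/device numbers are hypothetical:

	static struct pci_device_id my_ids[] = {
		{ .vendor = 0x1234, .device = 0x5678,            /* made-up IDs */
		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
		{ 0, }
	};
	static struct pci_driver my_pci_driver;                 /* hypothetical driver */

	static int example_match(struct pci_dev *pdev)
	{
		const struct pci_device_id *id;

		/* Pre-change call was: pci_match_device(my_ids, pdev); */

		/* New API: match against a static table only... */
		id = pci_match_id(my_ids, pdev);

		/* ...or let the core also check the driver's dynamic IDs
		 * (those written to the driver's new_id sysfs file). */
		if (!id)
			id = pci_match_device(&my_pci_driver, pdev);

		return id ? 0 : -ENODEV;
	}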
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index defdc5a459f0..909fef8903cb 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -804,7 +804,7 @@ static int __devinit snd_bt87x_detect_card(struct pci_dev *pci)
804 int i; 804 int i;
805 const struct pci_device_id *supported; 805 const struct pci_device_id *supported;
806 806
807 supported = pci_match_device(snd_bt87x_ids, pci); 807 supported = pci_match_device(driver, pci);
808 if (supported) 808 if (supported)
809 return supported->driver_data; 809 return supported->driver_data;
810 810