Diffstat (limited to 'drivers')
70 files changed, 4015 insertions, 2215 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 7ef172c2a1d6..f688c214be0c 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -204,13 +204,25 @@ config ACPI_NUMA | |||
204 | 204 | ||
205 | config ACPI_WMI | 205 | config ACPI_WMI |
206 | tristate "WMI (EXPERIMENTAL)" | 206 | tristate "WMI (EXPERIMENTAL)" |
207 | depends on X86 | ||
207 | depends on EXPERIMENTAL | 208 | depends on EXPERIMENTAL |
208 | help | 209 | help |
209 | This driver adds support for the ACPI-WMI mapper device (PNP0C14) | 210 | This driver adds support for the ACPI-WMI (Windows Management |
210 | found on some systems. | 211 | Instrumentation) mapper device (PNP0C14) found on some systems. |
212 | |||
213 | ACPI-WMI is a proprietary extension to ACPI to expose parts of the | ||
214 | ACPI firmware to userspace - this is done through various vendor | ||
215 | defined methods and data blocks in a PNP0C14 device, which are then | ||
216 | made available for userspace to call. | ||
217 | |||
218 | The implementation of this in Linux currently only exposes this to | ||
219 | other kernel space drivers. | ||
220 | |||
221 | This driver is a required dependency to build the firmware specific | ||
222 | drivers needed on many machines, including Acer and HP laptops. | ||
211 | 223 | ||
212 | NOTE: You will need another driver or userspace application on top of | 224 | It is safe to enable this driver even if your DSDT doesn't define |
213 | this to actually use anything defined in the ACPI-WMI mapper. | 225 | any ACPI-WMI devices. |
214 | 226 | ||
215 | config ACPI_ASUS | 227 | config ACPI_ASUS |
216 | tristate "ASUS/Medion Laptop Extras" | 228 | tristate "ASUS/Medion Laptop Extras" |
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index 6dbaa2d15fe0..9ce983ed60f0 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -445,6 +445,8 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
445 | * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"), | 445 | * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"), |
446 | * _OSI(Linux) is a NOP: | 446 | * _OSI(Linux) is a NOP: |
447 | * DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"), | 447 | * DMI_MATCH(DMI_PRODUCT_VERSION, "3000 N100"), |
448 | * _OSI(Linux) effect unknown | ||
449 | * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"), | ||
448 | */ | 450 | */ |
449 | { | 451 | { |
450 | .callback = dmi_enable_osi_linux, | 452 | .callback = dmi_enable_osi_linux, |
@@ -464,6 +466,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
464 | }, | 466 | }, |
465 | { | 467 | { |
466 | .callback = dmi_unknown_osi_linux, | 468 | .callback = dmi_unknown_osi_linux, |
469 | .ident = "Lenovo ThinkPad X61", | ||
470 | .matches = { | ||
471 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | ||
472 | DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"), | ||
473 | }, | ||
474 | }, | ||
475 | { | ||
476 | .callback = dmi_unknown_osi_linux, | ||
467 | .ident = "Lenovo 3000 V100", | 477 | .ident = "Lenovo 3000 V100", |
468 | .matches = { | 478 | .matches = { |
469 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), | 479 | DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), |
@@ -505,6 +515,16 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = { | |||
505 | DMI_MATCH(DMI_PRODUCT_NAME, "NEC VERSA M360"), | 515 | DMI_MATCH(DMI_PRODUCT_NAME, "NEC VERSA M360"), |
506 | }, | 516 | }, |
507 | }, | 517 | }, |
518 | /* Panasonic */ | ||
519 | { | ||
520 | .callback = dmi_unknown_osi_linux, | ||
521 | .ident = "Panasonic", | ||
522 | .matches = { | ||
523 | DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"), | ||
524 | /* Toughbook CF-52 */ | ||
525 | DMI_MATCH(DMI_PRODUCT_NAME, "CF-52CCABVBG"), | ||
526 | }, | ||
527 | }, | ||
508 | /* | 528 | /* |
509 | * Disable OSI(Linux) warnings on all "Samsung Electronics" | 529 | * Disable OSI(Linux) warnings on all "Samsung Electronics" |
510 | * | 530 | * |
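For readers unfamiliar with the table being extended above: these entries are consumed by dmi_check_system(), which walks the array and invokes the callback of every entry whose DMI_MATCH fields all match the firmware's DMI strings. A minimal sketch of that pattern follows; the callback, ident, and match strings are invented for illustration.

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/dmi.h>

static int __init example_quirk(const struct dmi_system_id *id)
{
	printk(KERN_INFO "applying quirk for %s\n", id->ident);
	return 0;	/* returning nonzero stops the table walk */
}

static struct dmi_system_id example_dmi_table[] __initdata = {
	{
		.callback = example_quirk,
		.ident = "Example Vendor Example Laptop",	/* invented */
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			DMI_MATCH(DMI_PRODUCT_VERSION, "Example Laptop"),
		},
	},
	{ }	/* terminating empty entry */
};

static int __init example_init(void)
{
	dmi_check_system(example_dmi_table);	/* returns the number of matches */
	return 0;
}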
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 27ccd68b8f46..a14501c98f40 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -343,7 +343,7 @@ struct acpi_table_header *acpi_find_dsdt_initrd(void) | |||
343 | struct kstat stat; | 343 | struct kstat stat; |
344 | char *ramfs_dsdt_name = "/DSDT.aml"; | 344 | char *ramfs_dsdt_name = "/DSDT.aml"; |
345 | 345 | ||
346 | printk(KERN_INFO PREFIX "Checking initramfs for custom DSDT"); | 346 | printk(KERN_INFO PREFIX "Checking initramfs for custom DSDT\n"); |
347 | 347 | ||
348 | /* | 348 | /* |
349 | * Never do this at home, only the user-space is allowed to open a file. | 349 | * Never do this at home, only the user-space is allowed to open a file. |
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index f32010bee4d5..b477a4be8a69 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -50,6 +50,10 @@ ACPI_MODULE_NAME("processor_perflib"); | |||
50 | 50 | ||
51 | static DEFINE_MUTEX(performance_mutex); | 51 | static DEFINE_MUTEX(performance_mutex); |
52 | 52 | ||
53 | /* Use cpufreq debug layer for _PPC changes. */ | ||
54 | #define cpufreq_printk(msg...) cpufreq_debug_printk(CPUFREQ_DEBUG_CORE, \ | ||
55 | "cpufreq-core", msg) | ||
56 | |||
53 | /* | 57 | /* |
54 | * _PPC support is implemented as a CPUfreq policy notifier: | 58 | * _PPC support is implemented as a CPUfreq policy notifier: |
55 | * This means each time a CPUfreq driver registered also with | 59 | * This means each time a CPUfreq driver registered also with |
@@ -131,6 +135,9 @@ static int acpi_processor_get_platform_limit(struct acpi_processor *pr) | |||
131 | return -ENODEV; | 135 | return -ENODEV; |
132 | } | 136 | } |
133 | 137 | ||
138 | cpufreq_printk("CPU %d: _PPC is %d - frequency %s limited\n", pr->id, | ||
139 | (int)ppc, ppc ? "" : "not"); | ||
140 | |||
134 | pr->performance_platform_limit = (int)ppc; | 141 | pr->performance_platform_limit = (int)ppc; |
135 | 142 | ||
136 | return 0; | 143 | return 0; |
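The comment block above describes _PPC support as a CPUfreq policy notifier. For orientation, this is roughly what that notifier contract looks like; the function names and the clamping in the callback are illustrative, not the driver's actual code.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/cpufreq.h>

/* Illustrative policy notifier: clamp the policy ceiling, much as the
 * _PPC handler does when the platform lowers its performance limit. */
static int example_policy_notify(struct notifier_block *nb,
				 unsigned long event, void *data)
{
	struct cpufreq_policy *policy = data;

	if (event == CPUFREQ_ADJUST)
		cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
					     policy->cpuinfo.max_freq);
	return 0;
}

static struct notifier_block example_policy_nb = {
	.notifier_call = example_policy_notify,
};

static int __init example_notifier_init(void)
{
	return cpufreq_register_notifier(&example_policy_nb,
					 CPUFREQ_POLICY_NOTIFIER);
}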
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 64e5148d82bc..b6d230b3209f 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -322,7 +322,7 @@ config BLK_DEV_UB | |||
322 | If unsure, say N. | 322 | If unsure, say N. |
323 | 323 | ||
324 | config BLK_DEV_RAM | 324 | config BLK_DEV_RAM |
325 | tristate "RAM disk support" | 325 | tristate "RAM block device support" |
326 | ---help--- | 326 | ---help--- |
327 | Saying Y here will allow you to use a portion of your RAM memory as | 327 | Saying Y here will allow you to use a portion of your RAM memory as |
328 | a block device, so that you can make file systems on it, read and | 328 | a block device, so that you can make file systems on it, read and |
@@ -357,15 +357,15 @@ config BLK_DEV_RAM_SIZE | |||
357 | The default value is 4096 kilobytes. Only change this if you know | 357 | The default value is 4096 kilobytes. Only change this if you know |
358 | what you are doing. | 358 | what you are doing. |
359 | 359 | ||
360 | config BLK_DEV_RAM_BLOCKSIZE | 360 | config BLK_DEV_XIP |
361 | int "Default RAM disk block size (bytes)" | 361 | bool "Support XIP filesystems on RAM block device" |
362 | depends on BLK_DEV_RAM | 362 | depends on BLK_DEV_RAM |
363 | default "1024" | 363 | default n |
364 | help | 364 | help |
365 | The default value is 1024 bytes. PAGE_SIZE is a much more | 365 | Support XIP filesystems (such as ext2 with XIP support on) on |
366 | efficient choice however. The default is kept to ensure initrd | 366 | top of block ram device. This will slightly enlarge the kernel, and |
367 | setups function - apparently needed by the rd_load_image routine | 367 | will prevent RAM block device backing store memory from being |
368 | that supposes the filesystem in the image uses a 1024 blocksize. | 368 | allocated from highmem (only a problem for highmem systems). |
369 | 369 | ||
370 | config CDROM_PKTCDVD | 370 | config CDROM_PKTCDVD |
371 | tristate "Packet writing on CD/DVD media" | 371 | tristate "Packet writing on CD/DVD media" |
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 7691505a2e12..01c972415cb2 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -11,7 +11,7 @@ obj-$(CONFIG_AMIGA_FLOPPY) += amiflop.o | |||
11 | obj-$(CONFIG_PS3_DISK) += ps3disk.o | 11 | obj-$(CONFIG_PS3_DISK) += ps3disk.o |
12 | obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o | 12 | obj-$(CONFIG_ATARI_FLOPPY) += ataflop.o |
13 | obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o | 13 | obj-$(CONFIG_AMIGA_Z2RAM) += z2ram.o |
14 | obj-$(CONFIG_BLK_DEV_RAM) += rd.o | 14 | obj-$(CONFIG_BLK_DEV_RAM) += brd.o |
15 | obj-$(CONFIG_BLK_DEV_LOOP) += loop.o | 15 | obj-$(CONFIG_BLK_DEV_LOOP) += loop.o |
16 | obj-$(CONFIG_BLK_DEV_PS2) += ps2esdi.o | 16 | obj-$(CONFIG_BLK_DEV_PS2) += ps2esdi.o |
17 | obj-$(CONFIG_BLK_DEV_XD) += xd.o | 17 | obj-$(CONFIG_BLK_DEV_XD) += xd.o |
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 07f02f855ab5..280e71ee744c 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -1,5 +1,5 @@ | |||
1 | /* Copyright (c) 2006 Coraid, Inc. See COPYING for GPL terms. */ | 1 | /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ |
2 | #define VERSION "32" | 2 | #define VERSION "47" |
3 | #define AOE_MAJOR 152 | 3 | #define AOE_MAJOR 152 |
4 | #define DEVICE_NAME "aoe" | 4 | #define DEVICE_NAME "aoe" |
5 | 5 | ||
@@ -76,10 +76,8 @@ enum { | |||
76 | DEVFL_EXT = (1<<2), /* device accepts lba48 commands */ | 76 | DEVFL_EXT = (1<<2), /* device accepts lba48 commands */ |
77 | DEVFL_CLOSEWAIT = (1<<3), /* device is waiting for all closes to revalidate */ | 77 | DEVFL_CLOSEWAIT = (1<<3), /* device is waiting for all closes to revalidate */ |
78 | DEVFL_GDALLOC = (1<<4), /* need to alloc gendisk */ | 78 | DEVFL_GDALLOC = (1<<4), /* need to alloc gendisk */ |
79 | DEVFL_PAUSE = (1<<5), | 79 | DEVFL_KICKME = (1<<5), /* slow polling network card catch */ |
80 | DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */ | 80 | DEVFL_NEWSIZE = (1<<6), /* need to update dev size in block layer */ |
81 | DEVFL_MAXBCNT = (1<<7), /* d->maxbcnt is not changeable */ | ||
82 | DEVFL_KICKME = (1<<8), | ||
83 | 81 | ||
84 | BUFFL_FAIL = 1, | 82 | BUFFL_FAIL = 1, |
85 | }; | 83 | }; |
@@ -88,17 +86,25 @@ enum { | |||
88 | DEFAULTBCNT = 2 * 512, /* 2 sectors */ | 86 | DEFAULTBCNT = 2 * 512, /* 2 sectors */ |
89 | NPERSHELF = 16, /* number of slots per shelf address */ | 87 | NPERSHELF = 16, /* number of slots per shelf address */ |
90 | FREETAG = -1, | 88 | FREETAG = -1, |
91 | MIN_BUFS = 8, | 89 | MIN_BUFS = 16, |
90 | NTARGETS = 8, | ||
91 | NAOEIFS = 8, | ||
92 | NSKBPOOLMAX = 128, | ||
93 | |||
94 | TIMERTICK = HZ / 10, | ||
95 | MINTIMER = HZ >> 2, | ||
96 | MAXTIMER = HZ << 1, | ||
97 | HELPWAIT = 20, | ||
92 | }; | 98 | }; |
93 | 99 | ||
94 | struct buf { | 100 | struct buf { |
95 | struct list_head bufs; | 101 | struct list_head bufs; |
96 | ulong start_time; /* for disk stats */ | 102 | ulong stime; /* for disk stats */ |
97 | ulong flags; | 103 | ulong flags; |
98 | ulong nframesout; | 104 | ulong nframesout; |
99 | char *bufaddr; | ||
100 | ulong resid; | 105 | ulong resid; |
101 | ulong bv_resid; | 106 | ulong bv_resid; |
107 | ulong bv_off; | ||
102 | sector_t sector; | 108 | sector_t sector; |
103 | struct bio *bio; | 109 | struct bio *bio; |
104 | struct bio_vec *bv; | 110 | struct bio_vec *bv; |
@@ -114,19 +120,38 @@ struct frame { | |||
114 | struct sk_buff *skb; | 120 | struct sk_buff *skb; |
115 | }; | 121 | }; |
116 | 122 | ||
123 | struct aoeif { | ||
124 | struct net_device *nd; | ||
125 | unsigned char lost; | ||
126 | unsigned char lostjumbo; | ||
127 | ushort maxbcnt; | ||
128 | }; | ||
129 | |||
130 | struct aoetgt { | ||
131 | unsigned char addr[6]; | ||
132 | ushort nframes; | ||
133 | struct frame *frames; | ||
134 | struct aoeif ifs[NAOEIFS]; | ||
135 | struct aoeif *ifp; /* current aoeif in use */ | ||
136 | ushort nout; | ||
137 | ushort maxout; | ||
138 | u16 lasttag; /* last tag sent */ | ||
139 | u16 useme; | ||
140 | ulong lastwadj; /* last window adjustment */ | ||
141 | int wpkts, rpkts; | ||
142 | int dataref; | ||
143 | }; | ||
144 | |||
117 | struct aoedev { | 145 | struct aoedev { |
118 | struct aoedev *next; | 146 | struct aoedev *next; |
119 | unsigned char addr[6]; /* remote mac addr */ | ||
120 | ushort flags; | ||
121 | ulong sysminor; | 147 | ulong sysminor; |
122 | ulong aoemajor; | 148 | ulong aoemajor; |
123 | ulong aoeminor; | 149 | u16 aoeminor; |
150 | u16 flags; | ||
124 | u16 nopen; /* (bd_openers isn't available without sleeping) */ | 151 | u16 nopen; /* (bd_openers isn't available without sleeping) */ |
125 | u16 lasttag; /* last tag sent */ | ||
126 | u16 rttavg; /* round trip average of requests/responses */ | 152 | u16 rttavg; /* round trip average of requests/responses */ |
127 | u16 mintimer; | 153 | u16 mintimer; |
128 | u16 fw_ver; /* version of blade's firmware */ | 154 | u16 fw_ver; /* version of blade's firmware */ |
129 | u16 maxbcnt; | ||
130 | struct work_struct work;/* disk create work struct */ | 155 | struct work_struct work;/* disk create work struct */ |
131 | struct gendisk *gd; | 156 | struct gendisk *gd; |
132 | struct request_queue blkq; | 157 | struct request_queue blkq; |
@@ -134,15 +159,17 @@ struct aoedev { | |||
134 | sector_t ssize; | 159 | sector_t ssize; |
135 | struct timer_list timer; | 160 | struct timer_list timer; |
136 | spinlock_t lock; | 161 | spinlock_t lock; |
137 | struct net_device *ifp; /* interface ed is attached to */ | ||
138 | struct sk_buff *sendq_hd; /* packets needing to be sent, list head */ | 162 | struct sk_buff *sendq_hd; /* packets needing to be sent, list head */ |
139 | struct sk_buff *sendq_tl; | 163 | struct sk_buff *sendq_tl; |
164 | struct sk_buff *skbpool_hd; | ||
165 | struct sk_buff *skbpool_tl; | ||
166 | int nskbpool; | ||
140 | mempool_t *bufpool; /* for deadlock-free Buf allocation */ | 167 | mempool_t *bufpool; /* for deadlock-free Buf allocation */ |
141 | struct list_head bufq; /* queue of bios to work on */ | 168 | struct list_head bufq; /* queue of bios to work on */ |
142 | struct buf *inprocess; /* the one we're currently working on */ | 169 | struct buf *inprocess; /* the one we're currently working on */ |
143 | ushort lostjumbo; | 170 | struct aoetgt *targets[NTARGETS]; |
144 | ushort nframes; /* number of frames below */ | 171 | struct aoetgt **tgt; /* target in use when working */ |
145 | struct frame *frames; | 172 | struct aoetgt **htgt; /* target needing rexmit assistance */ |
146 | }; | 173 | }; |
147 | 174 | ||
148 | 175 | ||
@@ -160,14 +187,16 @@ void aoecmd_cfg(ushort aoemajor, unsigned char aoeminor); | |||
160 | void aoecmd_ata_rsp(struct sk_buff *); | 187 | void aoecmd_ata_rsp(struct sk_buff *); |
161 | void aoecmd_cfg_rsp(struct sk_buff *); | 188 | void aoecmd_cfg_rsp(struct sk_buff *); |
162 | void aoecmd_sleepwork(struct work_struct *); | 189 | void aoecmd_sleepwork(struct work_struct *); |
163 | struct sk_buff *new_skb(ulong); | 190 | void aoecmd_cleanslate(struct aoedev *); |
191 | struct sk_buff *aoecmd_ata_id(struct aoedev *); | ||
164 | 192 | ||
165 | int aoedev_init(void); | 193 | int aoedev_init(void); |
166 | void aoedev_exit(void); | 194 | void aoedev_exit(void); |
167 | struct aoedev *aoedev_by_aoeaddr(int maj, int min); | 195 | struct aoedev *aoedev_by_aoeaddr(int maj, int min); |
168 | struct aoedev *aoedev_by_sysminor_m(ulong sysminor, ulong bufcnt); | 196 | struct aoedev *aoedev_by_sysminor_m(ulong sysminor); |
169 | void aoedev_downdev(struct aoedev *d); | 197 | void aoedev_downdev(struct aoedev *d); |
170 | int aoedev_isbusy(struct aoedev *d); | 198 | int aoedev_isbusy(struct aoedev *d); |
199 | int aoedev_flush(const char __user *str, size_t size); | ||
171 | 200 | ||
172 | int aoenet_init(void); | 201 | int aoenet_init(void); |
173 | void aoenet_exit(void); | 202 | void aoenet_exit(void); |
@@ -175,4 +204,4 @@ void aoenet_xmit(struct sk_buff *); | |||
175 | int is_aoe_netif(struct net_device *ifp); | 204 | int is_aoe_netif(struct net_device *ifp); |
176 | int set_aoe_iflist(const char __user *str, size_t size); | 205 | int set_aoe_iflist(const char __user *str, size_t size); |
177 | 206 | ||
178 | u64 mac_addr(char addr[6]); | 207 | unsigned long long mac_addr(char addr[6]); |
diff --git a/drivers/block/aoe/aoeblk.c b/drivers/block/aoe/aoeblk.c
index 826d12381e21..0c39782b2660 100644
--- a/drivers/block/aoe/aoeblk.c
+++ b/drivers/block/aoe/aoeblk.c
@@ -1,4 +1,4 @@ | |||
1 | /* Copyright (c) 2006 Coraid, Inc. See COPYING for GPL terms. */ | 1 | /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ |
2 | /* | 2 | /* |
3 | * aoeblk.c | 3 | * aoeblk.c |
4 | * block device routines | 4 | * block device routines |
@@ -24,7 +24,7 @@ static ssize_t aoedisk_show_state(struct device *dev, | |||
24 | return snprintf(page, PAGE_SIZE, | 24 | return snprintf(page, PAGE_SIZE, |
25 | "%s%s\n", | 25 | "%s%s\n", |
26 | (d->flags & DEVFL_UP) ? "up" : "down", | 26 | (d->flags & DEVFL_UP) ? "up" : "down", |
27 | (d->flags & DEVFL_PAUSE) ? ",paused" : | 27 | (d->flags & DEVFL_KICKME) ? ",kickme" : |
28 | (d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : ""); | 28 | (d->nopen && !(d->flags & DEVFL_UP)) ? ",closewait" : ""); |
29 | /* I'd rather see nopen exported so we can ditch closewait */ | 29 | /* I'd rather see nopen exported so we can ditch closewait */ |
30 | } | 30 | } |
@@ -33,17 +33,48 @@ static ssize_t aoedisk_show_mac(struct device *dev, | |||
33 | { | 33 | { |
34 | struct gendisk *disk = dev_to_disk(dev); | 34 | struct gendisk *disk = dev_to_disk(dev); |
35 | struct aoedev *d = disk->private_data; | 35 | struct aoedev *d = disk->private_data; |
36 | struct aoetgt *t = d->targets[0]; | ||
36 | 37 | ||
37 | return snprintf(page, PAGE_SIZE, "%012llx\n", | 38 | if (t == NULL) |
38 | (unsigned long long)mac_addr(d->addr)); | 39 | return snprintf(page, PAGE_SIZE, "none\n"); |
40 | return snprintf(page, PAGE_SIZE, "%012llx\n", mac_addr(t->addr)); | ||
39 | } | 41 | } |
40 | static ssize_t aoedisk_show_netif(struct device *dev, | 42 | static ssize_t aoedisk_show_netif(struct device *dev, |
41 | struct device_attribute *attr, char *page) | 43 | struct device_attribute *attr, char *page) |
42 | { | 44 | { |
43 | struct gendisk *disk = dev_to_disk(dev); | 45 | struct gendisk *disk = dev_to_disk(dev); |
44 | struct aoedev *d = disk->private_data; | 46 | struct aoedev *d = disk->private_data; |
47 | struct net_device *nds[8], **nd, **nnd, **ne; | ||
48 | struct aoetgt **t, **te; | ||
49 | struct aoeif *ifp, *e; | ||
50 | char *p; | ||
51 | |||
52 | memset(nds, 0, sizeof nds); | ||
53 | nd = nds; | ||
54 | ne = nd + ARRAY_SIZE(nds); | ||
55 | t = d->targets; | ||
56 | te = t + NTARGETS; | ||
57 | for (; t < te && *t; t++) { | ||
58 | ifp = (*t)->ifs; | ||
59 | e = ifp + NAOEIFS; | ||
60 | for (; ifp < e && ifp->nd; ifp++) { | ||
61 | for (nnd = nds; nnd < nd; nnd++) | ||
62 | if (*nnd == ifp->nd) | ||
63 | break; | ||
64 | if (nnd == nd && nd != ne) | ||
65 | *nd++ = ifp->nd; | ||
66 | } | ||
67 | } | ||
45 | 68 | ||
46 | return snprintf(page, PAGE_SIZE, "%s\n", d->ifp->name); | 69 | ne = nd; |
70 | nd = nds; | ||
71 | if (*nd == NULL) | ||
72 | return snprintf(page, PAGE_SIZE, "none\n"); | ||
73 | for (p = page; nd < ne; nd++) | ||
74 | p += snprintf(p, PAGE_SIZE - (p-page), "%s%s", | ||
75 | p == page ? "" : ",", (*nd)->name); | ||
76 | p += snprintf(p, PAGE_SIZE - (p-page), "\n"); | ||
77 | return p-page; | ||
47 | } | 78 | } |
48 | /* firmware version */ | 79 | /* firmware version */ |
49 | static ssize_t aoedisk_show_fwver(struct device *dev, | 80 | static ssize_t aoedisk_show_fwver(struct device *dev, |
@@ -134,7 +165,23 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio) | |||
134 | 165 | ||
135 | blk_queue_bounce(q, &bio); | 166 | blk_queue_bounce(q, &bio); |
136 | 167 | ||
168 | if (bio == NULL) { | ||
169 | printk(KERN_ERR "aoe: bio is NULL\n"); | ||
170 | BUG(); | ||
171 | return 0; | ||
172 | } | ||
137 | d = bio->bi_bdev->bd_disk->private_data; | 173 | d = bio->bi_bdev->bd_disk->private_data; |
174 | if (d == NULL) { | ||
175 | printk(KERN_ERR "aoe: bd_disk->private_data is NULL\n"); | ||
176 | BUG(); | ||
177 | bio_endio(bio, -ENXIO); | ||
178 | return 0; | ||
179 | } else if (bio->bi_io_vec == NULL) { | ||
180 | printk(KERN_ERR "aoe: bi_io_vec is NULL\n"); | ||
181 | BUG(); | ||
182 | bio_endio(bio, -ENXIO); | ||
183 | return 0; | ||
184 | } | ||
138 | buf = mempool_alloc(d->bufpool, GFP_NOIO); | 185 | buf = mempool_alloc(d->bufpool, GFP_NOIO); |
139 | if (buf == NULL) { | 186 | if (buf == NULL) { |
140 | printk(KERN_INFO "aoe: buf allocation failure\n"); | 187 | printk(KERN_INFO "aoe: buf allocation failure\n"); |
@@ -143,19 +190,19 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio) | |||
143 | } | 190 | } |
144 | memset(buf, 0, sizeof(*buf)); | 191 | memset(buf, 0, sizeof(*buf)); |
145 | INIT_LIST_HEAD(&buf->bufs); | 192 | INIT_LIST_HEAD(&buf->bufs); |
146 | buf->start_time = jiffies; | 193 | buf->stime = jiffies; |
147 | buf->bio = bio; | 194 | buf->bio = bio; |
148 | buf->resid = bio->bi_size; | 195 | buf->resid = bio->bi_size; |
149 | buf->sector = bio->bi_sector; | 196 | buf->sector = bio->bi_sector; |
150 | buf->bv = &bio->bi_io_vec[bio->bi_idx]; | 197 | buf->bv = &bio->bi_io_vec[bio->bi_idx]; |
151 | WARN_ON(buf->bv->bv_len == 0); | ||
152 | buf->bv_resid = buf->bv->bv_len; | 198 | buf->bv_resid = buf->bv->bv_len; |
153 | buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset; | 199 | WARN_ON(buf->bv_resid == 0); |
200 | buf->bv_off = buf->bv->bv_offset; | ||
154 | 201 | ||
155 | spin_lock_irqsave(&d->lock, flags); | 202 | spin_lock_irqsave(&d->lock, flags); |
156 | 203 | ||
157 | if ((d->flags & DEVFL_UP) == 0) { | 204 | if ((d->flags & DEVFL_UP) == 0) { |
158 | printk(KERN_INFO "aoe: device %ld.%ld is not up\n", | 205 | printk(KERN_INFO "aoe: device %ld.%d is not up\n", |
159 | d->aoemajor, d->aoeminor); | 206 | d->aoemajor, d->aoeminor); |
160 | spin_unlock_irqrestore(&d->lock, flags); | 207 | spin_unlock_irqrestore(&d->lock, flags); |
161 | mempool_free(buf, d->bufpool); | 208 | mempool_free(buf, d->bufpool); |
@@ -208,14 +255,15 @@ aoeblk_gdalloc(void *vp) | |||
208 | 255 | ||
209 | gd = alloc_disk(AOE_PARTITIONS); | 256 | gd = alloc_disk(AOE_PARTITIONS); |
210 | if (gd == NULL) { | 257 | if (gd == NULL) { |
211 | printk(KERN_ERR "aoe: cannot allocate disk structure for %ld.%ld\n", | 258 | printk(KERN_ERR |
259 | "aoe: cannot allocate disk structure for %ld.%d\n", | ||
212 | d->aoemajor, d->aoeminor); | 260 | d->aoemajor, d->aoeminor); |
213 | goto err; | 261 | goto err; |
214 | } | 262 | } |
215 | 263 | ||
216 | d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache); | 264 | d->bufpool = mempool_create_slab_pool(MIN_BUFS, buf_pool_cache); |
217 | if (d->bufpool == NULL) { | 265 | if (d->bufpool == NULL) { |
218 | printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%ld\n", | 266 | printk(KERN_ERR "aoe: cannot allocate bufpool for %ld.%d\n", |
219 | d->aoemajor, d->aoeminor); | 267 | d->aoemajor, d->aoeminor); |
220 | goto err_disk; | 268 | goto err_disk; |
221 | } | 269 | } |
@@ -229,7 +277,7 @@ aoeblk_gdalloc(void *vp) | |||
229 | gd->fops = &aoe_bdops; | 277 | gd->fops = &aoe_bdops; |
230 | gd->private_data = d; | 278 | gd->private_data = d; |
231 | gd->capacity = d->ssize; | 279 | gd->capacity = d->ssize; |
232 | snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%ld", | 280 | snprintf(gd->disk_name, sizeof gd->disk_name, "etherd/e%ld.%d", |
233 | d->aoemajor, d->aoeminor); | 281 | d->aoemajor, d->aoeminor); |
234 | 282 | ||
235 | gd->queue = &d->blkq; | 283 | gd->queue = &d->blkq; |
diff --git a/drivers/block/aoe/aoechr.c b/drivers/block/aoe/aoechr.c
index d5480e34cb22..e8e60e7a2e70 100644
--- a/drivers/block/aoe/aoechr.c
+++ b/drivers/block/aoe/aoechr.c
@@ -1,4 +1,4 @@ | |||
1 | /* Copyright (c) 2006 Coraid, Inc. See COPYING for GPL terms. */ | 1 | /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ |
2 | /* | 2 | /* |
3 | * aoechr.c | 3 | * aoechr.c |
4 | * AoE character device driver | 4 | * AoE character device driver |
@@ -6,6 +6,7 @@ | |||
6 | 6 | ||
7 | #include <linux/hdreg.h> | 7 | #include <linux/hdreg.h> |
8 | #include <linux/blkdev.h> | 8 | #include <linux/blkdev.h> |
9 | #include <linux/delay.h> | ||
9 | #include "aoe.h" | 10 | #include "aoe.h" |
10 | 11 | ||
11 | enum { | 12 | enum { |
@@ -14,6 +15,7 @@ enum { | |||
14 | MINOR_DISCOVER, | 15 | MINOR_DISCOVER, |
15 | MINOR_INTERFACES, | 16 | MINOR_INTERFACES, |
16 | MINOR_REVALIDATE, | 17 | MINOR_REVALIDATE, |
18 | MINOR_FLUSH, | ||
17 | MSGSZ = 2048, | 19 | MSGSZ = 2048, |
18 | NMSG = 100, /* message backlog to retain */ | 20 | NMSG = 100, /* message backlog to retain */ |
19 | }; | 21 | }; |
@@ -42,6 +44,7 @@ static struct aoe_chardev chardevs[] = { | |||
42 | { MINOR_DISCOVER, "discover" }, | 44 | { MINOR_DISCOVER, "discover" }, |
43 | { MINOR_INTERFACES, "interfaces" }, | 45 | { MINOR_INTERFACES, "interfaces" }, |
44 | { MINOR_REVALIDATE, "revalidate" }, | 46 | { MINOR_REVALIDATE, "revalidate" }, |
47 | { MINOR_FLUSH, "flush" }, | ||
45 | }; | 48 | }; |
46 | 49 | ||
47 | static int | 50 | static int |
@@ -68,6 +71,7 @@ revalidate(const char __user *str, size_t size) | |||
68 | int major, minor, n; | 71 | int major, minor, n; |
69 | ulong flags; | 72 | ulong flags; |
70 | struct aoedev *d; | 73 | struct aoedev *d; |
74 | struct sk_buff *skb; | ||
71 | char buf[16]; | 75 | char buf[16]; |
72 | 76 | ||
73 | if (size >= sizeof buf) | 77 | if (size >= sizeof buf) |
@@ -85,13 +89,20 @@ revalidate(const char __user *str, size_t size) | |||
85 | d = aoedev_by_aoeaddr(major, minor); | 89 | d = aoedev_by_aoeaddr(major, minor); |
86 | if (!d) | 90 | if (!d) |
87 | return -EINVAL; | 91 | return -EINVAL; |
88 | |||
89 | spin_lock_irqsave(&d->lock, flags); | 92 | spin_lock_irqsave(&d->lock, flags); |
90 | d->flags &= ~DEVFL_MAXBCNT; | 93 | aoecmd_cleanslate(d); |
91 | d->flags |= DEVFL_PAUSE; | 94 | loop: |
95 | skb = aoecmd_ata_id(d); | ||
92 | spin_unlock_irqrestore(&d->lock, flags); | 96 | spin_unlock_irqrestore(&d->lock, flags); |
97 | /* try again if we are able to sleep a bit, | ||
98 | * otherwise give up this revalidation | ||
99 | */ | ||
100 | if (!skb && !msleep_interruptible(200)) { | ||
101 | spin_lock_irqsave(&d->lock, flags); | ||
102 | goto loop; | ||
103 | } | ||
104 | aoenet_xmit(skb); | ||
93 | aoecmd_cfg(major, minor); | 105 | aoecmd_cfg(major, minor); |
94 | |||
95 | return 0; | 106 | return 0; |
96 | } | 107 | } |
97 | 108 | ||
@@ -149,6 +160,9 @@ aoechr_write(struct file *filp, const char __user *buf, size_t cnt, loff_t *offp | |||
149 | break; | 160 | break; |
150 | case MINOR_REVALIDATE: | 161 | case MINOR_REVALIDATE: |
151 | ret = revalidate(buf, cnt); | 162 | ret = revalidate(buf, cnt); |
163 | break; | ||
164 | case MINOR_FLUSH: | ||
165 | ret = aoedev_flush(buf, cnt); | ||
152 | } | 166 | } |
153 | if (ret == 0) | 167 | if (ret == 0) |
154 | ret = cnt; | 168 | ret = cnt; |
@@ -185,52 +199,51 @@ aoechr_read(struct file *filp, char __user *buf, size_t cnt, loff_t *off) | |||
185 | ulong flags; | 199 | ulong flags; |
186 | 200 | ||
187 | n = (unsigned long) filp->private_data; | 201 | n = (unsigned long) filp->private_data; |
188 | switch (n) { | 202 | if (n != MINOR_ERR) |
189 | case MINOR_ERR: | 203 | return -EFAULT; |
190 | spin_lock_irqsave(&emsgs_lock, flags); | 204 | |
191 | loop: | 205 | spin_lock_irqsave(&emsgs_lock, flags); |
192 | em = emsgs + emsgs_head_idx; | ||
193 | if ((em->flags & EMFL_VALID) == 0) { | ||
194 | if (filp->f_flags & O_NDELAY) { | ||
195 | spin_unlock_irqrestore(&emsgs_lock, flags); | ||
196 | return -EAGAIN; | ||
197 | } | ||
198 | nblocked_emsgs_readers++; | ||
199 | 206 | ||
207 | for (;;) { | ||
208 | em = emsgs + emsgs_head_idx; | ||
209 | if ((em->flags & EMFL_VALID) != 0) | ||
210 | break; | ||
211 | if (filp->f_flags & O_NDELAY) { | ||
200 | spin_unlock_irqrestore(&emsgs_lock, flags); | 212 | spin_unlock_irqrestore(&emsgs_lock, flags); |
213 | return -EAGAIN; | ||
214 | } | ||
215 | nblocked_emsgs_readers++; | ||
201 | 216 | ||
202 | n = down_interruptible(&emsgs_sema); | 217 | spin_unlock_irqrestore(&emsgs_lock, flags); |
218 | |||
219 | n = down_interruptible(&emsgs_sema); | ||
203 | 220 | ||
204 | spin_lock_irqsave(&emsgs_lock, flags); | 221 | spin_lock_irqsave(&emsgs_lock, flags); |
205 | 222 | ||
206 | nblocked_emsgs_readers--; | 223 | nblocked_emsgs_readers--; |
207 | 224 | ||
208 | if (n) { | 225 | if (n) { |
209 | spin_unlock_irqrestore(&emsgs_lock, flags); | ||
210 | return -ERESTARTSYS; | ||
211 | } | ||
212 | goto loop; | ||
213 | } | ||
214 | if (em->len > cnt) { | ||
215 | spin_unlock_irqrestore(&emsgs_lock, flags); | 226 | spin_unlock_irqrestore(&emsgs_lock, flags); |
216 | return -EAGAIN; | 227 | return -ERESTARTSYS; |
217 | } | 228 | } |
218 | mp = em->msg; | 229 | } |
219 | len = em->len; | 230 | if (em->len > cnt) { |
220 | em->msg = NULL; | 231 | spin_unlock_irqrestore(&emsgs_lock, flags); |
221 | em->flags &= ~EMFL_VALID; | 232 | return -EAGAIN; |
233 | } | ||
234 | mp = em->msg; | ||
235 | len = em->len; | ||
236 | em->msg = NULL; | ||
237 | em->flags &= ~EMFL_VALID; | ||
222 | 238 | ||
223 | emsgs_head_idx++; | 239 | emsgs_head_idx++; |
224 | emsgs_head_idx %= ARRAY_SIZE(emsgs); | 240 | emsgs_head_idx %= ARRAY_SIZE(emsgs); |
225 | 241 | ||
226 | spin_unlock_irqrestore(&emsgs_lock, flags); | 242 | spin_unlock_irqrestore(&emsgs_lock, flags); |
227 | 243 | ||
228 | n = copy_to_user(buf, mp, len); | 244 | n = copy_to_user(buf, mp, len); |
229 | kfree(mp); | 245 | kfree(mp); |
230 | return n == 0 ? len : -EFAULT; | 246 | return n == 0 ? len : -EFAULT; |
231 | default: | ||
232 | return -EFAULT; | ||
233 | } | ||
234 | } | 247 | } |
235 | 248 | ||
236 | static const struct file_operations aoe_fops = { | 249 | static const struct file_operations aoe_fops = { |
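The character-device changes above add a "flush" minor next to the existing "revalidate" one, and revalidate now retries aoecmd_ata_id() while it is allowed to sleep. From userspace both are driven by writing a short string to the corresponding node; the sketch below assumes the /dev/etherd/ names used by aoe-tools and a device e0.0, so treat the paths and the written strings as examples rather than a specification.

/* Ask the aoe driver to revalidate e0.0, then flush unused/down devices.
 * Node paths and the "eX.Y" string follow the aoe-tools convention;
 * adjust for your system. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_cmd(const char *node, const char *cmd)
{
	int fd = open(node, O_WRONLY);

	if (fd < 0) {
		perror(node);
		return -1;
	}
	if (write(fd, cmd, strlen(cmd)) < 0)
		perror(node);
	close(fd);
	return 0;
}

int main(void)
{
	write_cmd("/dev/etherd/revalidate", "e0.0");
	write_cmd("/dev/etherd/flush", "all");
	return 0;
}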
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index 4d59d5057734..d00293ba3b45 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -1,4 +1,4 @@ | |||
1 | /* Copyright (c) 2006 Coraid, Inc. See COPYING for GPL terms. */ | 1 | /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ |
2 | /* | 2 | /* |
3 | * aoecmd.c | 3 | * aoecmd.c |
4 | * Filesystem request handling methods | 4 | * Filesystem request handling methods |
@@ -9,19 +9,21 @@ | |||
9 | #include <linux/skbuff.h> | 9 | #include <linux/skbuff.h> |
10 | #include <linux/netdevice.h> | 10 | #include <linux/netdevice.h> |
11 | #include <linux/genhd.h> | 11 | #include <linux/genhd.h> |
12 | #include <linux/moduleparam.h> | ||
12 | #include <net/net_namespace.h> | 13 | #include <net/net_namespace.h> |
13 | #include <asm/unaligned.h> | 14 | #include <asm/unaligned.h> |
14 | #include "aoe.h" | 15 | #include "aoe.h" |
15 | 16 | ||
16 | #define TIMERTICK (HZ / 10) | ||
17 | #define MINTIMER (2 * TIMERTICK) | ||
18 | #define MAXTIMER (HZ << 1) | ||
19 | |||
20 | static int aoe_deadsecs = 60 * 3; | 17 | static int aoe_deadsecs = 60 * 3; |
21 | module_param(aoe_deadsecs, int, 0644); | 18 | module_param(aoe_deadsecs, int, 0644); |
22 | MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev."); | 19 | MODULE_PARM_DESC(aoe_deadsecs, "After aoe_deadsecs seconds, give up and fail dev."); |
23 | 20 | ||
24 | struct sk_buff * | 21 | static int aoe_maxout = 16; |
22 | module_param(aoe_maxout, int, 0644); | ||
23 | MODULE_PARM_DESC(aoe_maxout, | ||
24 | "Only aoe_maxout outstanding packets for every MAC on eX.Y."); | ||
25 | |||
26 | static struct sk_buff * | ||
25 | new_skb(ulong len) | 27 | new_skb(ulong len) |
26 | { | 28 | { |
27 | struct sk_buff *skb; | 29 | struct sk_buff *skb; |
@@ -43,12 +45,12 @@ new_skb(ulong len) | |||
43 | } | 45 | } |
44 | 46 | ||
45 | static struct frame * | 47 | static struct frame * |
46 | getframe(struct aoedev *d, int tag) | 48 | getframe(struct aoetgt *t, int tag) |
47 | { | 49 | { |
48 | struct frame *f, *e; | 50 | struct frame *f, *e; |
49 | 51 | ||
50 | f = d->frames; | 52 | f = t->frames; |
51 | e = f + d->nframes; | 53 | e = f + t->nframes; |
52 | for (; f<e; f++) | 54 | for (; f<e; f++) |
53 | if (f->tag == tag) | 55 | if (f->tag == tag) |
54 | return f; | 56 | return f; |
@@ -61,21 +63,21 @@ getframe(struct aoedev *d, int tag) | |||
61 | * This driver reserves tag -1 to mean "unused frame." | 63 | * This driver reserves tag -1 to mean "unused frame." |
62 | */ | 64 | */ |
63 | static int | 65 | static int |
64 | newtag(struct aoedev *d) | 66 | newtag(struct aoetgt *t) |
65 | { | 67 | { |
66 | register ulong n; | 68 | register ulong n; |
67 | 69 | ||
68 | n = jiffies & 0xffff; | 70 | n = jiffies & 0xffff; |
69 | return n |= (++d->lasttag & 0x7fff) << 16; | 71 | return n |= (++t->lasttag & 0x7fff) << 16; |
70 | } | 72 | } |
71 | 73 | ||
72 | static int | 74 | static int |
73 | aoehdr_atainit(struct aoedev *d, struct aoe_hdr *h) | 75 | aoehdr_atainit(struct aoedev *d, struct aoetgt *t, struct aoe_hdr *h) |
74 | { | 76 | { |
75 | u32 host_tag = newtag(d); | 77 | u32 host_tag = newtag(t); |
76 | 78 | ||
77 | memcpy(h->src, d->ifp->dev_addr, sizeof h->src); | 79 | memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src); |
78 | memcpy(h->dst, d->addr, sizeof h->dst); | 80 | memcpy(h->dst, t->addr, sizeof h->dst); |
79 | h->type = __constant_cpu_to_be16(ETH_P_AOE); | 81 | h->type = __constant_cpu_to_be16(ETH_P_AOE); |
80 | h->verfl = AOE_HVER; | 82 | h->verfl = AOE_HVER; |
81 | h->major = cpu_to_be16(d->aoemajor); | 83 | h->major = cpu_to_be16(d->aoemajor); |
@@ -98,42 +100,162 @@ put_lba(struct aoe_atahdr *ah, sector_t lba) | |||
98 | } | 100 | } |
99 | 101 | ||
100 | static void | 102 | static void |
101 | aoecmd_ata_rw(struct aoedev *d, struct frame *f) | 103 | ifrotate(struct aoetgt *t) |
104 | { | ||
105 | t->ifp++; | ||
106 | if (t->ifp >= &t->ifs[NAOEIFS] || t->ifp->nd == NULL) | ||
107 | t->ifp = t->ifs; | ||
108 | if (t->ifp->nd == NULL) { | ||
109 | printk(KERN_INFO "aoe: no interface to rotate to\n"); | ||
110 | BUG(); | ||
111 | } | ||
112 | } | ||
113 | |||
114 | static void | ||
115 | skb_pool_put(struct aoedev *d, struct sk_buff *skb) | ||
116 | { | ||
117 | if (!d->skbpool_hd) | ||
118 | d->skbpool_hd = skb; | ||
119 | else | ||
120 | d->skbpool_tl->next = skb; | ||
121 | d->skbpool_tl = skb; | ||
122 | } | ||
123 | |||
124 | static struct sk_buff * | ||
125 | skb_pool_get(struct aoedev *d) | ||
126 | { | ||
127 | struct sk_buff *skb; | ||
128 | |||
129 | skb = d->skbpool_hd; | ||
130 | if (skb && atomic_read(&skb_shinfo(skb)->dataref) == 1) { | ||
131 | d->skbpool_hd = skb->next; | ||
132 | skb->next = NULL; | ||
133 | return skb; | ||
134 | } | ||
135 | if (d->nskbpool < NSKBPOOLMAX | ||
136 | && (skb = new_skb(ETH_ZLEN))) { | ||
137 | d->nskbpool++; | ||
138 | return skb; | ||
139 | } | ||
140 | return NULL; | ||
141 | } | ||
142 | |||
143 | /* freeframe is where we do our load balancing so it's a little hairy. */ | ||
144 | static struct frame * | ||
145 | freeframe(struct aoedev *d) | ||
146 | { | ||
147 | struct frame *f, *e, *rf; | ||
148 | struct aoetgt **t; | ||
149 | struct sk_buff *skb; | ||
150 | |||
151 | if (d->targets[0] == NULL) { /* shouldn't happen, but I'm paranoid */ | ||
152 | printk(KERN_ERR "aoe: NULL TARGETS!\n"); | ||
153 | return NULL; | ||
154 | } | ||
155 | t = d->tgt; | ||
156 | t++; | ||
157 | if (t >= &d->targets[NTARGETS] || !*t) | ||
158 | t = d->targets; | ||
159 | for (;;) { | ||
160 | if ((*t)->nout < (*t)->maxout | ||
161 | && t != d->htgt | ||
162 | && (*t)->ifp->nd) { | ||
163 | rf = NULL; | ||
164 | f = (*t)->frames; | ||
165 | e = f + (*t)->nframes; | ||
166 | for (; f < e; f++) { | ||
167 | if (f->tag != FREETAG) | ||
168 | continue; | ||
169 | skb = f->skb; | ||
170 | if (!skb | ||
171 | && !(f->skb = skb = new_skb(ETH_ZLEN))) | ||
172 | continue; | ||
173 | if (atomic_read(&skb_shinfo(skb)->dataref) | ||
174 | != 1) { | ||
175 | if (!rf) | ||
176 | rf = f; | ||
177 | continue; | ||
178 | } | ||
179 | gotone: skb_shinfo(skb)->nr_frags = skb->data_len = 0; | ||
180 | skb_trim(skb, 0); | ||
181 | d->tgt = t; | ||
182 | ifrotate(*t); | ||
183 | return f; | ||
184 | } | ||
185 | /* Work can be done, but the network layer is | ||
186 | holding our precious packets. Try to grab | ||
187 | one from the pool. */ | ||
188 | f = rf; | ||
189 | if (f == NULL) { /* more paranoia */ | ||
190 | printk(KERN_ERR | ||
191 | "aoe: freeframe: %s.\n", | ||
192 | "unexpected null rf"); | ||
193 | d->flags |= DEVFL_KICKME; | ||
194 | return NULL; | ||
195 | } | ||
196 | skb = skb_pool_get(d); | ||
197 | if (skb) { | ||
198 | skb_pool_put(d, f->skb); | ||
199 | f->skb = skb; | ||
200 | goto gotone; | ||
201 | } | ||
202 | (*t)->dataref++; | ||
203 | if ((*t)->nout == 0) | ||
204 | d->flags |= DEVFL_KICKME; | ||
205 | } | ||
206 | if (t == d->tgt) /* we've looped and found nada */ | ||
207 | break; | ||
208 | t++; | ||
209 | if (t >= &d->targets[NTARGETS] || !*t) | ||
210 | t = d->targets; | ||
211 | } | ||
212 | return NULL; | ||
213 | } | ||
214 | |||
215 | static int | ||
216 | aoecmd_ata_rw(struct aoedev *d) | ||
102 | { | 217 | { |
218 | struct frame *f; | ||
103 | struct aoe_hdr *h; | 219 | struct aoe_hdr *h; |
104 | struct aoe_atahdr *ah; | 220 | struct aoe_atahdr *ah; |
105 | struct buf *buf; | 221 | struct buf *buf; |
222 | struct bio_vec *bv; | ||
223 | struct aoetgt *t; | ||
106 | struct sk_buff *skb; | 224 | struct sk_buff *skb; |
107 | ulong bcnt; | 225 | ulong bcnt; |
108 | register sector_t sector; | ||
109 | char writebit, extbit; | 226 | char writebit, extbit; |
110 | 227 | ||
111 | writebit = 0x10; | 228 | writebit = 0x10; |
112 | extbit = 0x4; | 229 | extbit = 0x4; |
113 | 230 | ||
231 | f = freeframe(d); | ||
232 | if (f == NULL) | ||
233 | return 0; | ||
234 | t = *d->tgt; | ||
114 | buf = d->inprocess; | 235 | buf = d->inprocess; |
115 | 236 | bv = buf->bv; | |
116 | sector = buf->sector; | 237 | bcnt = t->ifp->maxbcnt; |
117 | bcnt = buf->bv_resid; | 238 | if (bcnt == 0) |
118 | if (bcnt > d->maxbcnt) | 239 | bcnt = DEFAULTBCNT; |
119 | bcnt = d->maxbcnt; | 240 | if (bcnt > buf->bv_resid) |
120 | 241 | bcnt = buf->bv_resid; | |
121 | /* initialize the headers & frame */ | 242 | /* initialize the headers & frame */ |
122 | skb = f->skb; | 243 | skb = f->skb; |
123 | h = (struct aoe_hdr *) skb_mac_header(skb); | 244 | h = (struct aoe_hdr *) skb_mac_header(skb); |
124 | ah = (struct aoe_atahdr *) (h+1); | 245 | ah = (struct aoe_atahdr *) (h+1); |
125 | skb_put(skb, sizeof *h + sizeof *ah); | 246 | skb_put(skb, sizeof *h + sizeof *ah); |
126 | memset(h, 0, skb->len); | 247 | memset(h, 0, skb->len); |
127 | f->tag = aoehdr_atainit(d, h); | 248 | f->tag = aoehdr_atainit(d, t, h); |
249 | t->nout++; | ||
128 | f->waited = 0; | 250 | f->waited = 0; |
129 | f->buf = buf; | 251 | f->buf = buf; |
130 | f->bufaddr = buf->bufaddr; | 252 | f->bufaddr = page_address(bv->bv_page) + buf->bv_off; |
131 | f->bcnt = bcnt; | 253 | f->bcnt = bcnt; |
132 | f->lba = sector; | 254 | f->lba = buf->sector; |
133 | 255 | ||
134 | /* set up ata header */ | 256 | /* set up ata header */ |
135 | ah->scnt = bcnt >> 9; | 257 | ah->scnt = bcnt >> 9; |
136 | put_lba(ah, sector); | 258 | put_lba(ah, buf->sector); |
137 | if (d->flags & DEVFL_EXT) { | 259 | if (d->flags & DEVFL_EXT) { |
138 | ah->aflags |= AOEAFL_EXT; | 260 | ah->aflags |= AOEAFL_EXT; |
139 | } else { | 261 | } else { |
@@ -141,14 +263,14 @@ aoecmd_ata_rw(struct aoedev *d, struct frame *f) | |||
141 | ah->lba3 &= 0x0f; | 263 | ah->lba3 &= 0x0f; |
142 | ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */ | 264 | ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */ |
143 | } | 265 | } |
144 | |||
145 | if (bio_data_dir(buf->bio) == WRITE) { | 266 | if (bio_data_dir(buf->bio) == WRITE) { |
146 | skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr), | 267 | skb_fill_page_desc(skb, 0, bv->bv_page, buf->bv_off, bcnt); |
147 | offset_in_page(f->bufaddr), bcnt); | ||
148 | ah->aflags |= AOEAFL_WRITE; | 268 | ah->aflags |= AOEAFL_WRITE; |
149 | skb->len += bcnt; | 269 | skb->len += bcnt; |
150 | skb->data_len = bcnt; | 270 | skb->data_len = bcnt; |
271 | t->wpkts++; | ||
151 | } else { | 272 | } else { |
273 | t->rpkts++; | ||
152 | writebit = 0; | 274 | writebit = 0; |
153 | } | 275 | } |
154 | 276 | ||
@@ -156,29 +278,29 @@ aoecmd_ata_rw(struct aoedev *d, struct frame *f) | |||
156 | 278 | ||
157 | /* mark all tracking fields and load out */ | 279 | /* mark all tracking fields and load out */ |
158 | buf->nframesout += 1; | 280 | buf->nframesout += 1; |
159 | buf->bufaddr += bcnt; | 281 | buf->bv_off += bcnt; |
160 | buf->bv_resid -= bcnt; | 282 | buf->bv_resid -= bcnt; |
161 | /* printk(KERN_DEBUG "aoe: bv_resid=%ld\n", buf->bv_resid); */ | ||
162 | buf->resid -= bcnt; | 283 | buf->resid -= bcnt; |
163 | buf->sector += bcnt >> 9; | 284 | buf->sector += bcnt >> 9; |
164 | if (buf->resid == 0) { | 285 | if (buf->resid == 0) { |
165 | d->inprocess = NULL; | 286 | d->inprocess = NULL; |
166 | } else if (buf->bv_resid == 0) { | 287 | } else if (buf->bv_resid == 0) { |
167 | buf->bv++; | 288 | buf->bv = ++bv; |
168 | WARN_ON(buf->bv->bv_len == 0); | 289 | buf->bv_resid = bv->bv_len; |
169 | buf->bv_resid = buf->bv->bv_len; | 290 | WARN_ON(buf->bv_resid == 0); |
170 | buf->bufaddr = page_address(buf->bv->bv_page) + buf->bv->bv_offset; | 291 | buf->bv_off = bv->bv_offset; |
171 | } | 292 | } |
172 | 293 | ||
173 | skb->dev = d->ifp; | 294 | skb->dev = t->ifp->nd; |
174 | skb = skb_clone(skb, GFP_ATOMIC); | 295 | skb = skb_clone(skb, GFP_ATOMIC); |
175 | if (skb == NULL) | 296 | if (skb) { |
176 | return; | 297 | if (d->sendq_hd) |
177 | if (d->sendq_hd) | 298 | d->sendq_tl->next = skb; |
178 | d->sendq_tl->next = skb; | 299 | else |
179 | else | 300 | d->sendq_hd = skb; |
180 | d->sendq_hd = skb; | 301 | d->sendq_tl = skb; |
181 | d->sendq_tl = skb; | 302 | } |
303 | return 1; | ||
182 | } | 304 | } |
183 | 305 | ||
184 | /* some callers cannot sleep, and they can call this function, | 306 | /* some callers cannot sleep, and they can call this function, |
@@ -232,62 +354,8 @@ cont: | |||
232 | return sl; | 354 | return sl; |
233 | } | 355 | } |
234 | 356 | ||
235 | static struct frame * | ||
236 | freeframe(struct aoedev *d) | ||
237 | { | ||
238 | struct frame *f, *e; | ||
239 | int n = 0; | ||
240 | |||
241 | f = d->frames; | ||
242 | e = f + d->nframes; | ||
243 | for (; f<e; f++) { | ||
244 | if (f->tag != FREETAG) | ||
245 | continue; | ||
246 | if (atomic_read(&skb_shinfo(f->skb)->dataref) == 1) { | ||
247 | skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0; | ||
248 | skb_trim(f->skb, 0); | ||
249 | return f; | ||
250 | } | ||
251 | n++; | ||
252 | } | ||
253 | if (n == d->nframes) /* wait for network layer */ | ||
254 | d->flags |= DEVFL_KICKME; | ||
255 | |||
256 | return NULL; | ||
257 | } | ||
258 | |||
259 | /* enters with d->lock held */ | ||
260 | void | ||
261 | aoecmd_work(struct aoedev *d) | ||
262 | { | ||
263 | struct frame *f; | ||
264 | struct buf *buf; | ||
265 | |||
266 | if (d->flags & DEVFL_PAUSE) { | ||
267 | if (!aoedev_isbusy(d)) | ||
268 | d->sendq_hd = aoecmd_cfg_pkts(d->aoemajor, | ||
269 | d->aoeminor, &d->sendq_tl); | ||
270 | return; | ||
271 | } | ||
272 | |||
273 | loop: | ||
274 | f = freeframe(d); | ||
275 | if (f == NULL) | ||
276 | return; | ||
277 | if (d->inprocess == NULL) { | ||
278 | if (list_empty(&d->bufq)) | ||
279 | return; | ||
280 | buf = container_of(d->bufq.next, struct buf, bufs); | ||
281 | list_del(d->bufq.next); | ||
282 | /*printk(KERN_DEBUG "aoe: bi_size=%ld\n", buf->bio->bi_size); */ | ||
283 | d->inprocess = buf; | ||
284 | } | ||
285 | aoecmd_ata_rw(d, f); | ||
286 | goto loop; | ||
287 | } | ||
288 | |||
289 | static void | 357 | static void |
290 | rexmit(struct aoedev *d, struct frame *f) | 358 | resend(struct aoedev *d, struct aoetgt *t, struct frame *f) |
291 | { | 359 | { |
292 | struct sk_buff *skb; | 360 | struct sk_buff *skb; |
293 | struct aoe_hdr *h; | 361 | struct aoe_hdr *h; |
@@ -295,41 +363,46 @@ rexmit(struct aoedev *d, struct frame *f) | |||
295 | char buf[128]; | 363 | char buf[128]; |
296 | u32 n; | 364 | u32 n; |
297 | 365 | ||
298 | n = newtag(d); | 366 | ifrotate(t); |
367 | n = newtag(t); | ||
368 | skb = f->skb; | ||
369 | h = (struct aoe_hdr *) skb_mac_header(skb); | ||
370 | ah = (struct aoe_atahdr *) (h+1); | ||
299 | 371 | ||
300 | snprintf(buf, sizeof buf, | 372 | snprintf(buf, sizeof buf, |
301 | "%15s e%ld.%ld oldtag=%08x@%08lx newtag=%08x\n", | 373 | "%15s e%ld.%d oldtag=%08x@%08lx newtag=%08x " |
302 | "retransmit", | 374 | "s=%012llx d=%012llx nout=%d\n", |
303 | d->aoemajor, d->aoeminor, f->tag, jiffies, n); | 375 | "retransmit", d->aoemajor, d->aoeminor, f->tag, jiffies, n, |
376 | mac_addr(h->src), | ||
377 | mac_addr(h->dst), t->nout); | ||
304 | aoechr_error(buf); | 378 | aoechr_error(buf); |
305 | 379 | ||
306 | skb = f->skb; | ||
307 | h = (struct aoe_hdr *) skb_mac_header(skb); | ||
308 | ah = (struct aoe_atahdr *) (h+1); | ||
309 | f->tag = n; | 380 | f->tag = n; |
310 | h->tag = cpu_to_be32(n); | 381 | h->tag = cpu_to_be32(n); |
311 | memcpy(h->dst, d->addr, sizeof h->dst); | 382 | memcpy(h->dst, t->addr, sizeof h->dst); |
312 | memcpy(h->src, d->ifp->dev_addr, sizeof h->src); | 383 | memcpy(h->src, t->ifp->nd->dev_addr, sizeof h->src); |
313 | 384 | ||
314 | n = DEFAULTBCNT / 512; | 385 | switch (ah->cmdstat) { |
315 | if (ah->scnt > n) { | 386 | default: |
316 | ah->scnt = n; | 387 | break; |
388 | case WIN_READ: | ||
389 | case WIN_READ_EXT: | ||
390 | case WIN_WRITE: | ||
391 | case WIN_WRITE_EXT: | ||
392 | put_lba(ah, f->lba); | ||
393 | |||
394 | n = f->bcnt; | ||
395 | if (n > DEFAULTBCNT) | ||
396 | n = DEFAULTBCNT; | ||
397 | ah->scnt = n >> 9; | ||
317 | if (ah->aflags & AOEAFL_WRITE) { | 398 | if (ah->aflags & AOEAFL_WRITE) { |
318 | skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr), | 399 | skb_fill_page_desc(skb, 0, virt_to_page(f->bufaddr), |
319 | offset_in_page(f->bufaddr), DEFAULTBCNT); | 400 | offset_in_page(f->bufaddr), n); |
320 | skb->len = sizeof *h + sizeof *ah + DEFAULTBCNT; | 401 | skb->len = sizeof *h + sizeof *ah + n; |
321 | skb->data_len = DEFAULTBCNT; | 402 | skb->data_len = n; |
322 | } | ||
323 | if (++d->lostjumbo > (d->nframes << 1)) | ||
324 | if (d->maxbcnt != DEFAULTBCNT) { | ||
325 | printk(KERN_INFO "aoe: e%ld.%ld: too many lost jumbo on %s - using 1KB frames.\n", | ||
326 | d->aoemajor, d->aoeminor, d->ifp->name); | ||
327 | d->maxbcnt = DEFAULTBCNT; | ||
328 | d->flags |= DEVFL_MAXBCNT; | ||
329 | } | 403 | } |
330 | } | 404 | } |
331 | 405 | skb->dev = t->ifp->nd; | |
332 | skb->dev = d->ifp; | ||
333 | skb = skb_clone(skb, GFP_ATOMIC); | 406 | skb = skb_clone(skb, GFP_ATOMIC); |
334 | if (skb == NULL) | 407 | if (skb == NULL) |
335 | return; | 408 | return; |
@@ -352,10 +425,92 @@ tsince(int tag) | |||
352 | return n; | 425 | return n; |
353 | } | 426 | } |
354 | 427 | ||
428 | static struct aoeif * | ||
429 | getif(struct aoetgt *t, struct net_device *nd) | ||
430 | { | ||
431 | struct aoeif *p, *e; | ||
432 | |||
433 | p = t->ifs; | ||
434 | e = p + NAOEIFS; | ||
435 | for (; p < e; p++) | ||
436 | if (p->nd == nd) | ||
437 | return p; | ||
438 | return NULL; | ||
439 | } | ||
440 | |||
441 | static struct aoeif * | ||
442 | addif(struct aoetgt *t, struct net_device *nd) | ||
443 | { | ||
444 | struct aoeif *p; | ||
445 | |||
446 | p = getif(t, NULL); | ||
447 | if (!p) | ||
448 | return NULL; | ||
449 | p->nd = nd; | ||
450 | p->maxbcnt = DEFAULTBCNT; | ||
451 | p->lost = 0; | ||
452 | p->lostjumbo = 0; | ||
453 | return p; | ||
454 | } | ||
455 | |||
456 | static void | ||
457 | ejectif(struct aoetgt *t, struct aoeif *ifp) | ||
458 | { | ||
459 | struct aoeif *e; | ||
460 | ulong n; | ||
461 | |||
462 | e = t->ifs + NAOEIFS - 1; | ||
463 | n = (e - ifp) * sizeof *ifp; | ||
464 | memmove(ifp, ifp+1, n); | ||
465 | e->nd = NULL; | ||
466 | } | ||
467 | |||
468 | static int | ||
469 | sthtith(struct aoedev *d) | ||
470 | { | ||
471 | struct frame *f, *e, *nf; | ||
472 | struct sk_buff *skb; | ||
473 | struct aoetgt *ht = *d->htgt; | ||
474 | |||
475 | f = ht->frames; | ||
476 | e = f + ht->nframes; | ||
477 | for (; f < e; f++) { | ||
478 | if (f->tag == FREETAG) | ||
479 | continue; | ||
480 | nf = freeframe(d); | ||
481 | if (!nf) | ||
482 | return 0; | ||
483 | skb = nf->skb; | ||
484 | *nf = *f; | ||
485 | f->skb = skb; | ||
486 | f->tag = FREETAG; | ||
487 | nf->waited = 0; | ||
488 | ht->nout--; | ||
489 | (*d->tgt)->nout++; | ||
490 | resend(d, *d->tgt, nf); | ||
491 | } | ||
492 | /* he's clean, he's useless. take away his interfaces */ | ||
493 | memset(ht->ifs, 0, sizeof ht->ifs); | ||
494 | d->htgt = NULL; | ||
495 | return 1; | ||
496 | } | ||
497 | |||
498 | static inline unsigned char | ||
499 | ata_scnt(unsigned char *packet) { | ||
500 | struct aoe_hdr *h; | ||
501 | struct aoe_atahdr *ah; | ||
502 | |||
503 | h = (struct aoe_hdr *) packet; | ||
504 | ah = (struct aoe_atahdr *) (h+1); | ||
505 | return ah->scnt; | ||
506 | } | ||
507 | |||
355 | static void | 508 | static void |
356 | rexmit_timer(ulong vp) | 509 | rexmit_timer(ulong vp) |
357 | { | 510 | { |
358 | struct aoedev *d; | 511 | struct aoedev *d; |
512 | struct aoetgt *t, **tt, **te; | ||
513 | struct aoeif *ifp; | ||
359 | struct frame *f, *e; | 514 | struct frame *f, *e; |
360 | struct sk_buff *sl; | 515 | struct sk_buff *sl; |
361 | register long timeout; | 516 | register long timeout; |
@@ -374,31 +529,79 @@ rexmit_timer(ulong vp) | |||
374 | spin_unlock_irqrestore(&d->lock, flags); | 529 | spin_unlock_irqrestore(&d->lock, flags); |
375 | return; | 530 | return; |
376 | } | 531 | } |
377 | f = d->frames; | 532 | tt = d->targets; |
378 | e = f + d->nframes; | 533 | te = tt + NTARGETS; |
379 | for (; f<e; f++) { | 534 | for (; tt < te && *tt; tt++) { |
380 | if (f->tag != FREETAG && tsince(f->tag) >= timeout) { | 535 | t = *tt; |
536 | f = t->frames; | ||
537 | e = f + t->nframes; | ||
538 | for (; f < e; f++) { | ||
539 | if (f->tag == FREETAG | ||
540 | || tsince(f->tag) < timeout) | ||
541 | continue; | ||
381 | n = f->waited += timeout; | 542 | n = f->waited += timeout; |
382 | n /= HZ; | 543 | n /= HZ; |
383 | if (n > aoe_deadsecs) { /* waited too long for response */ | 544 | if (n > aoe_deadsecs) { |
545 | /* waited too long. device failure. */ | ||
384 | aoedev_downdev(d); | 546 | aoedev_downdev(d); |
385 | break; | 547 | break; |
386 | } | 548 | } |
387 | rexmit(d, f); | 549 | |
550 | if (n > HELPWAIT /* see if another target can help */ | ||
551 | && (tt != d->targets || d->targets[1])) | ||
552 | d->htgt = tt; | ||
553 | |||
554 | if (t->nout == t->maxout) { | ||
555 | if (t->maxout > 1) | ||
556 | t->maxout--; | ||
557 | t->lastwadj = jiffies; | ||
558 | } | ||
559 | |||
560 | ifp = getif(t, f->skb->dev); | ||
561 | if (ifp && ++ifp->lost > (t->nframes << 1) | ||
562 | && (ifp != t->ifs || t->ifs[1].nd)) { | ||
563 | ejectif(t, ifp); | ||
564 | ifp = NULL; | ||
565 | } | ||
566 | |||
567 | if (ata_scnt(skb_mac_header(f->skb)) > DEFAULTBCNT / 512 | ||
568 | && ifp && ++ifp->lostjumbo > (t->nframes << 1) | ||
569 | && ifp->maxbcnt != DEFAULTBCNT) { | ||
570 | printk(KERN_INFO | ||
571 | "aoe: e%ld.%d: " | ||
572 | "too many lost jumbo on " | ||
573 | "%s:%012llx - " | ||
574 | "falling back to %d frames.\n", | ||
575 | d->aoemajor, d->aoeminor, | ||
576 | ifp->nd->name, mac_addr(t->addr), | ||
577 | DEFAULTBCNT); | ||
578 | ifp->maxbcnt = 0; | ||
579 | } | ||
580 | resend(d, t, f); | ||
581 | } | ||
582 | |||
583 | /* window check */ | ||
584 | if (t->nout == t->maxout | ||
585 | && t->maxout < t->nframes | ||
586 | && (jiffies - t->lastwadj)/HZ > 10) { | ||
587 | t->maxout++; | ||
588 | t->lastwadj = jiffies; | ||
388 | } | 589 | } |
389 | } | 590 | } |
390 | if (d->flags & DEVFL_KICKME) { | 591 | |
592 | if (d->sendq_hd) { | ||
593 | n = d->rttavg <<= 1; | ||
594 | if (n > MAXTIMER) | ||
595 | d->rttavg = MAXTIMER; | ||
596 | } | ||
597 | |||
598 | if (d->flags & DEVFL_KICKME || d->htgt) { | ||
391 | d->flags &= ~DEVFL_KICKME; | 599 | d->flags &= ~DEVFL_KICKME; |
392 | aoecmd_work(d); | 600 | aoecmd_work(d); |
393 | } | 601 | } |
394 | 602 | ||
395 | sl = d->sendq_hd; | 603 | sl = d->sendq_hd; |
396 | d->sendq_hd = d->sendq_tl = NULL; | 604 | d->sendq_hd = d->sendq_tl = NULL; |
397 | if (sl) { | ||
398 | n = d->rttavg <<= 1; | ||
399 | if (n > MAXTIMER) | ||
400 | d->rttavg = MAXTIMER; | ||
401 | } | ||
402 | 605 | ||
403 | d->timer.expires = jiffies + TIMERTICK; | 606 | d->timer.expires = jiffies + TIMERTICK; |
404 | add_timer(&d->timer); | 607 | add_timer(&d->timer); |
@@ -408,6 +611,25 @@ rexmit_timer(ulong vp) | |||
408 | aoenet_xmit(sl); | 611 | aoenet_xmit(sl); |
409 | } | 612 | } |
410 | 613 | ||
614 | /* enters with d->lock held */ | ||
615 | void | ||
616 | aoecmd_work(struct aoedev *d) | ||
617 | { | ||
618 | struct buf *buf; | ||
619 | loop: | ||
620 | if (d->htgt && !sthtith(d)) | ||
621 | return; | ||
622 | if (d->inprocess == NULL) { | ||
623 | if (list_empty(&d->bufq)) | ||
624 | return; | ||
625 | buf = container_of(d->bufq.next, struct buf, bufs); | ||
626 | list_del(d->bufq.next); | ||
627 | d->inprocess = buf; | ||
628 | } | ||
629 | if (aoecmd_ata_rw(d)) | ||
630 | goto loop; | ||
631 | } | ||
632 | |||
411 | /* this function performs work that has been deferred until sleeping is OK | 633 | /* this function performs work that has been deferred until sleeping is OK |
412 | */ | 634 | */ |
413 | void | 635 | void |
@@ -440,7 +662,7 @@ aoecmd_sleepwork(struct work_struct *work) | |||
440 | } | 662 | } |
441 | 663 | ||
442 | static void | 664 | static void |
443 | ataid_complete(struct aoedev *d, unsigned char *id) | 665 | ataid_complete(struct aoedev *d, struct aoetgt *t, unsigned char *id) |
444 | { | 666 | { |
445 | u64 ssize; | 667 | u64 ssize; |
446 | u16 n; | 668 | u16 n; |
@@ -475,24 +697,20 @@ ataid_complete(struct aoedev *d, unsigned char *id) | |||
475 | } | 697 | } |
476 | 698 | ||
477 | if (d->ssize != ssize) | 699 | if (d->ssize != ssize) |
478 | printk(KERN_INFO "aoe: %012llx e%lu.%lu v%04x has %llu sectors\n", | 700 | printk(KERN_INFO |
479 | (unsigned long long)mac_addr(d->addr), | 701 | "aoe: %012llx e%ld.%d v%04x has %llu sectors\n", |
702 | mac_addr(t->addr), | ||
480 | d->aoemajor, d->aoeminor, | 703 | d->aoemajor, d->aoeminor, |
481 | d->fw_ver, (long long)ssize); | 704 | d->fw_ver, (long long)ssize); |
482 | d->ssize = ssize; | 705 | d->ssize = ssize; |
483 | d->geo.start = 0; | 706 | d->geo.start = 0; |
707 | if (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE)) | ||
708 | return; | ||
484 | if (d->gd != NULL) { | 709 | if (d->gd != NULL) { |
485 | d->gd->capacity = ssize; | 710 | d->gd->capacity = ssize; |
486 | d->flags |= DEVFL_NEWSIZE; | 711 | d->flags |= DEVFL_NEWSIZE; |
487 | } else { | 712 | } else |
488 | if (d->flags & DEVFL_GDALLOC) { | ||
489 | printk(KERN_ERR "aoe: can't schedule work for e%lu.%lu, %s\n", | ||
490 | d->aoemajor, d->aoeminor, | ||
491 | "it's already on! This shouldn't happen.\n"); | ||
492 | return; | ||
493 | } | ||
494 | d->flags |= DEVFL_GDALLOC; | 713 | d->flags |= DEVFL_GDALLOC; |
495 | } | ||
496 | schedule_work(&d->work); | 714 | schedule_work(&d->work); |
497 | } | 715 | } |
498 | 716 | ||
@@ -519,6 +737,31 @@ calc_rttavg(struct aoedev *d, int rtt) | |||
519 | d->rttavg += n >> 2; | 737 | d->rttavg += n >> 2; |
520 | } | 738 | } |
521 | 739 | ||
740 | static struct aoetgt * | ||
741 | gettgt(struct aoedev *d, char *addr) | ||
742 | { | ||
743 | struct aoetgt **t, **e; | ||
744 | |||
745 | t = d->targets; | ||
746 | e = t + NTARGETS; | ||
747 | for (; t < e && *t; t++) | ||
748 | if (memcmp((*t)->addr, addr, sizeof((*t)->addr)) == 0) | ||
749 | return *t; | ||
750 | return NULL; | ||
751 | } | ||
752 | |||
753 | static inline void | ||
754 | diskstats(struct gendisk *disk, struct bio *bio, ulong duration, sector_t sector) | ||
755 | { | ||
756 | unsigned long n_sect = bio->bi_size >> 9; | ||
757 | const int rw = bio_data_dir(bio); | ||
758 | |||
759 | all_stat_inc(disk, ios[rw], sector); | ||
760 | all_stat_add(disk, ticks[rw], duration, sector); | ||
761 | all_stat_add(disk, sectors[rw], n_sect, sector); | ||
762 | all_stat_add(disk, io_ticks, duration, sector); | ||
763 | } | ||
764 | |||
522 | void | 765 | void |
523 | aoecmd_ata_rsp(struct sk_buff *skb) | 766 | aoecmd_ata_rsp(struct sk_buff *skb) |
524 | { | 767 | { |
@@ -528,6 +771,8 @@ aoecmd_ata_rsp(struct sk_buff *skb) | |||
528 | struct frame *f; | 771 | struct frame *f; |
529 | struct buf *buf; | 772 | struct buf *buf; |
530 | struct sk_buff *sl; | 773 | struct sk_buff *sl; |
774 | struct aoetgt *t; | ||
775 | struct aoeif *ifp; | ||
531 | register long n; | 776 | register long n; |
532 | ulong flags; | 777 | ulong flags; |
533 | char ebuf[128]; | 778 | char ebuf[128]; |
@@ -547,7 +792,14 @@ aoecmd_ata_rsp(struct sk_buff *skb) | |||
547 | spin_lock_irqsave(&d->lock, flags); | 792 | spin_lock_irqsave(&d->lock, flags); |
548 | 793 | ||
549 | n = be32_to_cpu(get_unaligned(&hin->tag)); | 794 | n = be32_to_cpu(get_unaligned(&hin->tag)); |
550 | f = getframe(d, n); | 795 | t = gettgt(d, hin->src); |
796 | if (t == NULL) { | ||
797 | printk(KERN_INFO "aoe: can't find target e%ld.%d:%012llx\n", | ||
798 | d->aoemajor, d->aoeminor, mac_addr(hin->src)); | ||
799 | spin_unlock_irqrestore(&d->lock, flags); | ||
800 | return; | ||
801 | } | ||
802 | f = getframe(t, n); | ||
551 | if (f == NULL) { | 803 | if (f == NULL) { |
552 | calc_rttavg(d, -tsince(n)); | 804 | calc_rttavg(d, -tsince(n)); |
553 | spin_unlock_irqrestore(&d->lock, flags); | 805 | spin_unlock_irqrestore(&d->lock, flags); |
@@ -569,24 +821,24 @@ aoecmd_ata_rsp(struct sk_buff *skb) | |||
569 | ahout = (struct aoe_atahdr *) (hout+1); | 821 | ahout = (struct aoe_atahdr *) (hout+1); |
570 | buf = f->buf; | 822 | buf = f->buf; |
571 | 823 | ||
572 | if (ahout->cmdstat == WIN_IDENTIFY) | ||
573 | d->flags &= ~DEVFL_PAUSE; | ||
574 | if (ahin->cmdstat & 0xa9) { /* these bits cleared on success */ | 824 | if (ahin->cmdstat & 0xa9) { /* these bits cleared on success */ |
575 | printk(KERN_ERR | 825 | printk(KERN_ERR |
576 | "aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%ld\n", | 826 | "aoe: ata error cmd=%2.2Xh stat=%2.2Xh from e%ld.%d\n", |
577 | ahout->cmdstat, ahin->cmdstat, | 827 | ahout->cmdstat, ahin->cmdstat, |
578 | d->aoemajor, d->aoeminor); | 828 | d->aoemajor, d->aoeminor); |
579 | if (buf) | 829 | if (buf) |
580 | buf->flags |= BUFFL_FAIL; | 830 | buf->flags |= BUFFL_FAIL; |
581 | } else { | 831 | } else { |
832 | if (d->htgt && t == *d->htgt) /* I'll help myself, thank you. */ | ||
833 | d->htgt = NULL; | ||
582 | n = ahout->scnt << 9; | 834 | n = ahout->scnt << 9; |
583 | switch (ahout->cmdstat) { | 835 | switch (ahout->cmdstat) { |
584 | case WIN_READ: | 836 | case WIN_READ: |
585 | case WIN_READ_EXT: | 837 | case WIN_READ_EXT: |
586 | if (skb->len - sizeof *hin - sizeof *ahin < n) { | 838 | if (skb->len - sizeof *hin - sizeof *ahin < n) { |
587 | printk(KERN_ERR | 839 | printk(KERN_ERR |
588 | "aoe: runt data size in read. skb->len=%d\n", | 840 | "aoe: %s. skb->len=%d need=%ld\n", |
589 | skb->len); | 841 | "runt data size in read", skb->len, n); |
590 | /* fail frame f? just returning will rexmit. */ | 842 | /* fail frame f? just returning will rexmit. */ |
591 | spin_unlock_irqrestore(&d->lock, flags); | 843 | spin_unlock_irqrestore(&d->lock, flags); |
592 | return; | 844 | return; |
@@ -594,32 +846,18 @@ aoecmd_ata_rsp(struct sk_buff *skb) | |||
594 | memcpy(f->bufaddr, ahin+1, n); | 846 | memcpy(f->bufaddr, ahin+1, n); |
595 | case WIN_WRITE: | 847 | case WIN_WRITE: |
596 | case WIN_WRITE_EXT: | 848 | case WIN_WRITE_EXT: |
849 | ifp = getif(t, skb->dev); | ||
850 | if (ifp) { | ||
851 | ifp->lost = 0; | ||
852 | if (n > DEFAULTBCNT) | ||
853 | ifp->lostjumbo = 0; | ||
854 | } | ||
597 | if (f->bcnt -= n) { | 855 | if (f->bcnt -= n) { |
598 | skb = f->skb; | 856 | f->lba += n >> 9; |
599 | f->bufaddr += n; | 857 | f->bufaddr += n; |
600 | put_lba(ahout, f->lba += ahout->scnt); | 858 | resend(d, t, f); |
601 | n = f->bcnt; | 859 | goto xmit; |
602 | if (n > DEFAULTBCNT) | ||
603 | n = DEFAULTBCNT; | ||
604 | ahout->scnt = n >> 9; | ||
605 | if (ahout->aflags & AOEAFL_WRITE) { | ||
606 | skb_fill_page_desc(skb, 0, | ||
607 | virt_to_page(f->bufaddr), | ||
608 | offset_in_page(f->bufaddr), n); | ||
609 | skb->len = sizeof *hout + sizeof *ahout + n; | ||
610 | skb->data_len = n; | ||
611 | } | ||
612 | f->tag = newtag(d); | ||
613 | hout->tag = cpu_to_be32(f->tag); | ||
614 | skb->dev = d->ifp; | ||
615 | skb = skb_clone(skb, GFP_ATOMIC); | ||
616 | spin_unlock_irqrestore(&d->lock, flags); | ||
617 | if (skb) | ||
618 | aoenet_xmit(skb); | ||
619 | return; | ||
620 | } | 860 | } |
621 | if (n > DEFAULTBCNT) | ||
622 | d->lostjumbo = 0; | ||
623 | break; | 861 | break; |
624 | case WIN_IDENTIFY: | 862 | case WIN_IDENTIFY: |
625 | if (skb->len - sizeof *hin - sizeof *ahin < 512) { | 863 | if (skb->len - sizeof *hin - sizeof *ahin < 512) { |
@@ -629,7 +867,7 @@ aoecmd_ata_rsp(struct sk_buff *skb) | |||
629 | spin_unlock_irqrestore(&d->lock, flags); | 867 | spin_unlock_irqrestore(&d->lock, flags); |
630 | return; | 868 | return; |
631 | } | 869 | } |
632 | ataid_complete(d, (char *) (ahin+1)); | 870 | ataid_complete(d, t, (char *) (ahin+1)); |
633 | break; | 871 | break; |
634 | default: | 872 | default: |
635 | printk(KERN_INFO | 873 | printk(KERN_INFO |
@@ -640,28 +878,19 @@ aoecmd_ata_rsp(struct sk_buff *skb) | |||
640 | } | 878 | } |
641 | } | 879 | } |
642 | 880 | ||
643 | if (buf) { | 881 | if (buf && --buf->nframesout == 0 && buf->resid == 0) { |
644 | buf->nframesout -= 1; | 882 | diskstats(d->gd, buf->bio, jiffies - buf->stime, buf->sector); |
645 | if (buf->nframesout == 0 && buf->resid == 0) { | 883 | n = (buf->flags & BUFFL_FAIL) ? -EIO : 0; |
646 | unsigned long duration = jiffies - buf->start_time; | 884 | bio_endio(buf->bio, n); |
647 | unsigned long n_sect = buf->bio->bi_size >> 9; | 885 | mempool_free(buf, d->bufpool); |
648 | struct gendisk *disk = d->gd; | ||
649 | const int rw = bio_data_dir(buf->bio); | ||
650 | |||
651 | disk_stat_inc(disk, ios[rw]); | ||
652 | disk_stat_add(disk, ticks[rw], duration); | ||
653 | disk_stat_add(disk, sectors[rw], n_sect); | ||
654 | disk_stat_add(disk, io_ticks, duration); | ||
655 | n = (buf->flags & BUFFL_FAIL) ? -EIO : 0; | ||
656 | bio_endio(buf->bio, n); | ||
657 | mempool_free(buf, d->bufpool); | ||
658 | } | ||
659 | } | 886 | } |
660 | 887 | ||
661 | f->buf = NULL; | 888 | f->buf = NULL; |
662 | f->tag = FREETAG; | 889 | f->tag = FREETAG; |
890 | t->nout--; | ||
663 | 891 | ||
664 | aoecmd_work(d); | 892 | aoecmd_work(d); |
893 | xmit: | ||
665 | sl = d->sendq_hd; | 894 | sl = d->sendq_hd; |
666 | d->sendq_hd = d->sendq_tl = NULL; | 895 | d->sendq_hd = d->sendq_tl = NULL; |
667 | 896 | ||
@@ -679,23 +908,20 @@ aoecmd_cfg(ushort aoemajor, unsigned char aoeminor) | |||
679 | aoenet_xmit(sl); | 908 | aoenet_xmit(sl); |
680 | } | 909 | } |
681 | 910 | ||
682 | /* | 911 | struct sk_buff * |
683 | * Since we only call this in one place (and it only prepares one frame) | ||
684 | * we just return the skb. Usually we'd chain it up to the aoedev sendq. | ||
685 | */ | ||
686 | static struct sk_buff * | ||
687 | aoecmd_ata_id(struct aoedev *d) | 912 | aoecmd_ata_id(struct aoedev *d) |
688 | { | 913 | { |
689 | struct aoe_hdr *h; | 914 | struct aoe_hdr *h; |
690 | struct aoe_atahdr *ah; | 915 | struct aoe_atahdr *ah; |
691 | struct frame *f; | 916 | struct frame *f; |
692 | struct sk_buff *skb; | 917 | struct sk_buff *skb; |
918 | struct aoetgt *t; | ||
693 | 919 | ||
694 | f = freeframe(d); | 920 | f = freeframe(d); |
695 | if (f == NULL) { | 921 | if (f == NULL) |
696 | printk(KERN_ERR "aoe: can't get a frame. This shouldn't happen.\n"); | ||
697 | return NULL; | 922 | return NULL; |
698 | } | 923 | |
924 | t = *d->tgt; | ||
699 | 925 | ||
700 | /* initialize the headers & frame */ | 926 | /* initialize the headers & frame */ |
701 | skb = f->skb; | 927 | skb = f->skb; |
@@ -703,7 +929,8 @@ aoecmd_ata_id(struct aoedev *d) | |||
703 | ah = (struct aoe_atahdr *) (h+1); | 929 | ah = (struct aoe_atahdr *) (h+1); |
704 | skb_put(skb, sizeof *h + sizeof *ah); | 930 | skb_put(skb, sizeof *h + sizeof *ah); |
705 | memset(h, 0, skb->len); | 931 | memset(h, 0, skb->len); |
706 | f->tag = aoehdr_atainit(d, h); | 932 | f->tag = aoehdr_atainit(d, t, h); |
933 | t->nout++; | ||
707 | f->waited = 0; | 934 | f->waited = 0; |
708 | 935 | ||
709 | /* set up ata header */ | 936 | /* set up ata header */ |
@@ -711,7 +938,7 @@ aoecmd_ata_id(struct aoedev *d) | |||
711 | ah->cmdstat = WIN_IDENTIFY; | 938 | ah->cmdstat = WIN_IDENTIFY; |
712 | ah->lba3 = 0xa0; | 939 | ah->lba3 = 0xa0; |
713 | 940 | ||
714 | skb->dev = d->ifp; | 941 | skb->dev = t->ifp->nd; |
715 | 942 | ||
716 | d->rttavg = MAXTIMER; | 943 | d->rttavg = MAXTIMER; |
717 | d->timer.function = rexmit_timer; | 944 | d->timer.function = rexmit_timer; |
@@ -719,15 +946,52 @@ aoecmd_ata_id(struct aoedev *d) | |||
719 | return skb_clone(skb, GFP_ATOMIC); | 946 | return skb_clone(skb, GFP_ATOMIC); |
720 | } | 947 | } |
721 | 948 | ||
949 | static struct aoetgt * | ||
950 | addtgt(struct aoedev *d, char *addr, ulong nframes) | ||
951 | { | ||
952 | struct aoetgt *t, **tt, **te; | ||
953 | struct frame *f, *e; | ||
954 | |||
955 | tt = d->targets; | ||
956 | te = tt + NTARGETS; | ||
957 | for (; tt < te && *tt; tt++) | ||
958 | ; | ||
959 | |||
960 | if (tt == te) { | ||
961 | printk(KERN_INFO | ||
962 | "aoe: device addtgt failure; too many targets\n"); | ||
963 | return NULL; | ||
964 | } | ||
965 | t = kcalloc(1, sizeof *t, GFP_ATOMIC); | ||
966 | f = kcalloc(nframes, sizeof *f, GFP_ATOMIC); | ||
967 | if (!t || !f) { | ||
968 | kfree(f); | ||
969 | kfree(t); | ||
970 | printk(KERN_INFO "aoe: cannot allocate memory to add target\n"); | ||
971 | return NULL; | ||
972 | } | ||
973 | |||
974 | t->nframes = nframes; | ||
975 | t->frames = f; | ||
976 | e = f + nframes; | ||
977 | for (; f < e; f++) | ||
978 | f->tag = FREETAG; | ||
979 | memcpy(t->addr, addr, sizeof t->addr); | ||
980 | t->ifp = t->ifs; | ||
981 | t->maxout = t->nframes; | ||
982 | return *tt = t; | ||
983 | } | ||
984 | |||
722 | void | 985 | void |
723 | aoecmd_cfg_rsp(struct sk_buff *skb) | 986 | aoecmd_cfg_rsp(struct sk_buff *skb) |
724 | { | 987 | { |
725 | struct aoedev *d; | 988 | struct aoedev *d; |
726 | struct aoe_hdr *h; | 989 | struct aoe_hdr *h; |
727 | struct aoe_cfghdr *ch; | 990 | struct aoe_cfghdr *ch; |
991 | struct aoetgt *t; | ||
992 | struct aoeif *ifp; | ||
728 | ulong flags, sysminor, aoemajor; | 993 | ulong flags, sysminor, aoemajor; |
729 | struct sk_buff *sl; | 994 | struct sk_buff *sl; |
730 | enum { MAXFRAMES = 16 }; | ||
731 | u16 n; | 995 | u16 n; |
732 | 996 | ||
733 | h = (struct aoe_hdr *) skb_mac_header(skb); | 997 | h = (struct aoe_hdr *) skb_mac_header(skb); |
@@ -752,10 +1016,10 @@ aoecmd_cfg_rsp(struct sk_buff *skb) | |||
752 | } | 1016 | } |
753 | 1017 | ||
754 | n = be16_to_cpu(ch->bufcnt); | 1018 | n = be16_to_cpu(ch->bufcnt); |
755 | if (n > MAXFRAMES) /* keep it reasonable */ | 1019 | if (n > aoe_maxout) /* keep it reasonable */ |
756 | n = MAXFRAMES; | 1020 | n = aoe_maxout; |
757 | 1021 | ||
758 | d = aoedev_by_sysminor_m(sysminor, n); | 1022 | d = aoedev_by_sysminor_m(sysminor); |
759 | if (d == NULL) { | 1023 | if (d == NULL) { |
760 | printk(KERN_INFO "aoe: device sysminor_m failure\n"); | 1024 | printk(KERN_INFO "aoe: device sysminor_m failure\n"); |
761 | return; | 1025 | return; |
@@ -763,38 +1027,74 @@ aoecmd_cfg_rsp(struct sk_buff *skb) | |||
763 | 1027 | ||
764 | spin_lock_irqsave(&d->lock, flags); | 1028 | spin_lock_irqsave(&d->lock, flags); |
765 | 1029 | ||
766 | /* permit device to migrate mac and network interface */ | 1030 | t = gettgt(d, h->src); |
767 | d->ifp = skb->dev; | 1031 | if (!t) { |
768 | memcpy(d->addr, h->src, sizeof d->addr); | 1032 | t = addtgt(d, h->src, n); |
769 | if (!(d->flags & DEVFL_MAXBCNT)) { | 1033 | if (!t) { |
770 | n = d->ifp->mtu; | 1034 | spin_unlock_irqrestore(&d->lock, flags); |
1035 | return; | ||
1036 | } | ||
1037 | } | ||
1038 | ifp = getif(t, skb->dev); | ||
1039 | if (!ifp) { | ||
1040 | ifp = addif(t, skb->dev); | ||
1041 | if (!ifp) { | ||
1042 | printk(KERN_INFO | ||
1043 | "aoe: device addif failure; " | ||
1044 | "too many interfaces?\n"); | ||
1045 | spin_unlock_irqrestore(&d->lock, flags); | ||
1046 | return; | ||
1047 | } | ||
1048 | } | ||
1049 | if (ifp->maxbcnt) { | ||
1050 | n = ifp->nd->mtu; | ||
771 | n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr); | 1051 | n -= sizeof (struct aoe_hdr) + sizeof (struct aoe_atahdr); |
772 | n /= 512; | 1052 | n /= 512; |
773 | if (n > ch->scnt) | 1053 | if (n > ch->scnt) |
774 | n = ch->scnt; | 1054 | n = ch->scnt; |
775 | n = n ? n * 512 : DEFAULTBCNT; | 1055 | n = n ? n * 512 : DEFAULTBCNT; |
776 | if (n != d->maxbcnt) { | 1056 | if (n != ifp->maxbcnt) { |
777 | printk(KERN_INFO | 1057 | printk(KERN_INFO |
778 | "aoe: e%ld.%ld: setting %d byte data frames on %s\n", | 1058 | "aoe: e%ld.%d: setting %d%s%s:%012llx\n", |
779 | d->aoemajor, d->aoeminor, n, d->ifp->name); | 1059 | d->aoemajor, d->aoeminor, n, |
780 | d->maxbcnt = n; | 1060 | " byte data frames on ", ifp->nd->name, |
1061 | mac_addr(t->addr)); | ||
1062 | ifp->maxbcnt = n; | ||
781 | } | 1063 | } |
782 | } | 1064 | } |
783 | 1065 | ||
784 | /* don't change users' perspective */ | 1066 | /* don't change users' perspective */ |
785 | if (d->nopen && !(d->flags & DEVFL_PAUSE)) { | 1067 | if (d->nopen) { |
786 | spin_unlock_irqrestore(&d->lock, flags); | 1068 | spin_unlock_irqrestore(&d->lock, flags); |
787 | return; | 1069 | return; |
788 | } | 1070 | } |
789 | d->flags |= DEVFL_PAUSE; /* force pause */ | ||
790 | d->mintimer = MINTIMER; | ||
791 | d->fw_ver = be16_to_cpu(ch->fwver); | 1071 | d->fw_ver = be16_to_cpu(ch->fwver); |
792 | 1072 | ||
793 | /* check for already outstanding ataid */ | 1073 | sl = aoecmd_ata_id(d); |
794 | sl = aoedev_isbusy(d) == 0 ? aoecmd_ata_id(d) : NULL; | ||
795 | 1074 | ||
796 | spin_unlock_irqrestore(&d->lock, flags); | 1075 | spin_unlock_irqrestore(&d->lock, flags); |
797 | 1076 | ||
798 | aoenet_xmit(sl); | 1077 | aoenet_xmit(sl); |
799 | } | 1078 | } |
800 | 1079 | ||
1080 | void | ||
1081 | aoecmd_cleanslate(struct aoedev *d) | ||
1082 | { | ||
1083 | struct aoetgt **t, **te; | ||
1084 | struct aoeif *p, *e; | ||
1085 | |||
1086 | d->mintimer = MINTIMER; | ||
1087 | |||
1088 | t = d->targets; | ||
1089 | te = t + NTARGETS; | ||
1090 | for (; t < te && *t; t++) { | ||
1091 | (*t)->maxout = (*t)->nframes; | ||
1092 | p = (*t)->ifs; | ||
1093 | e = p + NAOEIFS; | ||
1094 | for (; p < e; p++) { | ||
1095 | p->lostjumbo = 0; | ||
1096 | p->lost = 0; | ||
1097 | p->maxbcnt = DEFAULTBCNT; | ||
1098 | } | ||
1099 | } | ||
1100 | } | ||
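
The aoecmd.c changes above introduce per-target state: gettgt(), addtgt(), and aoecmd_cleanslate() all walk the fixed-size d->targets array, which is filled from the front and stops at the first NULL slot. The standalone sketch below mirrors that scan idiom only; the struct layout, the NTARGETS value, and the sample MAC are illustrative stand-ins, not the driver's real definitions.

#include <stdio.h>
#include <string.h>

#define NTARGETS 8			/* illustrative; the driver defines its own value */

struct tgt {
	unsigned char addr[6];		/* MAC address used as the lookup key */
};

/* Scan an array of pointers that is filled from the front and NULL-terminated:
 * stop at the first empty slot or at the end of the array. */
static struct tgt *lookup(struct tgt **targets, const unsigned char *addr)
{
	struct tgt **t = targets, **e = targets + NTARGETS;

	for (; t < e && *t; t++)
		if (memcmp((*t)->addr, addr, 6) == 0)
			return *t;
	return NULL;			/* caller may then claim the first free slot */
}

int main(void)
{
	struct tgt a = { { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 } };
	struct tgt *targets[NTARGETS] = { &a };
	const unsigned char key[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 };

	printf("found: %s\n", lookup(targets, key) ? "yes" : "no");
	return 0;
}

addtgt() relies on the same property when it appends: the first NULL entry found by the scan is the slot that receives the new target pointer.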
diff --git a/drivers/block/aoe/aoedev.c b/drivers/block/aoe/aoedev.c index 51f50710e5fc..f9a1cd9edb77 100644 --- a/drivers/block/aoe/aoedev.c +++ b/drivers/block/aoe/aoedev.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* Copyright (c) 2006 Coraid, Inc. See COPYING for GPL terms. */ | 1 | /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ |
2 | /* | 2 | /* |
3 | * aoedev.c | 3 | * aoedev.c |
4 | * AoE device utility functions; maintains device list. | 4 | * AoE device utility functions; maintains device list. |
@@ -7,23 +7,32 @@ | |||
7 | #include <linux/hdreg.h> | 7 | #include <linux/hdreg.h> |
8 | #include <linux/blkdev.h> | 8 | #include <linux/blkdev.h> |
9 | #include <linux/netdevice.h> | 9 | #include <linux/netdevice.h> |
10 | #include <linux/delay.h> | ||
10 | #include "aoe.h" | 11 | #include "aoe.h" |
11 | 12 | ||
13 | static void dummy_timer(ulong); | ||
14 | static void aoedev_freedev(struct aoedev *); | ||
15 | static void freetgt(struct aoedev *d, struct aoetgt *t); | ||
16 | static void skbpoolfree(struct aoedev *d); | ||
17 | |||
12 | static struct aoedev *devlist; | 18 | static struct aoedev *devlist; |
13 | static spinlock_t devlist_lock; | 19 | static DEFINE_SPINLOCK(devlist_lock); |
14 | 20 | ||
15 | int | 21 | int |
16 | aoedev_isbusy(struct aoedev *d) | 22 | aoedev_isbusy(struct aoedev *d) |
17 | { | 23 | { |
24 | struct aoetgt **t, **te; | ||
18 | struct frame *f, *e; | 25 | struct frame *f, *e; |
19 | 26 | ||
20 | f = d->frames; | 27 | t = d->targets; |
21 | e = f + d->nframes; | 28 | te = t + NTARGETS; |
22 | do { | 29 | for (; t < te && *t; t++) { |
23 | if (f->tag != FREETAG) | 30 | f = (*t)->frames; |
24 | return 1; | 31 | e = f + (*t)->nframes; |
25 | } while (++f < e); | 32 | for (; f < e; f++) |
26 | 33 | if (f->tag != FREETAG) | |
34 | return 1; | ||
35 | } | ||
27 | return 0; | 36 | return 0; |
28 | } | 37 | } |
29 | 38 | ||
@@ -55,75 +64,41 @@ dummy_timer(ulong vp) | |||
55 | add_timer(&d->timer); | 64 | add_timer(&d->timer); |
56 | } | 65 | } |
57 | 66 | ||
58 | /* called with devlist lock held */ | ||
59 | static struct aoedev * | ||
60 | aoedev_newdev(ulong nframes) | ||
61 | { | ||
62 | struct aoedev *d; | ||
63 | struct frame *f, *e; | ||
64 | |||
65 | d = kzalloc(sizeof *d, GFP_ATOMIC); | ||
66 | f = kcalloc(nframes, sizeof *f, GFP_ATOMIC); | ||
67 | switch (!d || !f) { | ||
68 | case 0: | ||
69 | d->nframes = nframes; | ||
70 | d->frames = f; | ||
71 | e = f + nframes; | ||
72 | for (; f<e; f++) { | ||
73 | f->tag = FREETAG; | ||
74 | f->skb = new_skb(ETH_ZLEN); | ||
75 | if (!f->skb) | ||
76 | break; | ||
77 | } | ||
78 | if (f == e) | ||
79 | break; | ||
80 | while (f > d->frames) { | ||
81 | f--; | ||
82 | dev_kfree_skb(f->skb); | ||
83 | } | ||
84 | default: | ||
85 | if (f) | ||
86 | kfree(f); | ||
87 | if (d) | ||
88 | kfree(d); | ||
89 | return NULL; | ||
90 | } | ||
91 | INIT_WORK(&d->work, aoecmd_sleepwork); | ||
92 | spin_lock_init(&d->lock); | ||
93 | init_timer(&d->timer); | ||
94 | d->timer.data = (ulong) d; | ||
95 | d->timer.function = dummy_timer; | ||
96 | d->timer.expires = jiffies + HZ; | ||
97 | add_timer(&d->timer); | ||
98 | d->bufpool = NULL; /* defer to aoeblk_gdalloc */ | ||
99 | INIT_LIST_HEAD(&d->bufq); | ||
100 | d->next = devlist; | ||
101 | devlist = d; | ||
102 | |||
103 | return d; | ||
104 | } | ||
105 | |||
106 | void | 67 | void |
107 | aoedev_downdev(struct aoedev *d) | 68 | aoedev_downdev(struct aoedev *d) |
108 | { | 69 | { |
70 | struct aoetgt **t, **te; | ||
109 | struct frame *f, *e; | 71 | struct frame *f, *e; |
110 | struct buf *buf; | 72 | struct buf *buf; |
111 | struct bio *bio; | 73 | struct bio *bio; |
112 | 74 | ||
113 | f = d->frames; | 75 | t = d->targets; |
114 | e = f + d->nframes; | 76 | te = t + NTARGETS; |
115 | for (; f<e; f->tag = FREETAG, f->buf = NULL, f++) { | 77 | for (; t < te && *t; t++) { |
116 | if (f->tag == FREETAG || f->buf == NULL) | 78 | f = (*t)->frames; |
117 | continue; | 79 | e = f + (*t)->nframes; |
118 | buf = f->buf; | 80 | for (; f < e; f->tag = FREETAG, f->buf = NULL, f++) { |
119 | bio = buf->bio; | 81 | if (f->tag == FREETAG || f->buf == NULL) |
120 | if (--buf->nframesout == 0) { | 82 | continue; |
121 | mempool_free(buf, d->bufpool); | 83 | buf = f->buf; |
122 | bio_endio(bio, -EIO); | 84 | bio = buf->bio; |
85 | if (--buf->nframesout == 0 | ||
86 | && buf != d->inprocess) { | ||
87 | mempool_free(buf, d->bufpool); | ||
88 | bio_endio(bio, -EIO); | ||
89 | } | ||
123 | } | 90 | } |
124 | skb_shinfo(f->skb)->nr_frags = f->skb->data_len = 0; | 91 | (*t)->maxout = (*t)->nframes; |
92 | (*t)->nout = 0; | ||
93 | } | ||
94 | buf = d->inprocess; | ||
95 | if (buf) { | ||
96 | bio = buf->bio; | ||
97 | mempool_free(buf, d->bufpool); | ||
98 | bio_endio(bio, -EIO); | ||
125 | } | 99 | } |
126 | d->inprocess = NULL; | 100 | d->inprocess = NULL; |
101 | d->htgt = NULL; | ||
127 | 102 | ||
128 | while (!list_empty(&d->bufq)) { | 103 | while (!list_empty(&d->bufq)) { |
129 | buf = container_of(d->bufq.next, struct buf, bufs); | 104 | buf = container_of(d->bufq.next, struct buf, bufs); |
@@ -136,12 +111,114 @@ aoedev_downdev(struct aoedev *d) | |||
136 | if (d->gd) | 111 | if (d->gd) |
137 | d->gd->capacity = 0; | 112 | d->gd->capacity = 0; |
138 | 113 | ||
139 | d->flags &= ~(DEVFL_UP | DEVFL_PAUSE); | 114 | d->flags &= ~DEVFL_UP; |
115 | } | ||
116 | |||
117 | static void | ||
118 | aoedev_freedev(struct aoedev *d) | ||
119 | { | ||
120 | struct aoetgt **t, **e; | ||
121 | |||
122 | if (d->gd) { | ||
123 | aoedisk_rm_sysfs(d); | ||
124 | del_gendisk(d->gd); | ||
125 | put_disk(d->gd); | ||
126 | } | ||
127 | t = d->targets; | ||
128 | e = t + NTARGETS; | ||
129 | for (; t < e && *t; t++) | ||
130 | freetgt(d, *t); | ||
131 | if (d->bufpool) | ||
132 | mempool_destroy(d->bufpool); | ||
133 | skbpoolfree(d); | ||
134 | kfree(d); | ||
135 | } | ||
136 | |||
137 | int | ||
138 | aoedev_flush(const char __user *str, size_t cnt) | ||
139 | { | ||
140 | ulong flags; | ||
141 | struct aoedev *d, **dd; | ||
142 | struct aoedev *rmd = NULL; | ||
143 | char buf[16]; | ||
144 | int all = 0; | ||
145 | |||
146 | if (cnt >= 3) { | ||
147 | if (cnt > sizeof buf) | ||
148 | cnt = sizeof buf; | ||
149 | if (copy_from_user(buf, str, cnt)) | ||
150 | return -EFAULT; | ||
151 | all = !strncmp(buf, "all", 3); | ||
152 | } | ||
153 | |||
154 | flush_scheduled_work(); | ||
155 | spin_lock_irqsave(&devlist_lock, flags); | ||
156 | dd = &devlist; | ||
157 | while ((d = *dd)) { | ||
158 | spin_lock(&d->lock); | ||
159 | if ((!all && (d->flags & DEVFL_UP)) | ||
160 | || (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE)) | ||
161 | || d->nopen) { | ||
162 | spin_unlock(&d->lock); | ||
163 | dd = &d->next; | ||
164 | continue; | ||
165 | } | ||
166 | *dd = d->next; | ||
167 | aoedev_downdev(d); | ||
168 | d->flags |= DEVFL_TKILL; | ||
169 | spin_unlock(&d->lock); | ||
170 | d->next = rmd; | ||
171 | rmd = d; | ||
172 | } | ||
173 | spin_unlock_irqrestore(&devlist_lock, flags); | ||
174 | while ((d = rmd)) { | ||
175 | rmd = d->next; | ||
176 | del_timer_sync(&d->timer); | ||
177 | aoedev_freedev(d); /* must be able to sleep */ | ||
178 | } | ||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | /* I'm not really sure that this is a realistic problem, but if the | ||
183 | network driver goes gonzo let's just leak memory after complaining. */ | ||
184 | static void | ||
185 | skbfree(struct sk_buff *skb) | ||
186 | { | ||
187 | enum { Sms = 100, Tms = 3*1000}; | ||
188 | int i = Tms / Sms; | ||
189 | |||
190 | if (skb == NULL) | ||
191 | return; | ||
192 | while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0) | ||
193 | msleep(Sms); | ||
194 | if (i <= 0) { | ||
195 | printk(KERN_ERR | ||
196 | "aoe: %s holds ref: %s\n", | ||
197 | skb->dev ? skb->dev->name : "netif", | ||
198 | "cannot free skb -- memory leaked."); | ||
199 | return; | ||
200 | } | ||
201 | skb_shinfo(skb)->nr_frags = skb->data_len = 0; | ||
202 | skb_trim(skb, 0); | ||
203 | dev_kfree_skb(skb); | ||
204 | } | ||
205 | |||
206 | static void | ||
207 | skbpoolfree(struct aoedev *d) | ||
208 | { | ||
209 | struct sk_buff *skb; | ||
210 | |||
211 | while ((skb = d->skbpool_hd)) { | ||
212 | d->skbpool_hd = skb->next; | ||
213 | skb->next = NULL; | ||
214 | skbfree(skb); | ||
215 | } | ||
216 | d->skbpool_tl = NULL; | ||
140 | } | 217 | } |
141 | 218 | ||
142 | /* find it or malloc it */ | 219 | /* find it or malloc it */ |
143 | struct aoedev * | 220 | struct aoedev * |
144 | aoedev_by_sysminor_m(ulong sysminor, ulong bufcnt) | 221 | aoedev_by_sysminor_m(ulong sysminor) |
145 | { | 222 | { |
146 | struct aoedev *d; | 223 | struct aoedev *d; |
147 | ulong flags; | 224 | ulong flags; |
@@ -151,43 +228,43 @@ aoedev_by_sysminor_m(ulong sysminor, ulong bufcnt) | |||
151 | for (d=devlist; d; d=d->next) | 228 | for (d=devlist; d; d=d->next) |
152 | if (d->sysminor == sysminor) | 229 | if (d->sysminor == sysminor) |
153 | break; | 230 | break; |
154 | 231 | if (d) | |
155 | if (d == NULL) { | 232 | goto out; |
156 | d = aoedev_newdev(bufcnt); | 233 | d = kcalloc(1, sizeof *d, GFP_ATOMIC); |
157 | if (d == NULL) { | 234 | if (!d) |
158 | spin_unlock_irqrestore(&devlist_lock, flags); | 235 | goto out; |
159 | printk(KERN_INFO "aoe: aoedev_newdev failure.\n"); | 236 | INIT_WORK(&d->work, aoecmd_sleepwork); |
160 | return NULL; | 237 | spin_lock_init(&d->lock); |
161 | } | 238 | init_timer(&d->timer); |
162 | d->sysminor = sysminor; | 239 | d->timer.data = (ulong) d; |
163 | d->aoemajor = AOEMAJOR(sysminor); | 240 | d->timer.function = dummy_timer; |
164 | d->aoeminor = AOEMINOR(sysminor); | 241 | d->timer.expires = jiffies + HZ; |
165 | } | 242 | add_timer(&d->timer); |
166 | 243 | d->bufpool = NULL; /* defer to aoeblk_gdalloc */ | |
244 | d->tgt = d->targets; | ||
245 | INIT_LIST_HEAD(&d->bufq); | ||
246 | d->sysminor = sysminor; | ||
247 | d->aoemajor = AOEMAJOR(sysminor); | ||
248 | d->aoeminor = AOEMINOR(sysminor); | ||
249 | d->mintimer = MINTIMER; | ||
250 | d->next = devlist; | ||
251 | devlist = d; | ||
252 | out: | ||
167 | spin_unlock_irqrestore(&devlist_lock, flags); | 253 | spin_unlock_irqrestore(&devlist_lock, flags); |
168 | return d; | 254 | return d; |
169 | } | 255 | } |
170 | 256 | ||
171 | static void | 257 | static void |
172 | aoedev_freedev(struct aoedev *d) | 258 | freetgt(struct aoedev *d, struct aoetgt *t) |
173 | { | 259 | { |
174 | struct frame *f, *e; | 260 | struct frame *f, *e; |
175 | 261 | ||
176 | if (d->gd) { | 262 | f = t->frames; |
177 | aoedisk_rm_sysfs(d); | 263 | e = f + t->nframes; |
178 | del_gendisk(d->gd); | 264 | for (; f < e; f++) |
179 | put_disk(d->gd); | 265 | skbfree(f->skb); |
180 | } | 266 | kfree(t->frames); |
181 | f = d->frames; | 267 | kfree(t); |
182 | e = f + d->nframes; | ||
183 | for (; f<e; f++) { | ||
184 | skb_shinfo(f->skb)->nr_frags = 0; | ||
185 | dev_kfree_skb(f->skb); | ||
186 | } | ||
187 | kfree(d->frames); | ||
188 | if (d->bufpool) | ||
189 | mempool_destroy(d->bufpool); | ||
190 | kfree(d); | ||
191 | } | 268 | } |
192 | 269 | ||
193 | void | 270 | void |
@@ -214,7 +291,5 @@ aoedev_exit(void) | |||
214 | int __init | 291 | int __init |
215 | aoedev_init(void) | 292 | aoedev_init(void) |
216 | { | 293 | { |
217 | spin_lock_init(&devlist_lock); | ||
218 | return 0; | 294 | return 0; |
219 | } | 295 | } |
220 | |||
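
One detail worth noting in the new aoedev.c code: skbfree() polls skb_shinfo(skb)->dataref at Sms-millisecond intervals for a total of Tms before giving up and deliberately leaking the buffer rather than freeing memory the network driver may still be touching. Below is a minimal userspace sketch of that bounded-poll-then-give-up pattern, with the release callback and the timing constants as stand-ins.

#include <stdio.h>
#include <unistd.h>

/* Poll a release condition at a fixed interval, giving up after a total
 * time budget. Returns nonzero if the object was released in time. */
static int wait_for_release(int (*released)(void *), void *obj)
{
	enum { Sms = 100, Tms = 3 * 1000 };	/* same shape as the driver's constants */
	int i = Tms / Sms;

	while (!released(obj) && i-- > 0)
		usleep(Sms * 1000);		/* stand-in for msleep(Sms) */
	return released(obj);
}

static int always_released(void *obj)
{
	(void)obj;
	return 1;				/* stand-in for dataref having dropped to 1 */
}

int main(void)
{
	if (wait_for_release(always_released, NULL))
		puts("released; safe to free");
	else
		fprintf(stderr, "timed out; the driver leaks the skb here\n");
	return 0;
}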
diff --git a/drivers/block/aoe/aoemain.c b/drivers/block/aoe/aoemain.c index a04b7d613299..7b15a5e9cec0 100644 --- a/drivers/block/aoe/aoemain.c +++ b/drivers/block/aoe/aoemain.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* Copyright (c) 2006 Coraid, Inc. See COPYING for GPL terms. */ | 1 | /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ |
2 | /* | 2 | /* |
3 | * aoemain.c | 3 | * aoemain.c |
4 | * Module initialization routines, discover timer | 4 | * Module initialization routines, discover timer |
diff --git a/drivers/block/aoe/aoenet.c b/drivers/block/aoe/aoenet.c index 4e6deb7f5c24..8460ef736d56 100644 --- a/drivers/block/aoe/aoenet.c +++ b/drivers/block/aoe/aoenet.c | |||
@@ -1,4 +1,4 @@ | |||
1 | /* Copyright (c) 2006 Coraid, Inc. See COPYING for GPL terms. */ | 1 | /* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ |
2 | /* | 2 | /* |
3 | * aoenet.c | 3 | * aoenet.c |
4 | * Ethernet portion of AoE driver | 4 | * Ethernet portion of AoE driver |
@@ -83,7 +83,7 @@ set_aoe_iflist(const char __user *user_str, size_t size) | |||
83 | return 0; | 83 | return 0; |
84 | } | 84 | } |
85 | 85 | ||
86 | u64 | 86 | unsigned long long |
87 | mac_addr(char addr[6]) | 87 | mac_addr(char addr[6]) |
88 | { | 88 | { |
89 | __be64 n = 0; | 89 | __be64 n = 0; |
@@ -91,7 +91,7 @@ mac_addr(char addr[6]) | |||
91 | 91 | ||
92 | memcpy(p + 2, addr, 6); /* (sizeof addr != 6) */ | 92 | memcpy(p + 2, addr, 6); /* (sizeof addr != 6) */ |
93 | 93 | ||
94 | return __be64_to_cpu(n); | 94 | return (unsigned long long) __be64_to_cpu(n); |
95 | } | 95 | } |
96 | 96 | ||
97 | void | 97 | void |
@@ -137,9 +137,12 @@ aoenet_rcv(struct sk_buff *skb, struct net_device *ifp, struct packet_type *pt, | |||
137 | if (n > NECODES) | 137 | if (n > NECODES) |
138 | n = 0; | 138 | n = 0; |
139 | if (net_ratelimit()) | 139 | if (net_ratelimit()) |
140 | printk(KERN_ERR "aoe: error packet from %d.%d; ecode=%d '%s'\n", | 140 | printk(KERN_ERR |
141 | be16_to_cpu(get_unaligned(&h->major)), h->minor, | 141 | "%s%d.%d@%s; ecode=%d '%s'\n", |
142 | h->err, aoe_errlist[n]); | 142 | "aoe: error packet from ", |
143 | be16_to_cpu(get_unaligned(&h->major)), | ||
144 | h->minor, skb->dev->name, | ||
145 | h->err, aoe_errlist[n]); | ||
143 | goto exit; | 146 | goto exit; |
144 | } | 147 | } |
145 | 148 | ||
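
The mac_addr() change above only affects the return type: the six address bytes are copied into the tail of a zeroed __be64 and byte-swapped, so the MAC ends up in the low 48 bits of the result and can be printed with %012llx. The same value can be built with plain shifts, as in this standalone check (the sample address is arbitrary).

#include <stdio.h>

/* Pack a 6-byte MAC into the low 48 bits of an unsigned long long,
 * most significant byte first -- the value mac_addr() returns. */
static unsigned long long mac_to_ull(const unsigned char addr[6])
{
	unsigned long long n = 0;
	int i;

	for (i = 0; i < 6; i++)
		n = (n << 8) | addr[i];
	return n;
}

int main(void)
{
	const unsigned char mac[6] = { 0x00, 0x30, 0x48, 0x12, 0x34, 0x56 };

	printf("%012llx\n", mac_to_ull(mac));	/* prints 003048123456 */
	return 0;
}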
diff --git a/drivers/block/brd.c b/drivers/block/brd.c new file mode 100644 index 000000000000..85364804364f --- /dev/null +++ b/drivers/block/brd.c | |||
@@ -0,0 +1,583 @@ | |||
1 | /* | ||
2 | * Ram backed block device driver. | ||
3 | * | ||
4 | * Copyright (C) 2007 Nick Piggin | ||
5 | * Copyright (C) 2007 Novell Inc. | ||
6 | * | ||
7 | * Parts derived from drivers/block/rd.c, and drivers/block/loop.c, copyright | ||
8 | * of their respective owners. | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/module.h> | ||
13 | #include <linux/moduleparam.h> | ||
14 | #include <linux/major.h> | ||
15 | #include <linux/blkdev.h> | ||
16 | #include <linux/bio.h> | ||
17 | #include <linux/highmem.h> | ||
18 | #include <linux/gfp.h> | ||
19 | #include <linux/radix-tree.h> | ||
20 | #include <linux/buffer_head.h> /* invalidate_bh_lrus() */ | ||
21 | |||
22 | #include <asm/uaccess.h> | ||
23 | |||
24 | #define SECTOR_SHIFT 9 | ||
25 | #define PAGE_SECTORS_SHIFT (PAGE_SHIFT - SECTOR_SHIFT) | ||
26 | #define PAGE_SECTORS (1 << PAGE_SECTORS_SHIFT) | ||
27 | |||
28 | /* | ||
29 | * Each block ramdisk device has a radix_tree brd_pages of pages that stores | ||
30 | * the pages containing the block device's contents. A brd page's ->index is | ||
31 | * its offset in PAGE_SIZE units. This is similar to, but in no way connected | ||
32 | * with, the kernel's pagecache or buffer cache (which sit above our block | ||
33 | * device). | ||
34 | */ | ||
35 | struct brd_device { | ||
36 | int brd_number; | ||
37 | int brd_refcnt; | ||
38 | loff_t brd_offset; | ||
39 | loff_t brd_sizelimit; | ||
40 | unsigned brd_blocksize; | ||
41 | |||
42 | struct request_queue *brd_queue; | ||
43 | struct gendisk *brd_disk; | ||
44 | struct list_head brd_list; | ||
45 | |||
46 | /* | ||
47 | * Backing store of pages and lock to protect it. This is the contents | ||
48 | * of the block device. | ||
49 | */ | ||
50 | spinlock_t brd_lock; | ||
51 | struct radix_tree_root brd_pages; | ||
52 | }; | ||
53 | |||
54 | /* | ||
55 | * Look up and return a brd's page for a given sector. | ||
56 | */ | ||
57 | static struct page *brd_lookup_page(struct brd_device *brd, sector_t sector) | ||
58 | { | ||
59 | pgoff_t idx; | ||
60 | struct page *page; | ||
61 | |||
62 | /* | ||
63 | * The page lifetime is protected by the fact that we have opened the | ||
64 | * device node -- brd pages will never be deleted under us, so we | ||
65 | * don't need any further locking or refcounting. | ||
66 | * | ||
67 | * This is strictly true for the radix-tree nodes as well (ie. we | ||
68 | * don't actually need the rcu_read_lock()), however that is not a | ||
69 | * documented feature of the radix-tree API so it is better to be | ||
70 | * safe here (we don't have total exclusion from radix tree updates | ||
71 | * here, only deletes). | ||
72 | */ | ||
73 | rcu_read_lock(); | ||
74 | idx = sector >> PAGE_SECTORS_SHIFT; /* sector to page index */ | ||
75 | page = radix_tree_lookup(&brd->brd_pages, idx); | ||
76 | rcu_read_unlock(); | ||
77 | |||
78 | BUG_ON(page && page->index != idx); | ||
79 | |||
80 | return page; | ||
81 | } | ||
82 | |||
83 | /* | ||
84 | * Look up and return a brd's page for a given sector. | ||
85 | * If one does not exist, allocate an empty page, and insert that. Then | ||
86 | * return it. | ||
87 | */ | ||
88 | static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) | ||
89 | { | ||
90 | pgoff_t idx; | ||
91 | struct page *page; | ||
92 | gfp_t gfp_flags; | ||
93 | |||
94 | page = brd_lookup_page(brd, sector); | ||
95 | if (page) | ||
96 | return page; | ||
97 | |||
98 | /* | ||
99 | * Must use NOIO because we don't want to recurse back into the | ||
100 | * block or filesystem layers from page reclaim. | ||
101 | * | ||
102 | * Cannot support XIP and highmem, because our ->direct_access | ||
103 | * routine for XIP must return memory that is always addressable. | ||
104 | * If XIP was reworked to use pfns and kmap throughout, this | ||
105 | * restriction might be able to be lifted. | ||
106 | */ | ||
107 | gfp_flags = GFP_NOIO | __GFP_ZERO; | ||
108 | #ifndef CONFIG_BLK_DEV_XIP | ||
109 | gfp_flags |= __GFP_HIGHMEM; | ||
110 | #endif | ||
111 | page = alloc_page(gfp_flags); | ||
112 | if (!page) | ||
113 | return NULL; | ||
114 | |||
115 | if (radix_tree_preload(GFP_NOIO)) { | ||
116 | __free_page(page); | ||
117 | return NULL; | ||
118 | } | ||
119 | |||
120 | spin_lock(&brd->brd_lock); | ||
121 | idx = sector >> PAGE_SECTORS_SHIFT; | ||
122 | if (radix_tree_insert(&brd->brd_pages, idx, page)) { | ||
123 | __free_page(page); | ||
124 | page = radix_tree_lookup(&brd->brd_pages, idx); | ||
125 | BUG_ON(!page); | ||
126 | BUG_ON(page->index != idx); | ||
127 | } else | ||
128 | page->index = idx; | ||
129 | spin_unlock(&brd->brd_lock); | ||
130 | |||
131 | radix_tree_preload_end(); | ||
132 | |||
133 | return page; | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * Free all backing store pages and radix tree. This must only be called when | ||
138 | * there are no other users of the device. | ||
139 | */ | ||
140 | #define FREE_BATCH 16 | ||
141 | static void brd_free_pages(struct brd_device *brd) | ||
142 | { | ||
143 | unsigned long pos = 0; | ||
144 | struct page *pages[FREE_BATCH]; | ||
145 | int nr_pages; | ||
146 | |||
147 | do { | ||
148 | int i; | ||
149 | |||
150 | nr_pages = radix_tree_gang_lookup(&brd->brd_pages, | ||
151 | (void **)pages, pos, FREE_BATCH); | ||
152 | |||
153 | for (i = 0; i < nr_pages; i++) { | ||
154 | void *ret; | ||
155 | |||
156 | BUG_ON(pages[i]->index < pos); | ||
157 | pos = pages[i]->index; | ||
158 | ret = radix_tree_delete(&brd->brd_pages, pos); | ||
159 | BUG_ON(!ret || ret != pages[i]); | ||
160 | __free_page(pages[i]); | ||
161 | } | ||
162 | |||
163 | pos++; | ||
164 | |||
165 | /* | ||
166 | * This assumes radix_tree_gang_lookup always returns as | ||
167 | * many pages as possible. If the radix-tree code changes, | ||
168 | * so will this have to. | ||
169 | */ | ||
170 | } while (nr_pages == FREE_BATCH); | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * copy_to_brd_setup must be called before copy_to_brd. It may sleep. | ||
175 | */ | ||
176 | static int copy_to_brd_setup(struct brd_device *brd, sector_t sector, size_t n) | ||
177 | { | ||
178 | unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; | ||
179 | size_t copy; | ||
180 | |||
181 | copy = min_t(size_t, n, PAGE_SIZE - offset); | ||
182 | if (!brd_insert_page(brd, sector)) | ||
183 | return -ENOMEM; | ||
184 | if (copy < n) { | ||
185 | sector += copy >> SECTOR_SHIFT; | ||
186 | if (!brd_insert_page(brd, sector)) | ||
187 | return -ENOMEM; | ||
188 | } | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | /* | ||
193 | * Copy n bytes from src to the brd starting at sector. Does not sleep. | ||
194 | */ | ||
195 | static void copy_to_brd(struct brd_device *brd, const void *src, | ||
196 | sector_t sector, size_t n) | ||
197 | { | ||
198 | struct page *page; | ||
199 | void *dst; | ||
200 | unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; | ||
201 | size_t copy; | ||
202 | |||
203 | copy = min_t(size_t, n, PAGE_SIZE - offset); | ||
204 | page = brd_lookup_page(brd, sector); | ||
205 | BUG_ON(!page); | ||
206 | |||
207 | dst = kmap_atomic(page, KM_USER1); | ||
208 | memcpy(dst + offset, src, copy); | ||
209 | kunmap_atomic(dst, KM_USER1); | ||
210 | |||
211 | if (copy < n) { | ||
212 | src += copy; | ||
213 | sector += copy >> SECTOR_SHIFT; | ||
214 | copy = n - copy; | ||
215 | page = brd_lookup_page(brd, sector); | ||
216 | BUG_ON(!page); | ||
217 | |||
218 | dst = kmap_atomic(page, KM_USER1); | ||
219 | memcpy(dst, src, copy); | ||
220 | kunmap_atomic(dst, KM_USER1); | ||
221 | } | ||
222 | } | ||
223 | |||
224 | /* | ||
225 | * Copy n bytes to dst from the brd starting at sector. Does not sleep. | ||
226 | */ | ||
227 | static void copy_from_brd(void *dst, struct brd_device *brd, | ||
228 | sector_t sector, size_t n) | ||
229 | { | ||
230 | struct page *page; | ||
231 | void *src; | ||
232 | unsigned int offset = (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT; | ||
233 | size_t copy; | ||
234 | |||
235 | copy = min_t(size_t, n, PAGE_SIZE - offset); | ||
236 | page = brd_lookup_page(brd, sector); | ||
237 | if (page) { | ||
238 | src = kmap_atomic(page, KM_USER1); | ||
239 | memcpy(dst, src + offset, copy); | ||
240 | kunmap_atomic(src, KM_USER1); | ||
241 | } else | ||
242 | memset(dst, 0, copy); | ||
243 | |||
244 | if (copy < n) { | ||
245 | dst += copy; | ||
246 | sector += copy >> SECTOR_SHIFT; | ||
247 | copy = n - copy; | ||
248 | page = brd_lookup_page(brd, sector); | ||
249 | if (page) { | ||
250 | src = kmap_atomic(page, KM_USER1); | ||
251 | memcpy(dst, src, copy); | ||
252 | kunmap_atomic(src, KM_USER1); | ||
253 | } else | ||
254 | memset(dst, 0, copy); | ||
255 | } | ||
256 | } | ||
257 | |||
258 | /* | ||
259 | * Process a single bvec of a bio. | ||
260 | */ | ||
261 | static int brd_do_bvec(struct brd_device *brd, struct page *page, | ||
262 | unsigned int len, unsigned int off, int rw, | ||
263 | sector_t sector) | ||
264 | { | ||
265 | void *mem; | ||
266 | int err = 0; | ||
267 | |||
268 | if (rw != READ) { | ||
269 | err = copy_to_brd_setup(brd, sector, len); | ||
270 | if (err) | ||
271 | goto out; | ||
272 | } | ||
273 | |||
274 | mem = kmap_atomic(page, KM_USER0); | ||
275 | if (rw == READ) { | ||
276 | copy_from_brd(mem + off, brd, sector, len); | ||
277 | flush_dcache_page(page); | ||
278 | } else | ||
279 | copy_to_brd(brd, mem + off, sector, len); | ||
280 | kunmap_atomic(mem, KM_USER0); | ||
281 | |||
282 | out: | ||
283 | return err; | ||
284 | } | ||
285 | |||
286 | static int brd_make_request(struct request_queue *q, struct bio *bio) | ||
287 | { | ||
288 | struct block_device *bdev = bio->bi_bdev; | ||
289 | struct brd_device *brd = bdev->bd_disk->private_data; | ||
290 | int rw; | ||
291 | struct bio_vec *bvec; | ||
292 | sector_t sector; | ||
293 | int i; | ||
294 | int err = -EIO; | ||
295 | |||
296 | sector = bio->bi_sector; | ||
297 | if (sector + (bio->bi_size >> SECTOR_SHIFT) > | ||
298 | get_capacity(bdev->bd_disk)) | ||
299 | goto out; | ||
300 | |||
301 | rw = bio_rw(bio); | ||
302 | if (rw == READA) | ||
303 | rw = READ; | ||
304 | |||
305 | bio_for_each_segment(bvec, bio, i) { | ||
306 | unsigned int len = bvec->bv_len; | ||
307 | err = brd_do_bvec(brd, bvec->bv_page, len, | ||
308 | bvec->bv_offset, rw, sector); | ||
309 | if (err) | ||
310 | break; | ||
311 | sector += len >> SECTOR_SHIFT; | ||
312 | } | ||
313 | |||
314 | out: | ||
315 | bio_endio(bio, err); | ||
316 | |||
317 | return 0; | ||
318 | } | ||
319 | |||
320 | #ifdef CONFIG_BLK_DEV_XIP | ||
321 | static int brd_direct_access (struct block_device *bdev, sector_t sector, | ||
322 | unsigned long *data) | ||
323 | { | ||
324 | struct brd_device *brd = bdev->bd_disk->private_data; | ||
325 | struct page *page; | ||
326 | |||
327 | if (!brd) | ||
328 | return -ENODEV; | ||
329 | if (sector & (PAGE_SECTORS-1)) | ||
330 | return -EINVAL; | ||
331 | if (sector + PAGE_SECTORS > get_capacity(bdev->bd_disk)) | ||
332 | return -ERANGE; | ||
333 | page = brd_insert_page(brd, sector); | ||
334 | if (!page) | ||
335 | return -ENOMEM; | ||
336 | *data = (unsigned long)page_address(page); | ||
337 | |||
338 | return 0; | ||
339 | } | ||
340 | #endif | ||
341 | |||
342 | static int brd_ioctl(struct inode *inode, struct file *file, | ||
343 | unsigned int cmd, unsigned long arg) | ||
344 | { | ||
345 | int error; | ||
346 | struct block_device *bdev = inode->i_bdev; | ||
347 | struct brd_device *brd = bdev->bd_disk->private_data; | ||
348 | |||
349 | if (cmd != BLKFLSBUF) | ||
350 | return -ENOTTY; | ||
351 | |||
352 | /* | ||
353 | * ram device BLKFLSBUF has special semantics, we want to actually | ||
354 | * release and destroy the ramdisk data. | ||
355 | */ | ||
356 | mutex_lock(&bdev->bd_mutex); | ||
357 | error = -EBUSY; | ||
358 | if (bdev->bd_openers <= 1) { | ||
359 | /* | ||
360 | * Invalidate the cache first, so it isn't written | ||
361 | * back to the device. | ||
362 | * | ||
363 | * Another thread might instantiate more buffercache here, | ||
364 | * but there is not much we can do to close that race. | ||
365 | */ | ||
366 | invalidate_bh_lrus(); | ||
367 | truncate_inode_pages(bdev->bd_inode->i_mapping, 0); | ||
368 | brd_free_pages(brd); | ||
369 | error = 0; | ||
370 | } | ||
371 | mutex_unlock(&bdev->bd_mutex); | ||
372 | |||
373 | return error; | ||
374 | } | ||
375 | |||
376 | static struct block_device_operations brd_fops = { | ||
377 | .owner = THIS_MODULE, | ||
378 | .ioctl = brd_ioctl, | ||
379 | #ifdef CONFIG_BLK_DEV_XIP | ||
380 | .direct_access = brd_direct_access, | ||
381 | #endif | ||
382 | }; | ||
383 | |||
384 | /* | ||
385 | * And now the modules code and kernel interface. | ||
386 | */ | ||
387 | static int rd_nr; | ||
388 | int rd_size = CONFIG_BLK_DEV_RAM_SIZE; | ||
389 | module_param(rd_nr, int, 0); | ||
390 | MODULE_PARM_DESC(rd_nr, "Maximum number of brd devices"); | ||
391 | module_param(rd_size, int, 0); | ||
392 | MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes."); | ||
393 | MODULE_LICENSE("GPL"); | ||
394 | MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR); | ||
395 | |||
396 | #ifndef MODULE | ||
397 | /* Legacy boot options - nonmodular */ | ||
398 | static int __init ramdisk_size(char *str) | ||
399 | { | ||
400 | rd_size = simple_strtol(str, NULL, 0); | ||
401 | return 1; | ||
402 | } | ||
403 | static int __init ramdisk_size2(char *str) | ||
404 | { | ||
405 | return ramdisk_size(str); | ||
406 | } | ||
407 | __setup("ramdisk=", ramdisk_size); | ||
408 | __setup("ramdisk_size=", ramdisk_size2); | ||
409 | #endif | ||
410 | |||
411 | /* | ||
412 | * The device scheme is derived from loop.c. Keep them in synch where possible | ||
413 | * (should share code eventually). | ||
414 | */ | ||
415 | static LIST_HEAD(brd_devices); | ||
416 | static DEFINE_MUTEX(brd_devices_mutex); | ||
417 | |||
418 | static struct brd_device *brd_alloc(int i) | ||
419 | { | ||
420 | struct brd_device *brd; | ||
421 | struct gendisk *disk; | ||
422 | |||
423 | brd = kzalloc(sizeof(*brd), GFP_KERNEL); | ||
424 | if (!brd) | ||
425 | goto out; | ||
426 | brd->brd_number = i; | ||
427 | spin_lock_init(&brd->brd_lock); | ||
428 | INIT_RADIX_TREE(&brd->brd_pages, GFP_ATOMIC); | ||
429 | |||
430 | brd->brd_queue = blk_alloc_queue(GFP_KERNEL); | ||
431 | if (!brd->brd_queue) | ||
432 | goto out_free_dev; | ||
433 | blk_queue_make_request(brd->brd_queue, brd_make_request); | ||
434 | blk_queue_max_sectors(brd->brd_queue, 1024); | ||
435 | blk_queue_bounce_limit(brd->brd_queue, BLK_BOUNCE_ANY); | ||
436 | |||
437 | disk = brd->brd_disk = alloc_disk(1); | ||
438 | if (!disk) | ||
439 | goto out_free_queue; | ||
440 | disk->major = RAMDISK_MAJOR; | ||
441 | disk->first_minor = i; | ||
442 | disk->fops = &brd_fops; | ||
443 | disk->private_data = brd; | ||
444 | disk->queue = brd->brd_queue; | ||
445 | sprintf(disk->disk_name, "ram%d", i); | ||
446 | set_capacity(disk, rd_size * 2); | ||
447 | |||
448 | return brd; | ||
449 | |||
450 | out_free_queue: | ||
451 | blk_cleanup_queue(brd->brd_queue); | ||
452 | out_free_dev: | ||
453 | kfree(brd); | ||
454 | out: | ||
455 | return NULL; | ||
456 | } | ||
457 | |||
458 | static void brd_free(struct brd_device *brd) | ||
459 | { | ||
460 | put_disk(brd->brd_disk); | ||
461 | blk_cleanup_queue(brd->brd_queue); | ||
462 | brd_free_pages(brd); | ||
463 | kfree(brd); | ||
464 | } | ||
465 | |||
466 | static struct brd_device *brd_init_one(int i) | ||
467 | { | ||
468 | struct brd_device *brd; | ||
469 | |||
470 | list_for_each_entry(brd, &brd_devices, brd_list) { | ||
471 | if (brd->brd_number == i) | ||
472 | goto out; | ||
473 | } | ||
474 | |||
475 | brd = brd_alloc(i); | ||
476 | if (brd) { | ||
477 | add_disk(brd->brd_disk); | ||
478 | list_add_tail(&brd->brd_list, &brd_devices); | ||
479 | } | ||
480 | out: | ||
481 | return brd; | ||
482 | } | ||
483 | |||
484 | static void brd_del_one(struct brd_device *brd) | ||
485 | { | ||
486 | list_del(&brd->brd_list); | ||
487 | del_gendisk(brd->brd_disk); | ||
488 | brd_free(brd); | ||
489 | } | ||
490 | |||
491 | static struct kobject *brd_probe(dev_t dev, int *part, void *data) | ||
492 | { | ||
493 | struct brd_device *brd; | ||
494 | struct kobject *kobj; | ||
495 | |||
496 | mutex_lock(&brd_devices_mutex); | ||
497 | brd = brd_init_one(dev & MINORMASK); | ||
498 | kobj = brd ? get_disk(brd->brd_disk) : ERR_PTR(-ENOMEM); | ||
499 | mutex_unlock(&brd_devices_mutex); | ||
500 | |||
501 | *part = 0; | ||
502 | return kobj; | ||
503 | } | ||
504 | |||
505 | static int __init brd_init(void) | ||
506 | { | ||
507 | int i, nr; | ||
508 | unsigned long range; | ||
509 | struct brd_device *brd, *next; | ||
510 | |||
511 | /* | ||
512 | * The brd module now has a feature to instantiate the underlying device | ||
513 | * structure on demand, provided that its device node is accessed. | ||
514 | * However, this will not work well with user space tools that don't | ||
515 | * know about such a "feature". In order not to break any existing | ||
516 | * tools, we do the following: | ||
517 | * | ||
518 | * (1) if rd_nr is specified, create that many devices upfront; this | ||
519 | * also becomes a hard limit. | ||
520 | * (2) if rd_nr is not specified, create 1 rd device on module | ||
521 | * load; the user can further extend the set of brd devices by | ||
522 | * creating device nodes themselves and having the kernel | ||
523 | * instantiate the actual device on demand. | ||
524 | */ | ||
525 | if (rd_nr > 1UL << MINORBITS) | ||
526 | return -EINVAL; | ||
527 | |||
528 | if (rd_nr) { | ||
529 | nr = rd_nr; | ||
530 | range = rd_nr; | ||
531 | } else { | ||
532 | nr = CONFIG_BLK_DEV_RAM_COUNT; | ||
533 | range = 1UL << MINORBITS; | ||
534 | } | ||
535 | |||
536 | if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) | ||
537 | return -EIO; | ||
538 | |||
539 | for (i = 0; i < nr; i++) { | ||
540 | brd = brd_alloc(i); | ||
541 | if (!brd) | ||
542 | goto out_free; | ||
543 | list_add_tail(&brd->brd_list, &brd_devices); | ||
544 | } | ||
545 | |||
546 | /* point of no return */ | ||
547 | |||
548 | list_for_each_entry(brd, &brd_devices, brd_list) | ||
549 | add_disk(brd->brd_disk); | ||
550 | |||
551 | blk_register_region(MKDEV(RAMDISK_MAJOR, 0), range, | ||
552 | THIS_MODULE, brd_probe, NULL, NULL); | ||
553 | |||
554 | printk(KERN_INFO "brd: module loaded\n"); | ||
555 | return 0; | ||
556 | |||
557 | out_free: | ||
558 | list_for_each_entry_safe(brd, next, &brd_devices, brd_list) { | ||
559 | list_del(&brd->brd_list); | ||
560 | brd_free(brd); | ||
561 | } | ||
562 | |||
563 | unregister_blkdev(RAMDISK_MAJOR, "ramdisk"); | ||
564 | return -ENOMEM; | ||
565 | } | ||
566 | |||
567 | static void __exit brd_exit(void) | ||
568 | { | ||
569 | unsigned long range; | ||
570 | struct brd_device *brd, *next; | ||
571 | |||
572 | range = rd_nr ? rd_nr : 1UL << MINORBITS; | ||
573 | |||
574 | list_for_each_entry_safe(brd, next, &brd_devices, brd_list) | ||
575 | brd_del_one(brd); | ||
576 | |||
577 | blk_unregister_region(MKDEV(RAMDISK_MAJOR, 0), range); | ||
578 | unregister_blkdev(RAMDISK_MAJOR, "ramdisk"); | ||
579 | } | ||
580 | |||
581 | module_init(brd_init); | ||
582 | module_exit(brd_exit); | ||
583 | |||
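
brd.c addresses its backing pages by sector: the radix-tree index is sector >> PAGE_SECTORS_SHIFT and the byte offset inside that page is (sector & (PAGE_SECTORS-1)) << SECTOR_SHIFT. A small standalone check of that arithmetic, assuming 4096-byte pages for the example (in the kernel, PAGE_SHIFT is whatever the architecture defines):

#include <stdio.h>

#define SECTOR_SHIFT		9
#define EX_PAGE_SHIFT		12	/* assume 4096-byte pages for this example */
#define PAGE_SECTORS_SHIFT	(EX_PAGE_SHIFT - SECTOR_SHIFT)
#define PAGE_SECTORS		(1UL << PAGE_SECTORS_SHIFT)

int main(void)
{
	unsigned long long sector = 21;		/* arbitrary example sector */
	unsigned long long idx;
	unsigned long off;

	idx = sector >> PAGE_SECTORS_SHIFT;			/* radix-tree index */
	off = (sector & (PAGE_SECTORS - 1)) << SECTOR_SHIFT;	/* byte offset in page */

	printf("sector %llu -> page index %llu, byte offset %lu\n",
	       sector, idx, off);
	/* sector 21: sectors 16..23 share page 2; offset is (21 & 7) * 512 = 2560 */
	return 0;
}

On a loaded module, the disk count and size come from the rd_nr and rd_size (kbytes) parameters shown above; something like "modprobe brd rd_nr=4 rd_size=16384" should create four 16 MB ram disks.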
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index ae3106045ee5..018753c59b8e 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c | |||
@@ -54,7 +54,7 @@ static unsigned int debugflags; | |||
54 | #endif /* NDEBUG */ | 54 | #endif /* NDEBUG */ |
55 | 55 | ||
56 | static unsigned int nbds_max = 16; | 56 | static unsigned int nbds_max = 16; |
57 | static struct nbd_device nbd_dev[MAX_NBD]; | 57 | static struct nbd_device *nbd_dev; |
58 | 58 | ||
59 | /* | 59 | /* |
60 | * Use just one lock (or at most 1 per NIC). Two arguments for this: | 60 | * Use just one lock (or at most 1 per NIC). Two arguments for this: |
@@ -649,11 +649,9 @@ static int __init nbd_init(void) | |||
649 | 649 | ||
650 | BUILD_BUG_ON(sizeof(struct nbd_request) != 28); | 650 | BUILD_BUG_ON(sizeof(struct nbd_request) != 28); |
651 | 651 | ||
652 | if (nbds_max > MAX_NBD) { | 652 | nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); |
653 | printk(KERN_CRIT "nbd: cannot allocate more than %u nbds; %u requested.\n", MAX_NBD, | 653 | if (!nbd_dev) |
654 | nbds_max); | 654 | return -ENOMEM; |
655 | return -EINVAL; | ||
656 | } | ||
657 | 655 | ||
658 | for (i = 0; i < nbds_max; i++) { | 656 | for (i = 0; i < nbds_max; i++) { |
659 | struct gendisk *disk = alloc_disk(1); | 657 | struct gendisk *disk = alloc_disk(1); |
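
The nbd.c change replaces the compile-time nbd_dev[MAX_NBD] array with a kcalloc() of nbds_max elements at init time, so the requested device count no longer has a hard upper bound. A userspace sketch of the same allocate-at-init pattern; the struct contents and the count are illustrative only:

#include <stdio.h>
#include <stdlib.h>

struct nbd_like {
	int index;			/* illustrative; the real struct holds device state */
};

static struct nbd_like *devs;
static unsigned int ndevs = 16;		/* runtime-settable, like nbds_max */

static int init_devs(void)
{
	unsigned int i;

	devs = calloc(ndevs, sizeof(*devs));	/* zeroed array, like kcalloc() */
	if (!devs)
		return -1;			/* the driver returns -ENOMEM here */
	for (i = 0; i < ndevs; i++)
		devs[i].index = i;
	return 0;
}

int main(void)
{
	if (init_devs()) {
		fprintf(stderr, "allocation failed\n");
		return 1;
	}
	printf("initialised %u devices\n", ndevs);
	free(devs);
	return 0;
}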
diff --git a/drivers/block/rd.c b/drivers/block/rd.c deleted file mode 100644 index 06e23be70904..000000000000 --- a/drivers/block/rd.c +++ /dev/null | |||
@@ -1,537 +0,0 @@ | |||
1 | /* | ||
2 | * ramdisk.c - Multiple RAM disk driver - gzip-loading version - v. 0.8 beta. | ||
3 | * | ||
4 | * (C) Chad Page, Theodore Ts'o, et. al, 1995. | ||
5 | * | ||
6 | * This RAM disk is designed to have filesystems created on it and mounted | ||
7 | * just like a regular floppy disk. | ||
8 | * | ||
9 | * It also does something suggested by Linus: use the buffer cache as the | ||
10 | * RAM disk data. This makes it possible to dynamically allocate the RAM disk | ||
11 | * buffer - with some consequences I have to deal with as I write this. | ||
12 | * | ||
13 | * This code is based on the original ramdisk.c, written mostly by | ||
14 | * Theodore Ts'o (TYT) in 1991. The code was largely rewritten by | ||
15 | * Chad Page to use the buffer cache to store the RAM disk data in | ||
16 | * 1995; Theodore then took over the driver again, and cleaned it up | ||
17 | * for inclusion in the mainline kernel. | ||
18 | * | ||
19 | * The original CRAMDISK code was written by Richard Lyons, and | ||
20 | * adapted by Chad Page to use the new RAM disk interface. Theodore | ||
21 | * Ts'o rewrote it so that both the compressed RAM disk loader and the | ||
22 | * kernel decompressor uses the same inflate.c codebase. The RAM disk | ||
23 | * loader now also loads into a dynamic (buffer cache based) RAM disk, | ||
24 | * not the old static RAM disk. Support for the old static RAM disk has | ||
25 | * been completely removed. | ||
26 | * | ||
27 | * Loadable module support added by Tom Dyas. | ||
28 | * | ||
29 | * Further cleanups by Chad Page (page0588@sundance.sjsu.edu): | ||
30 | * Cosmetic changes in #ifdef MODULE, code movement, etc. | ||
31 | * When the RAM disk module is removed, free the protected buffers | ||
32 | * Default RAM disk size changed to 2.88 MB | ||
33 | * | ||
34 | * Added initrd: Werner Almesberger & Hans Lermen, Feb '96 | ||
35 | * | ||
36 | * 4/25/96 : Made RAM disk size a parameter (default is now 4 MB) | ||
37 | * - Chad Page | ||
38 | * | ||
39 | * Add support for fs images split across >1 disk, Paul Gortmaker, Mar '98 | ||
40 | * | ||
41 | * Make block size and block size shift for RAM disks a global macro | ||
42 | * and set blk_size for -ENOSPC, Werner Fink <werner@suse.de>, Apr '99 | ||
43 | */ | ||
44 | |||
45 | #include <linux/string.h> | ||
46 | #include <linux/slab.h> | ||
47 | #include <asm/atomic.h> | ||
48 | #include <linux/bio.h> | ||
49 | #include <linux/module.h> | ||
50 | #include <linux/moduleparam.h> | ||
51 | #include <linux/init.h> | ||
52 | #include <linux/pagemap.h> | ||
53 | #include <linux/blkdev.h> | ||
54 | #include <linux/genhd.h> | ||
55 | #include <linux/buffer_head.h> /* for invalidate_bdev() */ | ||
56 | #include <linux/backing-dev.h> | ||
57 | #include <linux/blkpg.h> | ||
58 | #include <linux/writeback.h> | ||
59 | #include <linux/log2.h> | ||
60 | |||
61 | #include <asm/uaccess.h> | ||
62 | |||
63 | /* Various static variables go here. Most are used only in the RAM disk code. | ||
64 | */ | ||
65 | |||
66 | static struct gendisk *rd_disks[CONFIG_BLK_DEV_RAM_COUNT]; | ||
67 | static struct block_device *rd_bdev[CONFIG_BLK_DEV_RAM_COUNT];/* Protected device data */ | ||
68 | static struct request_queue *rd_queue[CONFIG_BLK_DEV_RAM_COUNT]; | ||
69 | |||
70 | /* | ||
71 | * Parameters for the boot-loading of the RAM disk. These are set by | ||
72 | * init/main.c (from arguments to the kernel command line) or from the | ||
73 | * architecture-specific setup routine (from the stored boot sector | ||
74 | * information). | ||
75 | */ | ||
76 | int rd_size = CONFIG_BLK_DEV_RAM_SIZE; /* Size of the RAM disks */ | ||
77 | /* | ||
78 | * It would be very desirable to have a soft-blocksize (that in the case | ||
79 | * of the ramdisk driver is also the hardblocksize ;) of PAGE_SIZE because | ||
80 | * doing that we'll achieve a far better MM footprint. Using a rd_blocksize of | ||
81 | * BLOCK_SIZE in the worst case we'll make PAGE_SIZE/BLOCK_SIZE buffer-pages | ||
82 | * unfreeable. With a rd_blocksize of PAGE_SIZE instead we are sure that only | ||
83 | * 1 page will be protected. Depending on the size of the ramdisk you | ||
84 | * may want to change the ramdisk blocksize to achieve a better or worse MM | ||
85 | * behaviour. The default is still BLOCK_SIZE (needed by rd_load_image that | ||
86 | * supposes the filesystem in the image uses a BLOCK_SIZE blocksize). | ||
87 | */ | ||
88 | static int rd_blocksize = CONFIG_BLK_DEV_RAM_BLOCKSIZE; | ||
89 | |||
90 | /* | ||
91 | * Copyright (C) 2000 Linus Torvalds. | ||
92 | * 2000 Transmeta Corp. | ||
93 | * aops copied from ramfs. | ||
94 | */ | ||
95 | |||
96 | /* | ||
97 | * If a ramdisk page has buffers, some may be uptodate and some may be not. | ||
98 | * To bring the page uptodate we zero out the non-uptodate buffers. The | ||
99 | * page must be locked. | ||
100 | */ | ||
101 | static void make_page_uptodate(struct page *page) | ||
102 | { | ||
103 | if (page_has_buffers(page)) { | ||
104 | struct buffer_head *bh = page_buffers(page); | ||
105 | struct buffer_head *head = bh; | ||
106 | |||
107 | do { | ||
108 | if (!buffer_uptodate(bh)) { | ||
109 | memset(bh->b_data, 0, bh->b_size); | ||
110 | /* | ||
111 | * akpm: I'm totally undecided about this. The | ||
112 | * buffer has just been magically brought "up to | ||
113 | * date", but nobody should want to be reading | ||
114 | * it anyway, because it hasn't been used for | ||
115 | * anything yet. It is still in a "not read | ||
116 | * from disk yet" state. | ||
117 | * | ||
118 | * But non-uptodate buffers against an uptodate | ||
119 | * page are against the rules. So do it anyway. | ||
120 | */ | ||
121 | set_buffer_uptodate(bh); | ||
122 | } | ||
123 | } while ((bh = bh->b_this_page) != head); | ||
124 | } else { | ||
125 | memset(page_address(page), 0, PAGE_CACHE_SIZE); | ||
126 | } | ||
127 | flush_dcache_page(page); | ||
128 | SetPageUptodate(page); | ||
129 | } | ||
130 | |||
131 | static int ramdisk_readpage(struct file *file, struct page *page) | ||
132 | { | ||
133 | if (!PageUptodate(page)) | ||
134 | make_page_uptodate(page); | ||
135 | unlock_page(page); | ||
136 | return 0; | ||
137 | } | ||
138 | |||
139 | static int ramdisk_prepare_write(struct file *file, struct page *page, | ||
140 | unsigned offset, unsigned to) | ||
141 | { | ||
142 | if (!PageUptodate(page)) | ||
143 | make_page_uptodate(page); | ||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | static int ramdisk_commit_write(struct file *file, struct page *page, | ||
148 | unsigned offset, unsigned to) | ||
149 | { | ||
150 | set_page_dirty(page); | ||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | /* | ||
155 | * ->writepage to the blockdev's mapping has to redirty the page so that the | ||
156 | * VM doesn't go and steal it. We return AOP_WRITEPAGE_ACTIVATE so that the VM | ||
157 | * won't try to (pointlessly) write the page again for a while. | ||
158 | * | ||
159 | * Really, these pages should not be on the LRU at all. | ||
160 | */ | ||
161 | static int ramdisk_writepage(struct page *page, struct writeback_control *wbc) | ||
162 | { | ||
163 | if (!PageUptodate(page)) | ||
164 | make_page_uptodate(page); | ||
165 | SetPageDirty(page); | ||
166 | if (wbc->for_reclaim) | ||
167 | return AOP_WRITEPAGE_ACTIVATE; | ||
168 | unlock_page(page); | ||
169 | return 0; | ||
170 | } | ||
171 | |||
172 | /* | ||
173 | * This is a little speedup thing: short-circuit attempts to write back the | ||
174 | * ramdisk blockdev inode to its non-existent backing store. | ||
175 | */ | ||
176 | static int ramdisk_writepages(struct address_space *mapping, | ||
177 | struct writeback_control *wbc) | ||
178 | { | ||
179 | return 0; | ||
180 | } | ||
181 | |||
182 | /* | ||
183 | * ramdisk blockdev pages have their own ->set_page_dirty() because we don't | ||
184 | * want them to contribute to dirty memory accounting. | ||
185 | */ | ||
186 | static int ramdisk_set_page_dirty(struct page *page) | ||
187 | { | ||
188 | if (!TestSetPageDirty(page)) | ||
189 | return 1; | ||
190 | return 0; | ||
191 | } | ||
192 | |||
193 | /* | ||
194 | * releasepage is called by pagevec_strip/try_to_release_page if | ||
195 | * buffers_heads_over_limit is true. Without a releasepage function | ||
196 | * try_to_free_buffers is called instead. That can unset the dirty | ||
197 | * bit of our ram disk pages, which will be eventually freed, even | ||
198 | * if the page is still in use. | ||
199 | */ | ||
200 | static int ramdisk_releasepage(struct page *page, gfp_t dummy) | ||
201 | { | ||
202 | return 0; | ||
203 | } | ||
204 | |||
205 | static const struct address_space_operations ramdisk_aops = { | ||
206 | .readpage = ramdisk_readpage, | ||
207 | .prepare_write = ramdisk_prepare_write, | ||
208 | .commit_write = ramdisk_commit_write, | ||
209 | .writepage = ramdisk_writepage, | ||
210 | .set_page_dirty = ramdisk_set_page_dirty, | ||
211 | .writepages = ramdisk_writepages, | ||
212 | .releasepage = ramdisk_releasepage, | ||
213 | }; | ||
214 | |||
215 | static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector, | ||
216 | struct address_space *mapping) | ||
217 | { | ||
218 | pgoff_t index = sector >> (PAGE_CACHE_SHIFT - 9); | ||
219 | unsigned int vec_offset = vec->bv_offset; | ||
220 | int offset = (sector << 9) & ~PAGE_CACHE_MASK; | ||
221 | int size = vec->bv_len; | ||
222 | int err = 0; | ||
223 | |||
224 | do { | ||
225 | int count; | ||
226 | struct page *page; | ||
227 | char *src; | ||
228 | char *dst; | ||
229 | |||
230 | count = PAGE_CACHE_SIZE - offset; | ||
231 | if (count > size) | ||
232 | count = size; | ||
233 | size -= count; | ||
234 | |||
235 | page = grab_cache_page(mapping, index); | ||
236 | if (!page) { | ||
237 | err = -ENOMEM; | ||
238 | goto out; | ||
239 | } | ||
240 | |||
241 | if (!PageUptodate(page)) | ||
242 | make_page_uptodate(page); | ||
243 | |||
244 | index++; | ||
245 | |||
246 | if (rw == READ) { | ||
247 | src = kmap_atomic(page, KM_USER0) + offset; | ||
248 | dst = kmap_atomic(vec->bv_page, KM_USER1) + vec_offset; | ||
249 | } else { | ||
250 | src = kmap_atomic(vec->bv_page, KM_USER0) + vec_offset; | ||
251 | dst = kmap_atomic(page, KM_USER1) + offset; | ||
252 | } | ||
253 | offset = 0; | ||
254 | vec_offset += count; | ||
255 | |||
256 | memcpy(dst, src, count); | ||
257 | |||
258 | kunmap_atomic(src, KM_USER0); | ||
259 | kunmap_atomic(dst, KM_USER1); | ||
260 | |||
261 | if (rw == READ) | ||
262 | flush_dcache_page(vec->bv_page); | ||
263 | else | ||
264 | set_page_dirty(page); | ||
265 | unlock_page(page); | ||
266 | put_page(page); | ||
267 | } while (size); | ||
268 | |||
269 | out: | ||
270 | return err; | ||
271 | } | ||
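The index/offset arithmetic used in rd_blkdev_pagecache_IO() maps a 512-byte sector onto a page-cache page: the page index is the sector shifted down by (PAGE_SHIFT - 9) and the byte offset is the low bits of sector * 512, after which the segment is split at page boundaries. A standalone sketch of that split, assuming 4 KiB pages (the constants and sample values below are chosen for illustration only):

#include <stdio.h>

#define SECTOR_SHIFT 9
#define PAGE_SHIFT   12			/* assume 4 KiB pages */
#define PAGE_SIZE    (1UL << PAGE_SHIFT)
#define PAGE_MASK    (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long long sector = 1027;	/* arbitrary example sector */
	unsigned int len = 3000;		/* bytes in this segment */

	unsigned long index  = sector >> (PAGE_SHIFT - SECTOR_SHIFT);
	unsigned int  offset = (sector << SECTOR_SHIFT) & ~PAGE_MASK;

	/* Split the segment at page boundaries, as the copy loop does. */
	while (len) {
		unsigned int count = PAGE_SIZE - offset;
		if (count > len)
			count = len;
		printf("page %lu: copy %u bytes at offset %u\n",
		       index, count, offset);
		len -= count;
		offset = 0;
		index++;
	}
	return 0;
}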
272 | |||
273 | /* | ||
274 | * Basically, my strategy here is to set up a buffer-head which can't be | ||
275 | * deleted, and make that my Ramdisk. If the request is outside of the | ||
276 | * allocated size, we must get rid of it... | ||
277 | * | ||
278 | * 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Added devfs support | ||
279 | * | ||
280 | */ | ||
281 | static int rd_make_request(struct request_queue *q, struct bio *bio) | ||
282 | { | ||
283 | struct block_device *bdev = bio->bi_bdev; | ||
284 | struct address_space * mapping = bdev->bd_inode->i_mapping; | ||
285 | sector_t sector = bio->bi_sector; | ||
286 | unsigned long len = bio->bi_size >> 9; | ||
287 | int rw = bio_data_dir(bio); | ||
288 | struct bio_vec *bvec; | ||
289 | int ret = 0, i; | ||
290 | |||
291 | if (sector + len > get_capacity(bdev->bd_disk)) | ||
292 | goto fail; | ||
293 | |||
294 | if (rw==READA) | ||
295 | rw=READ; | ||
296 | |||
297 | bio_for_each_segment(bvec, bio, i) { | ||
298 | ret |= rd_blkdev_pagecache_IO(rw, bvec, sector, mapping); | ||
299 | sector += bvec->bv_len >> 9; | ||
300 | } | ||
301 | if (ret) | ||
302 | goto fail; | ||
303 | |||
304 | bio_endio(bio, 0); | ||
305 | return 0; | ||
306 | fail: | ||
307 | bio_io_error(bio); | ||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | static int rd_ioctl(struct inode *inode, struct file *file, | ||
312 | unsigned int cmd, unsigned long arg) | ||
313 | { | ||
314 | int error; | ||
315 | struct block_device *bdev = inode->i_bdev; | ||
316 | |||
317 | if (cmd != BLKFLSBUF) | ||
318 | return -ENOTTY; | ||
319 | |||
320 | /* | ||
321 | * special: we want to release the ramdisk memory, it's not like with | ||
322 | * the other blockdevices where this ioctl only flushes away the buffer | ||
323 | * cache | ||
324 | */ | ||
325 | error = -EBUSY; | ||
326 | mutex_lock(&bdev->bd_mutex); | ||
327 | if (bdev->bd_openers <= 2) { | ||
328 | truncate_inode_pages(bdev->bd_inode->i_mapping, 0); | ||
329 | error = 0; | ||
330 | } | ||
331 | mutex_unlock(&bdev->bd_mutex); | ||
332 | return error; | ||
333 | } | ||
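BLKFLSBUF is the only ioctl rd_ioctl() accepts, and on the ramdisk it actually releases the backing pagecache pages rather than just flushing the buffer cache (and fails with -EBUSY if the device has too many openers). A minimal userspace caller might look like the sketch below; it assumes root privileges and an existing /dev/ram0, with error handling kept short:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>		/* BLKFLSBUF */

int main(void)
{
	int fd = open("/dev/ram0", O_RDWR);
	if (fd < 0) {
		perror("open /dev/ram0");
		return 1;
	}
	/* Ask the driver to drop the ramdisk's pagecache pages. */
	if (ioctl(fd, BLKFLSBUF, 0) < 0)
		perror("ioctl BLKFLSBUF");
	close(fd);
	return 0;
}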
334 | |||
335 | /* | ||
336 | * This is the backing_dev_info for the blockdev inode itself. It doesn't need | ||
337 | * writeback and it does not contribute to dirty memory accounting. | ||
338 | */ | ||
339 | static struct backing_dev_info rd_backing_dev_info = { | ||
340 | .ra_pages = 0, /* No readahead */ | ||
341 | .capabilities = BDI_CAP_NO_ACCT_DIRTY | BDI_CAP_NO_WRITEBACK | BDI_CAP_MAP_COPY, | ||
342 | .unplug_io_fn = default_unplug_io_fn, | ||
343 | }; | ||
344 | |||
345 | /* | ||
346 | * This is the backing_dev_info for the files which live atop the ramdisk | ||
347 | * "device". These files do need writeback and they do contribute to dirty | ||
348 | * memory accounting. | ||
349 | */ | ||
350 | static struct backing_dev_info rd_file_backing_dev_info = { | ||
351 | .ra_pages = 0, /* No readahead */ | ||
352 | .capabilities = BDI_CAP_MAP_COPY, /* Does contribute to dirty memory */ | ||
353 | .unplug_io_fn = default_unplug_io_fn, | ||
354 | }; | ||
355 | |||
356 | static int rd_open(struct inode *inode, struct file *filp) | ||
357 | { | ||
358 | unsigned unit = iminor(inode); | ||
359 | |||
360 | if (rd_bdev[unit] == NULL) { | ||
361 | struct block_device *bdev = inode->i_bdev; | ||
362 | struct address_space *mapping; | ||
363 | unsigned bsize; | ||
364 | gfp_t gfp_mask; | ||
365 | |||
366 | inode = igrab(bdev->bd_inode); | ||
367 | rd_bdev[unit] = bdev; | ||
368 | bdev->bd_openers++; | ||
369 | bsize = bdev_hardsect_size(bdev); | ||
370 | bdev->bd_block_size = bsize; | ||
371 | inode->i_blkbits = blksize_bits(bsize); | ||
372 | inode->i_size = get_capacity(bdev->bd_disk)<<9; | ||
373 | |||
374 | mapping = inode->i_mapping; | ||
375 | mapping->a_ops = &ramdisk_aops; | ||
376 | mapping->backing_dev_info = &rd_backing_dev_info; | ||
377 | bdev->bd_inode_backing_dev_info = &rd_file_backing_dev_info; | ||
378 | |||
379 | /* | ||
380 | * Deep badness. rd_blkdev_pagecache_IO() needs to allocate | ||
381 | * pagecache pages within a request_fn. We cannot recur back | ||
382 | * into the filesystem which is mounted atop the ramdisk, because | ||
383 | * that would deadlock on fs locks. And we really don't want | ||
384 | * to reenter rd_blkdev_pagecache_IO when we're already within | ||
385 | * that function. | ||
386 | * | ||
387 | * So we turn off __GFP_FS and __GFP_IO. | ||
388 | * | ||
389 | * And to give this thing a hope of working, turn on __GFP_HIGH. | ||
390 | * Hopefully, there's enough regular memory allocation going on | ||
391 | * for the page allocator emergency pools to keep the ramdisk | ||
392 | * driver happy. | ||
393 | */ | ||
394 | gfp_mask = mapping_gfp_mask(mapping); | ||
395 | gfp_mask &= ~(__GFP_FS|__GFP_IO); | ||
396 | gfp_mask |= __GFP_HIGH; | ||
397 | mapping_set_gfp_mask(mapping, gfp_mask); | ||
398 | } | ||
399 | |||
400 | return 0; | ||
401 | } | ||
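The GFP-mask adjustment described in the comment above is plain bit masking: clear the flags that could recurse into the filesystem or block layer, set the one that allows dipping into the emergency pools. A toy illustration of the same flag arithmetic, using made-up flag values rather than the kernel's real __GFP_* constants:

#include <stdio.h>

/* Illustrative values only -- not the kernel's real __GFP_* bits. */
#define FAKE_GFP_IO   0x01
#define FAKE_GFP_FS   0x02
#define FAKE_GFP_HIGH 0x04
#define FAKE_GFP_WAIT 0x08

int main(void)
{
	unsigned int gfp_mask = FAKE_GFP_WAIT | FAKE_GFP_IO | FAKE_GFP_FS;

	gfp_mask &= ~(FAKE_GFP_FS | FAKE_GFP_IO);	/* no FS/IO recursion */
	gfp_mask |= FAKE_GFP_HIGH;			/* allow emergency pools */

	printf("resulting mask: %#x\n", gfp_mask);	/* prints 0xc */
	return 0;
}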
402 | |||
403 | static struct block_device_operations rd_bd_op = { | ||
404 | .owner = THIS_MODULE, | ||
405 | .open = rd_open, | ||
406 | .ioctl = rd_ioctl, | ||
407 | }; | ||
408 | |||
409 | /* | ||
410 | * Before freeing the module, invalidate all of the protected buffers! | ||
411 | */ | ||
412 | static void __exit rd_cleanup(void) | ||
413 | { | ||
414 | int i; | ||
415 | |||
416 | for (i = 0; i < CONFIG_BLK_DEV_RAM_COUNT; i++) { | ||
417 | struct block_device *bdev = rd_bdev[i]; | ||
418 | rd_bdev[i] = NULL; | ||
419 | if (bdev) { | ||
420 | invalidate_bdev(bdev); | ||
421 | blkdev_put(bdev); | ||
422 | } | ||
423 | del_gendisk(rd_disks[i]); | ||
424 | put_disk(rd_disks[i]); | ||
425 | blk_cleanup_queue(rd_queue[i]); | ||
426 | } | ||
427 | unregister_blkdev(RAMDISK_MAJOR, "ramdisk"); | ||
428 | |||
429 | bdi_destroy(&rd_file_backing_dev_info); | ||
430 | bdi_destroy(&rd_backing_dev_info); | ||
431 | } | ||
432 | |||
433 | /* | ||
434 | * This is the registration and initialization section of the RAM disk driver | ||
435 | */ | ||
436 | static int __init rd_init(void) | ||
437 | { | ||
438 | int i; | ||
439 | int err; | ||
440 | |||
441 | err = bdi_init(&rd_backing_dev_info); | ||
442 | if (err) | ||
443 | goto out2; | ||
444 | |||
445 | err = bdi_init(&rd_file_backing_dev_info); | ||
446 | if (err) { | ||
447 | bdi_destroy(&rd_backing_dev_info); | ||
448 | goto out2; | ||
449 | } | ||
450 | |||
451 | err = -ENOMEM; | ||
452 | |||
453 | if (rd_blocksize > PAGE_SIZE || rd_blocksize < 512 || | ||
454 | !is_power_of_2(rd_blocksize)) { | ||
455 | printk("RAMDISK: wrong blocksize %d, reverting to defaults\n", | ||
456 | rd_blocksize); | ||
457 | rd_blocksize = BLOCK_SIZE; | ||
458 | } | ||
459 | |||
460 | for (i = 0; i < CONFIG_BLK_DEV_RAM_COUNT; i++) { | ||
461 | rd_disks[i] = alloc_disk(1); | ||
462 | if (!rd_disks[i]) | ||
463 | goto out; | ||
464 | |||
465 | rd_queue[i] = blk_alloc_queue(GFP_KERNEL); | ||
466 | if (!rd_queue[i]) { | ||
467 | put_disk(rd_disks[i]); | ||
468 | goto out; | ||
469 | } | ||
470 | } | ||
471 | |||
472 | if (register_blkdev(RAMDISK_MAJOR, "ramdisk")) { | ||
473 | err = -EIO; | ||
474 | goto out; | ||
475 | } | ||
476 | |||
477 | for (i = 0; i < CONFIG_BLK_DEV_RAM_COUNT; i++) { | ||
478 | struct gendisk *disk = rd_disks[i]; | ||
479 | |||
480 | blk_queue_make_request(rd_queue[i], &rd_make_request); | ||
481 | blk_queue_hardsect_size(rd_queue[i], rd_blocksize); | ||
482 | |||
483 | /* rd_size is given in kB */ | ||
484 | disk->major = RAMDISK_MAJOR; | ||
485 | disk->first_minor = i; | ||
486 | disk->fops = &rd_bd_op; | ||
487 | disk->queue = rd_queue[i]; | ||
488 | disk->flags |= GENHD_FL_SUPPRESS_PARTITION_INFO; | ||
489 | sprintf(disk->disk_name, "ram%d", i); | ||
490 | set_capacity(disk, rd_size * 2); | ||
491 | add_disk(rd_disks[i]); | ||
492 | } | ||
493 | |||
494 | /* rd_size is given in kB */ | ||
495 | printk("RAMDISK driver initialized: " | ||
496 | "%d RAM disks of %dK size %d blocksize\n", | ||
497 | CONFIG_BLK_DEV_RAM_COUNT, rd_size, rd_blocksize); | ||
498 | |||
499 | return 0; | ||
500 | out: | ||
501 | while (i--) { | ||
502 | put_disk(rd_disks[i]); | ||
503 | blk_cleanup_queue(rd_queue[i]); | ||
504 | } | ||
505 | bdi_destroy(&rd_backing_dev_info); | ||
506 | bdi_destroy(&rd_file_backing_dev_info); | ||
507 | out2: | ||
508 | return err; | ||
509 | } | ||
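rd_init() validates rd_blocksize (a power of two between 512 bytes and PAGE_SIZE) and sets the capacity as rd_size * 2, because rd_size is given in kilobytes while block-layer capacity is counted in 512-byte sectors. The checks boil down to the following standalone C (4 KiB PAGE_SIZE and the sample values are assumptions for the example):

#include <stdio.h>

#define PAGE_SIZE  4096		/* assumed page size for the example */
#define BLOCK_SIZE 1024		/* default fallback blocksize */

static int is_power_of_2(unsigned int n)
{
	return n != 0 && (n & (n - 1)) == 0;
}

int main(void)
{
	int rd_blocksize = 1536;	/* deliberately invalid */
	int rd_size = 4096;		/* kilobytes per RAM disk */

	if (rd_blocksize > PAGE_SIZE || rd_blocksize < 512 ||
	    !is_power_of_2(rd_blocksize)) {
		printf("wrong blocksize %d, reverting to %d\n",
		       rd_blocksize, BLOCK_SIZE);
		rd_blocksize = BLOCK_SIZE;
	}

	/* rd_size is in kB; capacity is in 512-byte sectors. */
	printf("capacity = %d sectors (%d kB)\n", rd_size * 2, rd_size);
	return 0;
}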
510 | |||
511 | module_init(rd_init); | ||
512 | module_exit(rd_cleanup); | ||
513 | |||
514 | /* options - nonmodular */ | ||
515 | #ifndef MODULE | ||
516 | static int __init ramdisk_size(char *str) | ||
517 | { | ||
518 | rd_size = simple_strtol(str,NULL,0); | ||
519 | return 1; | ||
520 | } | ||
521 | static int __init ramdisk_blocksize(char *str) | ||
522 | { | ||
523 | rd_blocksize = simple_strtol(str,NULL,0); | ||
524 | return 1; | ||
525 | } | ||
526 | __setup("ramdisk_size=", ramdisk_size); | ||
527 | __setup("ramdisk_blocksize=", ramdisk_blocksize); | ||
528 | #endif | ||
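The non-modular options above parse their argument with simple_strtol() and base 0, so ramdisk_size= and ramdisk_blocksize= accept decimal, hex (0x...), or octal values on the kernel command line. Userspace strtol() behaves the same way with base 0, as this small check shows:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* Three spellings of the same 16 MB ramdisk size, in kB. */
	const char *values[] = { "16384", "0x4000", "040000" };

	for (int i = 0; i < 3; i++)
		printf("ramdisk_size=%s -> %ld kB\n",
		       values[i], strtol(values[i], NULL, 0));
	return 0;
}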
529 | |||
530 | /* options - modular */ | ||
531 | module_param(rd_size, int, 0); | ||
532 | MODULE_PARM_DESC(rd_size, "Size of each RAM disk in kbytes."); | ||
533 | module_param(rd_blocksize, int, 0); | ||
534 | MODULE_PARM_DESC(rd_blocksize, "Blocksize of each RAM disk in bytes."); | ||
535 | MODULE_ALIAS_BLOCKDEV_MAJOR(RAMDISK_MAJOR); | ||
536 | |||
537 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c index 1f0b752e5de1..a7c4990b5b6b 100644 --- a/drivers/char/applicom.c +++ b/drivers/char/applicom.c | |||
@@ -57,7 +57,6 @@ | |||
57 | #define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002 | 57 | #define PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN 0x0002 |
58 | #define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003 | 58 | #define PCI_DEVICE_ID_APPLICOM_PCI2000PFB 0x0003 |
59 | #endif | 59 | #endif |
60 | #define MAX_PCI_DEVICE_NUM 3 | ||
61 | 60 | ||
62 | static char *applicom_pci_devnames[] = { | 61 | static char *applicom_pci_devnames[] = { |
63 | "PCI board", | 62 | "PCI board", |
@@ -66,12 +65,9 @@ static char *applicom_pci_devnames[] = { | |||
66 | }; | 65 | }; |
67 | 66 | ||
68 | static struct pci_device_id applicom_pci_tbl[] = { | 67 | static struct pci_device_id applicom_pci_tbl[] = { |
69 | { PCI_VENDOR_ID_APPLICOM, PCI_DEVICE_ID_APPLICOM_PCIGENERIC, | 68 | { PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCIGENERIC) }, |
70 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | 69 | { PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN) }, |
71 | { PCI_VENDOR_ID_APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000IBS_CAN, | 70 | { PCI_VDEVICE(APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000PFB) }, |
72 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | ||
73 | { PCI_VENDOR_ID_APPLICOM, PCI_DEVICE_ID_APPLICOM_PCI2000PFB, | ||
74 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, | ||
75 | { 0 } | 71 | { 0 } |
76 | }; | 72 | }; |
77 | MODULE_DEVICE_TABLE(pci, applicom_pci_tbl); | 73 | MODULE_DEVICE_TABLE(pci, applicom_pci_tbl); |
@@ -197,31 +193,29 @@ static int __init applicom_init(void) | |||
197 | 193 | ||
198 | while ( (dev = pci_get_class(PCI_CLASS_OTHERS << 16, dev))) { | 194 | while ( (dev = pci_get_class(PCI_CLASS_OTHERS << 16, dev))) { |
199 | 195 | ||
200 | if (dev->vendor != PCI_VENDOR_ID_APPLICOM) | 196 | if (!pci_match_id(applicom_pci_tbl, dev)) |
201 | continue; | ||
202 | |||
203 | if (dev->device > MAX_PCI_DEVICE_NUM || dev->device == 0) | ||
204 | continue; | 197 | continue; |
205 | 198 | ||
206 | if (pci_enable_device(dev)) | 199 | if (pci_enable_device(dev)) |
207 | return -EIO; | 200 | return -EIO; |
208 | 201 | ||
209 | RamIO = ioremap(dev->resource[0].start, LEN_RAM_IO); | 202 | RamIO = ioremap(pci_resource_start(dev, 0), LEN_RAM_IO); |
210 | 203 | ||
211 | if (!RamIO) { | 204 | if (!RamIO) { |
212 | printk(KERN_INFO "ac.o: Failed to ioremap PCI memory " | 205 | printk(KERN_INFO "ac.o: Failed to ioremap PCI memory " |
213 | "space at 0x%llx\n", | 206 | "space at 0x%llx\n", |
214 | (unsigned long long)dev->resource[0].start); | 207 | (unsigned long long)pci_resource_start(dev, 0)); |
215 | pci_disable_device(dev); | 208 | pci_disable_device(dev); |
216 | return -EIO; | 209 | return -EIO; |
217 | } | 210 | } |
218 | 211 | ||
219 | printk(KERN_INFO "Applicom %s found at mem 0x%llx, irq %d\n", | 212 | printk(KERN_INFO "Applicom %s found at mem 0x%llx, irq %d\n", |
220 | applicom_pci_devnames[dev->device-1], | 213 | applicom_pci_devnames[dev->device-1], |
221 | (unsigned long long)dev->resource[0].start, | 214 | (unsigned long long)pci_resource_start(dev, 0), |
222 | dev->irq); | 215 | dev->irq); |
223 | 216 | ||
224 | boardno = ac_register_board(dev->resource[0].start, RamIO,0); | 217 | boardno = ac_register_board(pci_resource_start(dev, 0), |
218 | RamIO, 0); | ||
225 | if (!boardno) { | 219 | if (!boardno) { |
226 | printk(KERN_INFO "ac.o: PCI Applicom device doesn't have correct signature.\n"); | 220 | printk(KERN_INFO "ac.o: PCI Applicom device doesn't have correct signature.\n"); |
227 | iounmap(RamIO); | 221 | iounmap(RamIO); |
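The applicom change above replaces the open-coded vendor/device range test with a lookup in the driver's own pci_device_id table via pci_match_id(). Stripped of the PCI specifics, the pattern is a scan of a sentinel-terminated ID table; the userspace sketch below illustrates the idea with an invented struct fake_id and sample vendor/device values, not the driver's real table:

#include <stdio.h>

struct fake_id {
	unsigned short vendor;
	unsigned short device;
};

static const struct fake_id id_table[] = {
	{ 0x1389, 0x0001 },	/* sample vendor/device pairs */
	{ 0x1389, 0x0002 },
	{ 0x1389, 0x0003 },
	{ 0, 0 }		/* sentinel, like the { 0 } entry above */
};

static const struct fake_id *match_id(const struct fake_id *table,
				      unsigned short vendor,
				      unsigned short device)
{
	for (; table->vendor; table++)
		if (table->vendor == vendor && table->device == device)
			return table;
	return NULL;
}

int main(void)
{
	printf("%s\n", match_id(id_table, 0x1389, 0x0002) ? "match" : "no match");
	printf("%s\n", match_id(id_table, 0x1389, 0x0009) ? "match" : "no match");
	return 0;
}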
diff --git a/drivers/char/moxa.c b/drivers/char/moxa.c index 2fc255a21486..64b7b2b18352 100644 --- a/drivers/char/moxa.c +++ b/drivers/char/moxa.c | |||
@@ -207,7 +207,7 @@ static int moxa_tiocmget(struct tty_struct *tty, struct file *file); | |||
207 | static int moxa_tiocmset(struct tty_struct *tty, struct file *file, | 207 | static int moxa_tiocmset(struct tty_struct *tty, struct file *file, |
208 | unsigned int set, unsigned int clear); | 208 | unsigned int set, unsigned int clear); |
209 | static void moxa_poll(unsigned long); | 209 | static void moxa_poll(unsigned long); |
210 | static void moxa_set_tty_param(struct tty_struct *); | 210 | static void moxa_set_tty_param(struct tty_struct *, struct ktermios *); |
211 | static int moxa_block_till_ready(struct tty_struct *, struct file *, | 211 | static int moxa_block_till_ready(struct tty_struct *, struct file *, |
212 | struct moxa_port *); | 212 | struct moxa_port *); |
213 | static void moxa_setup_empty_event(struct tty_struct *); | 213 | static void moxa_setup_empty_event(struct tty_struct *); |
@@ -500,7 +500,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp) | |||
500 | ch->tty = tty; | 500 | ch->tty = tty; |
501 | if (!(ch->asyncflags & ASYNC_INITIALIZED)) { | 501 | if (!(ch->asyncflags & ASYNC_INITIALIZED)) { |
502 | ch->statusflags = 0; | 502 | ch->statusflags = 0; |
503 | moxa_set_tty_param(tty); | 503 | moxa_set_tty_param(tty, tty->termios); |
504 | MoxaPortLineCtrl(ch->port, 1, 1); | 504 | MoxaPortLineCtrl(ch->port, 1, 1); |
505 | MoxaPortEnable(ch->port); | 505 | MoxaPortEnable(ch->port); |
506 | ch->asyncflags |= ASYNC_INITIALIZED; | 506 | ch->asyncflags |= ASYNC_INITIALIZED; |
@@ -803,7 +803,7 @@ static void moxa_set_termios(struct tty_struct *tty, | |||
803 | 803 | ||
804 | if (ch == NULL) | 804 | if (ch == NULL) |
805 | return; | 805 | return; |
806 | moxa_set_tty_param(tty); | 806 | moxa_set_tty_param(tty, old_termios); |
807 | if (!(old_termios->c_cflag & CLOCAL) && | 807 | if (!(old_termios->c_cflag & CLOCAL) && |
808 | (tty->termios->c_cflag & CLOCAL)) | 808 | (tty->termios->c_cflag & CLOCAL)) |
809 | wake_up_interruptible(&ch->open_wait); | 809 | wake_up_interruptible(&ch->open_wait); |
@@ -903,11 +903,11 @@ static void moxa_poll(unsigned long ignored) | |||
903 | 903 | ||
904 | /******************************************************************************/ | 904 | /******************************************************************************/ |
905 | 905 | ||
906 | static void moxa_set_tty_param(struct tty_struct *tty) | 906 | static void moxa_set_tty_param(struct tty_struct *tty, struct ktermios *old_termios) |
907 | { | 907 | { |
908 | register struct ktermios *ts; | 908 | register struct ktermios *ts; |
909 | struct moxa_port *ch; | 909 | struct moxa_port *ch; |
910 | int rts, cts, txflow, rxflow, xany; | 910 | int rts, cts, txflow, rxflow, xany, baud; |
911 | 911 | ||
912 | ch = (struct moxa_port *) tty->driver_data; | 912 | ch = (struct moxa_port *) tty->driver_data; |
913 | ts = tty->termios; | 913 | ts = tty->termios; |
@@ -924,8 +924,15 @@ static void moxa_set_tty_param(struct tty_struct *tty) | |||
924 | rxflow = 1; | 924 | rxflow = 1; |
925 | if (ts->c_iflag & IXANY) | 925 | if (ts->c_iflag & IXANY) |
926 | xany = 1; | 926 | xany = 1; |
927 | |||
928 | /* Clear the features we don't support */ | ||
929 | ts->c_cflag &= ~CMSPAR; | ||
927 | MoxaPortFlowCtrl(ch->port, rts, cts, txflow, rxflow, xany); | 930 | MoxaPortFlowCtrl(ch->port, rts, cts, txflow, rxflow, xany); |
928 | MoxaPortSetTermio(ch->port, ts, tty_get_baud_rate(tty)); | 931 | baud = MoxaPortSetTermio(ch->port, ts, tty_get_baud_rate(tty)); |
932 | if (baud == -1) | ||
933 | baud = tty_termios_baud_rate(old_termios); | ||
934 | /* Now put the baud rate into the termios data */ | ||

935 | tty_encode_baud_rate(tty, baud, baud); | ||
929 | } | 936 | } |
930 | 937 | ||
931 | static int moxa_block_till_ready(struct tty_struct *tty, struct file *filp, | 938 | static int moxa_block_till_ready(struct tty_struct *tty, struct file *filp, |
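The new moxa code treats a -1 return from MoxaPortSetTermio() as "rate not supported", falls back to the rate carried in the old termios, and then writes the result back with tty_encode_baud_rate() so userspace sees the rate the hardware is really running. The shape of that fallback is sketched below with stand-in functions; set_hw_baud() and the 921600 limit are illustrative assumptions, not the driver's API:

#include <stdio.h>

/* Stand-in for MoxaPortSetTermio(): reject rates the hardware can't do. */
static int set_hw_baud(long baud)
{
	if (baud >= 921600L)
		return -1;
	return (int)baud;	/* rate actually programmed */
}

int main(void)
{
	long requested = 921600;	/* asked for by the new termios */
	long old_rate = 38400;		/* rate from the previous termios */

	int baud = set_hw_baud(requested);
	if (baud == -1)
		baud = old_rate;	/* keep the last working rate */

	/* In the driver this value is fed to tty_encode_baud_rate(). */
	printf("reporting %d baud back to userspace\n", baud);
	return 0;
}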
@@ -2065,7 +2072,7 @@ int MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud) | |||
2065 | if (baud >= 921600L) | 2072 | if (baud >= 921600L) |
2066 | return (-1); | 2073 | return (-1); |
2067 | } | 2074 | } |
2068 | MoxaPortSetBaud(port, baud); | 2075 | baud = MoxaPortSetBaud(port, baud); |
2069 | 2076 | ||
2070 | if (termio->c_iflag & (IXON | IXOFF | IXANY)) { | 2077 | if (termio->c_iflag & (IXON | IXOFF | IXANY)) { |
2071 | writeb(termio->c_cc[VSTART], ofsAddr + FuncArg); | 2078 | writeb(termio->c_cc[VSTART], ofsAddr + FuncArg); |
@@ -2074,7 +2081,7 @@ int MoxaPortSetTermio(int port, struct ktermios *termio, speed_t baud) | |||
2074 | moxa_wait_finish(ofsAddr); | 2081 | moxa_wait_finish(ofsAddr); |
2075 | 2082 | ||
2076 | } | 2083 | } |
2077 | return (0); | 2084 | return (baud); |
2078 | } | 2085 | } |
2079 | 2086 | ||
2080 | int MoxaPortGetLineOut(int port, int *dtrState, int *rtsState) | 2087 | int MoxaPortGetLineOut(int port, int *dtrState, int *rtsState) |
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c index 90c3969012a3..46b2a1cc8b54 100644 --- a/drivers/char/n_tty.c +++ b/drivers/char/n_tty.c | |||
@@ -1,6 +1,6 @@ | |||
1 | /* | 1 | /* |
2 | * n_tty.c --- implements the N_TTY line discipline. | 2 | * n_tty.c --- implements the N_TTY line discipline. |
3 | * | 3 | * |
4 | * This code used to be in tty_io.c, but things are getting hairy | 4 | * This code used to be in tty_io.c, but things are getting hairy |
5 | * enough that it made sense to split things off. (The N_TTY | 5 | * enough that it made sense to split things off. (The N_TTY |
6 | * processing has changed so much that it's hardly recognizable, | 6 | * processing has changed so much that it's hardly recognizable, |
@@ -8,19 +8,19 @@ | |||
8 | * | 8 | * |
9 | * Note that the open routine for N_TTY is guaranteed never to return | 9 | * Note that the open routine for N_TTY is guaranteed never to return |
10 | * an error. This is because Linux will fall back to setting a line | 10 | * an error. This is because Linux will fall back to setting a line |
11 | * to N_TTY if it can not switch to any other line discipline. | 11 | * to N_TTY if it can not switch to any other line discipline. |
12 | * | 12 | * |
13 | * Written by Theodore Ts'o, Copyright 1994. | 13 | * Written by Theodore Ts'o, Copyright 1994. |
14 | * | 14 | * |
15 | * This file also contains code originally written by Linus Torvalds, | 15 | * This file also contains code originally written by Linus Torvalds, |
16 | * Copyright 1991, 1992, 1993, and by Julian Cowley, Copyright 1994. | 16 | * Copyright 1991, 1992, 1993, and by Julian Cowley, Copyright 1994. |
17 | * | 17 | * |
18 | * This file may be redistributed under the terms of the GNU General Public | 18 | * This file may be redistributed under the terms of the GNU General Public |
19 | * License. | 19 | * License. |
20 | * | 20 | * |
21 | * Reduced memory usage for older ARM systems - Russell King. | 21 | * Reduced memory usage for older ARM systems - Russell King. |
22 | * | 22 | * |
23 | * 2000/01/20 Fixed SMP locking on put_tty_queue using bits of | 23 | * 2000/01/20 Fixed SMP locking on put_tty_queue using bits of |
24 | * the patch by Andrew J. Kroll <ag784@freenet.buffalo.edu> | 24 | * the patch by Andrew J. Kroll <ag784@freenet.buffalo.edu> |
25 | * who actually finally proved there really was a race. | 25 | * who actually finally proved there really was a race. |
26 | * | 26 | * |
@@ -144,11 +144,11 @@ static void put_tty_queue(unsigned char c, struct tty_struct *tty) | |||
144 | * Can sleep, may be called under the atomic_read_lock mutex but | 144 | * Can sleep, may be called under the atomic_read_lock mutex but |
145 | * this is not guaranteed. | 145 | * this is not guaranteed. |
146 | */ | 146 | */ |
147 | 147 | ||
148 | static void check_unthrottle(struct tty_struct * tty) | 148 | static void check_unthrottle(struct tty_struct *tty) |
149 | { | 149 | { |
150 | if (tty->count && | 150 | if (tty->count && |
151 | test_and_clear_bit(TTY_THROTTLED, &tty->flags) && | 151 | test_and_clear_bit(TTY_THROTTLED, &tty->flags) && |
152 | tty->driver->unthrottle) | 152 | tty->driver->unthrottle) |
153 | tty->driver->unthrottle(tty); | 153 | tty->driver->unthrottle(tty); |
154 | } | 154 | } |
@@ -157,7 +157,7 @@ static void check_unthrottle(struct tty_struct * tty) | |||
157 | * reset_buffer_flags - reset buffer state | 157 | * reset_buffer_flags - reset buffer state |
158 | * @tty: terminal to reset | 158 | * @tty: terminal to reset |
159 | * | 159 | * |
160 | * Reset the read buffer counters, clear the flags, | 160 | * Reset the read buffer counters, clear the flags, |
161 | * and make sure the driver is unthrottled. Called | 161 | * and make sure the driver is unthrottled. Called |
162 | * from n_tty_open() and n_tty_flush_buffer(). | 162 | * from n_tty_open() and n_tty_flush_buffer(). |
163 | */ | 163 | */ |
@@ -186,12 +186,12 @@ static void reset_buffer_flags(struct tty_struct *tty) | |||
186 | * FIXME: tty->ctrl_status is not spinlocked and relies on | 186 | * FIXME: tty->ctrl_status is not spinlocked and relies on |
187 | * lock_kernel() still. | 187 | * lock_kernel() still. |
188 | */ | 188 | */ |
189 | 189 | ||
190 | static void n_tty_flush_buffer(struct tty_struct * tty) | 190 | static void n_tty_flush_buffer(struct tty_struct *tty) |
191 | { | 191 | { |
192 | /* clear everything and unthrottle the driver */ | 192 | /* clear everything and unthrottle the driver */ |
193 | reset_buffer_flags(tty); | 193 | reset_buffer_flags(tty); |
194 | 194 | ||
195 | if (!tty->link) | 195 | if (!tty->link) |
196 | return; | 196 | return; |
197 | 197 | ||
@@ -206,9 +206,9 @@ static void n_tty_flush_buffer(struct tty_struct * tty) | |||
206 | * @tty: tty device | 206 | * @tty: tty device |
207 | * | 207 | * |
208 | * Report the number of characters buffered to be delivered to user | 208 | * Report the number of characters buffered to be delivered to user |
209 | * at this instant in time. | 209 | * at this instant in time. |
210 | */ | 210 | */ |
211 | 211 | ||
212 | static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty) | 212 | static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty) |
213 | { | 213 | { |
214 | unsigned long flags; | 214 | unsigned long flags; |
@@ -234,7 +234,7 @@ static ssize_t n_tty_chars_in_buffer(struct tty_struct *tty) | |||
234 | * character. We use this to correctly compute the on screen size | 234 | * character. We use this to correctly compute the on screen size |
235 | * of the character when printing | 235 | * of the character when printing |
236 | */ | 236 | */ |
237 | 237 | ||
238 | static inline int is_utf8_continuation(unsigned char c) | 238 | static inline int is_utf8_continuation(unsigned char c) |
239 | { | 239 | { |
240 | return (c & 0xc0) == 0x80; | 240 | return (c & 0xc0) == 0x80; |
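is_utf8_continuation() relies on the UTF-8 encoding rule that every continuation byte has the bit pattern 10xxxxxx, i.e. (c & 0xc0) == 0x80. A quick standalone check over a sample multibyte string:

#include <stdio.h>

static int is_utf8_continuation(unsigned char c)
{
	return (c & 0xc0) == 0x80;
}

int main(void)
{
	/* "é" is 0xc3 0xa9 in UTF-8: a lead byte plus one continuation byte. */
	const unsigned char s[] = { 'a', 0xc3, 0xa9, 'b', 0 };

	for (int i = 0; s[i]; i++)
		printf("byte 0x%02x: %s\n", s[i],
		       is_utf8_continuation(s[i]) ? "continuation" : "start/ASCII");
	return 0;
}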
@@ -247,7 +247,7 @@ static inline int is_utf8_continuation(unsigned char c) | |||
247 | * Returns true if the utf8 character 'c' is a multibyte continuation | 247 | * Returns true if the utf8 character 'c' is a multibyte continuation |
248 | * character and the terminal is in unicode mode. | 248 | * character and the terminal is in unicode mode. |
249 | */ | 249 | */ |
250 | 250 | ||
251 | static inline int is_continuation(unsigned char c, struct tty_struct *tty) | 251 | static inline int is_continuation(unsigned char c, struct tty_struct *tty) |
252 | { | 252 | { |
253 | return I_IUTF8(tty) && is_utf8_continuation(c); | 253 | return I_IUTF8(tty) && is_utf8_continuation(c); |
@@ -266,7 +266,7 @@ static inline int is_continuation(unsigned char c, struct tty_struct *tty) | |||
266 | * Called from both the receive and transmit sides and can be called | 266 | * Called from both the receive and transmit sides and can be called |
267 | * re-entrantly. Relies on lock_kernel() still. | 267 | * re-entrantly. Relies on lock_kernel() still. |
268 | */ | 268 | */ |
269 | 269 | ||
270 | static int opost(unsigned char c, struct tty_struct *tty) | 270 | static int opost(unsigned char c, struct tty_struct *tty) |
271 | { | 271 | { |
272 | int space, spaces; | 272 | int space, spaces; |
@@ -339,9 +339,9 @@ static int opost(unsigned char c, struct tty_struct *tty) | |||
339 | * | 339 | * |
340 | * Called from write_chan under the tty layer write lock. | 340 | * Called from write_chan under the tty layer write lock. |
341 | */ | 341 | */ |
342 | 342 | ||
343 | static ssize_t opost_block(struct tty_struct * tty, | 343 | static ssize_t opost_block(struct tty_struct *tty, |
344 | const unsigned char * buf, unsigned int nr) | 344 | const unsigned char *buf, unsigned int nr) |
345 | { | 345 | { |
346 | int space; | 346 | int space; |
347 | int i; | 347 | int i; |
@@ -386,7 +386,7 @@ static ssize_t opost_block(struct tty_struct * tty, | |||
386 | break_out: | 386 | break_out: |
387 | if (tty->driver->flush_chars) | 387 | if (tty->driver->flush_chars) |
388 | tty->driver->flush_chars(tty); | 388 | tty->driver->flush_chars(tty); |
389 | i = tty->driver->write(tty, buf, i); | 389 | i = tty->driver->write(tty, buf, i); |
390 | return i; | 390 | return i; |
391 | } | 391 | } |
392 | 392 | ||
@@ -398,7 +398,7 @@ break_out: | |||
398 | * | 398 | * |
399 | * Queue a byte to the driver layer for output | 399 | * Queue a byte to the driver layer for output |
400 | */ | 400 | */ |
401 | 401 | ||
402 | static inline void put_char(unsigned char c, struct tty_struct *tty) | 402 | static inline void put_char(unsigned char c, struct tty_struct *tty) |
403 | { | 403 | { |
404 | tty->driver->put_char(tty, c); | 404 | tty->driver->put_char(tty, c); |
@@ -409,7 +409,7 @@ static inline void put_char(unsigned char c, struct tty_struct *tty) | |||
409 | * @c: unicode byte to echo | 409 | * @c: unicode byte to echo |
410 | * @tty: terminal device | 410 | * @tty: terminal device |
411 | * | 411 | * |
412 | * Echo user input back onto the screen. This must be called only when | 412 | * Echo user input back onto the screen. This must be called only when |
413 | * L_ECHO(tty) is true. Called from the driver receive_buf path. | 413 | * L_ECHO(tty) is true. Called from the driver receive_buf path. |
414 | */ | 414 | */ |
415 | 415 | ||
@@ -441,7 +441,7 @@ static inline void finish_erasing(struct tty_struct *tty) | |||
441 | * present in the stream from the driver layer. Handles the complexities | 441 | * present in the stream from the driver layer. Handles the complexities |
442 | * of UTF-8 multibyte symbols. | 442 | * of UTF-8 multibyte symbols. |
443 | */ | 443 | */ |
444 | 444 | ||
445 | static void eraser(unsigned char c, struct tty_struct *tty) | 445 | static void eraser(unsigned char c, struct tty_struct *tty) |
446 | { | 446 | { |
447 | enum { ERASE, WERASE, KILL } kill_type; | 447 | enum { ERASE, WERASE, KILL } kill_type; |
@@ -541,7 +541,7 @@ static void eraser(unsigned char c, struct tty_struct *tty) | |||
541 | 541 | ||
542 | /* should never happen */ | 542 | /* should never happen */ |
543 | if (tty->column > 0x80000000) | 543 | if (tty->column > 0x80000000) |
544 | tty->column = 0; | 544 | tty->column = 0; |
545 | 545 | ||
546 | /* Now backup to that column. */ | 546 | /* Now backup to that column. */ |
547 | while (tty->column > col) { | 547 | while (tty->column > col) { |
@@ -585,7 +585,7 @@ static void eraser(unsigned char c, struct tty_struct *tty) | |||
585 | * settings and character used. Called from the driver receive_buf | 585 | * settings and character used. Called from the driver receive_buf |
586 | * path so serialized. | 586 | * path so serialized. |
587 | */ | 587 | */ |
588 | 588 | ||
589 | static inline void isig(int sig, struct tty_struct *tty, int flush) | 589 | static inline void isig(int sig, struct tty_struct *tty, int flush) |
590 | { | 590 | { |
591 | if (tty->pgrp) | 591 | if (tty->pgrp) |
@@ -606,7 +606,7 @@ static inline void isig(int sig, struct tty_struct *tty, int flush) | |||
606 | * | 606 | * |
607 | * Called from the receive_buf path so single threaded. | 607 | * Called from the receive_buf path so single threaded. |
608 | */ | 608 | */ |
609 | 609 | ||
610 | static inline void n_tty_receive_break(struct tty_struct *tty) | 610 | static inline void n_tty_receive_break(struct tty_struct *tty) |
611 | { | 611 | { |
612 | if (I_IGNBRK(tty)) | 612 | if (I_IGNBRK(tty)) |
@@ -635,7 +635,7 @@ static inline void n_tty_receive_break(struct tty_struct *tty) | |||
635 | * need locking as num_overrun and overrun_time are function | 635 | * need locking as num_overrun and overrun_time are function |
636 | * private. | 636 | * private. |
637 | */ | 637 | */ |
638 | 638 | ||
639 | static inline void n_tty_receive_overrun(struct tty_struct *tty) | 639 | static inline void n_tty_receive_overrun(struct tty_struct *tty) |
640 | { | 640 | { |
641 | char buf[64]; | 641 | char buf[64]; |
@@ -662,9 +662,8 @@ static inline void n_tty_receive_overrun(struct tty_struct *tty) | |||
662 | static inline void n_tty_receive_parity_error(struct tty_struct *tty, | 662 | static inline void n_tty_receive_parity_error(struct tty_struct *tty, |
663 | unsigned char c) | 663 | unsigned char c) |
664 | { | 664 | { |
665 | if (I_IGNPAR(tty)) { | 665 | if (I_IGNPAR(tty)) |
666 | return; | 666 | return; |
667 | } | ||
668 | if (I_PARMRK(tty)) { | 667 | if (I_PARMRK(tty)) { |
669 | put_tty_queue('\377', tty); | 668 | put_tty_queue('\377', tty); |
670 | put_tty_queue('\0', tty); | 669 | put_tty_queue('\0', tty); |
@@ -682,7 +681,7 @@ static inline void n_tty_receive_parity_error(struct tty_struct *tty, | |||
682 | * @c: character | 681 | * @c: character |
683 | * | 682 | * |
684 | * Process an individual character of input received from the driver. | 683 | * Process an individual character of input received from the driver. |
685 | * This is serialized with respect to itself by the rules for the | 684 | * This is serialized with respect to itself by the rules for the |
686 | * driver above. | 685 | * driver above. |
687 | */ | 686 | */ |
688 | 687 | ||
@@ -694,7 +693,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c) | |||
694 | put_tty_queue(c, tty); | 693 | put_tty_queue(c, tty); |
695 | return; | 694 | return; |
696 | } | 695 | } |
697 | 696 | ||
698 | if (I_ISTRIP(tty)) | 697 | if (I_ISTRIP(tty)) |
699 | c &= 0x7f; | 698 | c &= 0x7f; |
700 | if (I_IUCLC(tty) && L_IEXTEN(tty)) | 699 | if (I_IUCLC(tty) && L_IEXTEN(tty)) |
@@ -739,7 +738,7 @@ static inline void n_tty_receive_char(struct tty_struct *tty, unsigned char c) | |||
739 | put_tty_queue(c, tty); | 738 | put_tty_queue(c, tty); |
740 | return; | 739 | return; |
741 | } | 740 | } |
742 | 741 | ||
743 | if (c == '\r') { | 742 | if (c == '\r') { |
744 | if (I_IGNCR(tty)) | 743 | if (I_IGNCR(tty)) |
745 | return; | 744 | return; |
@@ -825,8 +824,8 @@ send_signal: | |||
825 | goto handle_newline; | 824 | goto handle_newline; |
826 | } | 825 | } |
827 | if (c == EOF_CHAR(tty)) { | 826 | if (c == EOF_CHAR(tty)) { |
828 | if (tty->canon_head != tty->read_head) | 827 | if (tty->canon_head != tty->read_head) |
829 | set_bit(TTY_PUSH, &tty->flags); | 828 | set_bit(TTY_PUSH, &tty->flags); |
830 | c = __DISABLED_CHAR; | 829 | c = __DISABLED_CHAR; |
831 | goto handle_newline; | 830 | goto handle_newline; |
832 | } | 831 | } |
@@ -850,7 +849,7 @@ send_signal: | |||
850 | if (I_PARMRK(tty) && c == (unsigned char) '\377') | 849 | if (I_PARMRK(tty) && c == (unsigned char) '\377') |
851 | put_tty_queue(c, tty); | 850 | put_tty_queue(c, tty); |
852 | 851 | ||
853 | handle_newline: | 852 | handle_newline: |
854 | spin_lock_irqsave(&tty->read_lock, flags); | 853 | spin_lock_irqsave(&tty->read_lock, flags); |
855 | set_bit(tty->read_head, tty->read_flags); | 854 | set_bit(tty->read_head, tty->read_flags); |
856 | put_tty_queue_nolock(c, tty); | 855 | put_tty_queue_nolock(c, tty); |
@@ -863,7 +862,7 @@ send_signal: | |||
863 | return; | 862 | return; |
864 | } | 863 | } |
865 | } | 864 | } |
866 | 865 | ||
867 | finish_erasing(tty); | 866 | finish_erasing(tty); |
868 | if (L_ECHO(tty)) { | 867 | if (L_ECHO(tty)) { |
869 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) { | 868 | if (tty->read_cnt >= N_TTY_BUF_SIZE-1) { |
@@ -884,7 +883,7 @@ send_signal: | |||
884 | put_tty_queue(c, tty); | 883 | put_tty_queue(c, tty); |
885 | 884 | ||
886 | put_tty_queue(c, tty); | 885 | put_tty_queue(c, tty); |
887 | } | 886 | } |
888 | 887 | ||
889 | 888 | ||
890 | /** | 889 | /** |
@@ -898,12 +897,10 @@ send_signal: | |||
898 | 897 | ||
899 | static void n_tty_write_wakeup(struct tty_struct *tty) | 898 | static void n_tty_write_wakeup(struct tty_struct *tty) |
900 | { | 899 | { |
901 | if (tty->fasync) | 900 | if (tty->fasync) { |
902 | { | 901 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); |
903 | set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); | ||
904 | kill_fasync(&tty->fasync, SIGIO, POLL_OUT); | 902 | kill_fasync(&tty->fasync, SIGIO, POLL_OUT); |
905 | } | 903 | } |
906 | return; | ||
907 | } | 904 | } |
908 | 905 | ||
909 | /** | 906 | /** |
@@ -918,7 +915,7 @@ static void n_tty_write_wakeup(struct tty_struct *tty) | |||
918 | * not from interrupt context. The driver is responsible for making | 915 | * not from interrupt context. The driver is responsible for making |
919 | * calls one at a time and in order (or using flush_to_ldisc) | 916 | * calls one at a time and in order (or using flush_to_ldisc) |
920 | */ | 917 | */ |
921 | 918 | ||
922 | static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, | 919 | static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, |
923 | char *fp, int count) | 920 | char *fp, int count) |
924 | { | 921 | { |
@@ -950,7 +947,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, | |||
950 | tty->read_cnt += i; | 947 | tty->read_cnt += i; |
951 | spin_unlock_irqrestore(&tty->read_lock, cpuflags); | 948 | spin_unlock_irqrestore(&tty->read_lock, cpuflags); |
952 | } else { | 949 | } else { |
953 | for (i=count, p = cp, f = fp; i; i--, p++) { | 950 | for (i = count, p = cp, f = fp; i; i--, p++) { |
954 | if (f) | 951 | if (f) |
955 | flags = *f++; | 952 | flags = *f++; |
956 | switch (flags) { | 953 | switch (flags) { |
@@ -968,7 +965,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, | |||
968 | n_tty_receive_overrun(tty); | 965 | n_tty_receive_overrun(tty); |
969 | break; | 966 | break; |
970 | default: | 967 | default: |
971 | printk("%s: unknown flag %d\n", | 968 | printk(KERN_ERR "%s: unknown flag %d\n", |
972 | tty_name(tty, buf), flags); | 969 | tty_name(tty, buf), flags); |
973 | break; | 970 | break; |
974 | } | 971 | } |
@@ -1001,7 +998,7 @@ static void n_tty_receive_buf(struct tty_struct *tty, const unsigned char *cp, | |||
1001 | int is_ignored(int sig) | 998 | int is_ignored(int sig) |
1002 | { | 999 | { |
1003 | return (sigismember(¤t->blocked, sig) || | 1000 | return (sigismember(¤t->blocked, sig) || |
1004 | current->sighand->action[sig-1].sa.sa_handler == SIG_IGN); | 1001 | current->sighand->action[sig-1].sa.sa_handler == SIG_IGN); |
1005 | } | 1002 | } |
1006 | 1003 | ||
1007 | /** | 1004 | /** |
@@ -1011,16 +1008,16 @@ int is_ignored(int sig) | |||
1011 | * | 1008 | * |
1012 | * Called by the tty layer when the user changes termios flags so | 1009 | * Called by the tty layer when the user changes termios flags so |
1013 | * that the line discipline can plan ahead. This function cannot sleep | 1010 | * that the line discipline can plan ahead. This function cannot sleep |
1014 | * and is protected from re-entry by the tty layer. The user is | 1011 | * and is protected from re-entry by the tty layer. The user is |
1015 | * guaranteed that this function will not be re-entered or in progress | 1012 | * guaranteed that this function will not be re-entered or in progress |
1016 | * when the ldisc is closed. | 1013 | * when the ldisc is closed. |
1017 | */ | 1014 | */ |
1018 | 1015 | ||
1019 | static void n_tty_set_termios(struct tty_struct *tty, struct ktermios * old) | 1016 | static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) |
1020 | { | 1017 | { |
1021 | if (!tty) | 1018 | if (!tty) |
1022 | return; | 1019 | return; |
1023 | 1020 | ||
1024 | tty->icanon = (L_ICANON(tty) != 0); | 1021 | tty->icanon = (L_ICANON(tty) != 0); |
1025 | if (test_bit(TTY_HW_COOK_IN, &tty->flags)) { | 1022 | if (test_bit(TTY_HW_COOK_IN, &tty->flags)) { |
1026 | tty->raw = 1; | 1023 | tty->raw = 1; |
@@ -1085,12 +1082,12 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios * old) | |||
1085 | * n_tty_close - close the ldisc for this tty | 1082 | * n_tty_close - close the ldisc for this tty |
1086 | * @tty: device | 1083 | * @tty: device |
1087 | * | 1084 | * |
1088 | * Called from the terminal layer when this line discipline is | 1085 | * Called from the terminal layer when this line discipline is |
1089 | * being shut down, either because of a close or because of a | 1086 | * being shut down, either because of a close or because of a |
1090 | * discipline change. The function will not be called while other | 1087 | * discipline change. The function will not be called while other |
1091 | * ldisc methods are in progress. | 1088 | * ldisc methods are in progress. |
1092 | */ | 1089 | */ |
1093 | 1090 | ||
1094 | static void n_tty_close(struct tty_struct *tty) | 1091 | static void n_tty_close(struct tty_struct *tty) |
1095 | { | 1092 | { |
1096 | n_tty_flush_buffer(tty); | 1093 | n_tty_flush_buffer(tty); |
@@ -1104,7 +1101,7 @@ static void n_tty_close(struct tty_struct *tty) | |||
1104 | * n_tty_open - open an ldisc | 1101 | * n_tty_open - open an ldisc |
1105 | * @tty: terminal to open | 1102 | * @tty: terminal to open |
1106 | * | 1103 | * |
1107 | * Called when this line discipline is being attached to the | 1104 | * Called when this line discipline is being attached to the |
1108 | * terminal device. Can sleep. Called serialized so that no | 1105 | * terminal device. Can sleep. Called serialized so that no |
1109 | * other events will occur in parallel. No further open will occur | 1106 | * other events will occur in parallel. No further open will occur |
1110 | * until a close. | 1107 | * until a close. |
@@ -1157,7 +1154,7 @@ static inline int input_available_p(struct tty_struct *tty, int amt) | |||
1157 | * Called under the tty->atomic_read_lock sem | 1154 | * Called under the tty->atomic_read_lock sem |
1158 | * | 1155 | * |
1159 | */ | 1156 | */ |
1160 | 1157 | ||
1161 | static int copy_from_read_buf(struct tty_struct *tty, | 1158 | static int copy_from_read_buf(struct tty_struct *tty, |
1162 | unsigned char __user **b, | 1159 | unsigned char __user **b, |
1163 | size_t *nr) | 1160 | size_t *nr) |
@@ -1186,7 +1183,8 @@ static int copy_from_read_buf(struct tty_struct *tty, | |||
1186 | return retval; | 1183 | return retval; |
1187 | } | 1184 | } |
1188 | 1185 | ||
1189 | extern ssize_t redirected_tty_write(struct file *,const char *,size_t,loff_t *); | 1186 | extern ssize_t redirected_tty_write(struct file *, const char *, |
1187 | size_t, loff_t *); | ||
1190 | 1188 | ||
1191 | /** | 1189 | /** |
1192 | * job_control - check job control | 1190 | * job_control - check job control |
@@ -1194,10 +1192,10 @@ extern ssize_t redirected_tty_write(struct file *,const char *,size_t,loff_t *); | |||
1194 | * @file: file handle | 1192 | * @file: file handle |
1195 | * | 1193 | * |
1196 | * Perform job control management checks on this file/tty descriptor | 1194 | * Perform job control management checks on this file/tty descriptor |
1197 | * and if appropriate send any needed signals and return a negative | 1195 | * and if appropriate send any needed signals and return a negative |
1198 | * error code if action should be taken. | 1196 | * error code if action should be taken. |
1199 | */ | 1197 | */ |
1200 | 1198 | ||
1201 | static int job_control(struct tty_struct *tty, struct file *file) | 1199 | static int job_control(struct tty_struct *tty, struct file *file) |
1202 | { | 1200 | { |
1203 | /* Job control check -- must be done at start and after | 1201 | /* Job control check -- must be done at start and after |
@@ -1208,7 +1206,7 @@ static int job_control(struct tty_struct *tty, struct file *file) | |||
1208 | if (file->f_op->write != redirected_tty_write && | 1206 | if (file->f_op->write != redirected_tty_write && |
1209 | current->signal->tty == tty) { | 1207 | current->signal->tty == tty) { |
1210 | if (!tty->pgrp) | 1208 | if (!tty->pgrp) |
1211 | printk("read_chan: no tty->pgrp!\n"); | 1209 | printk(KERN_ERR "read_chan: no tty->pgrp!\n"); |
1212 | else if (task_pgrp(current) != tty->pgrp) { | 1210 | else if (task_pgrp(current) != tty->pgrp) { |
1213 | if (is_ignored(SIGTTIN) || | 1211 | if (is_ignored(SIGTTIN) || |
1214 | is_current_pgrp_orphaned()) | 1212 | is_current_pgrp_orphaned()) |
@@ -1220,7 +1218,7 @@ static int job_control(struct tty_struct *tty, struct file *file) | |||
1220 | } | 1218 | } |
1221 | return 0; | 1219 | return 0; |
1222 | } | 1220 | } |
1223 | 1221 | ||
1224 | 1222 | ||
1225 | /** | 1223 | /** |
1226 | * read_chan - read function for tty | 1224 | * read_chan - read function for tty |
@@ -1236,7 +1234,7 @@ static int job_control(struct tty_struct *tty, struct file *file) | |||
1236 | * | 1234 | * |
1237 | * This code must be sure never to sleep through a hangup. | 1235 | * This code must be sure never to sleep through a hangup. |
1238 | */ | 1236 | */ |
1239 | 1237 | ||
1240 | static ssize_t read_chan(struct tty_struct *tty, struct file *file, | 1238 | static ssize_t read_chan(struct tty_struct *tty, struct file *file, |
1241 | unsigned char __user *buf, size_t nr) | 1239 | unsigned char __user *buf, size_t nr) |
1242 | { | 1240 | { |
@@ -1252,14 +1250,14 @@ static ssize_t read_chan(struct tty_struct *tty, struct file *file, | |||
1252 | do_it_again: | 1250 | do_it_again: |
1253 | 1251 | ||
1254 | if (!tty->read_buf) { | 1252 | if (!tty->read_buf) { |
1255 | printk("n_tty_read_chan: called with read_buf == NULL?!?\n"); | 1253 | printk(KERN_ERR "n_tty_read_chan: read_buf == NULL?!?\n"); |
1256 | return -EIO; | 1254 | return -EIO; |
1257 | } | 1255 | } |
1258 | 1256 | ||
1259 | c = job_control(tty, file); | 1257 | c = job_control(tty, file); |
1260 | if(c < 0) | 1258 | if (c < 0) |
1261 | return c; | 1259 | return c; |
1262 | 1260 | ||
1263 | minimum = time = 0; | 1261 | minimum = time = 0; |
1264 | timeout = MAX_SCHEDULE_TIMEOUT; | 1262 | timeout = MAX_SCHEDULE_TIMEOUT; |
1265 | if (!tty->icanon) { | 1263 | if (!tty->icanon) { |
@@ -1287,8 +1285,7 @@ do_it_again: | |||
1287 | if (file->f_flags & O_NONBLOCK) { | 1285 | if (file->f_flags & O_NONBLOCK) { |
1288 | if (!mutex_trylock(&tty->atomic_read_lock)) | 1286 | if (!mutex_trylock(&tty->atomic_read_lock)) |
1289 | return -EAGAIN; | 1287 | return -EAGAIN; |
1290 | } | 1288 | } else { |
1291 | else { | ||
1292 | if (mutex_lock_interruptible(&tty->atomic_read_lock)) | 1289 | if (mutex_lock_interruptible(&tty->atomic_read_lock)) |
1293 | return -ERESTARTSYS; | 1290 | return -ERESTARTSYS; |
1294 | } | 1291 | } |
@@ -1314,11 +1311,11 @@ do_it_again: | |||
1314 | so that any interrupt will set the state back to | 1311 | so that any interrupt will set the state back to |
1315 | TASK_RUNNING. */ | 1312 | TASK_RUNNING. */ |
1316 | set_current_state(TASK_INTERRUPTIBLE); | 1313 | set_current_state(TASK_INTERRUPTIBLE); |
1317 | 1314 | ||
1318 | if (((minimum - (b - buf)) < tty->minimum_to_wake) && | 1315 | if (((minimum - (b - buf)) < tty->minimum_to_wake) && |
1319 | ((minimum - (b - buf)) >= 1)) | 1316 | ((minimum - (b - buf)) >= 1)) |
1320 | tty->minimum_to_wake = (minimum - (b - buf)); | 1317 | tty->minimum_to_wake = (minimum - (b - buf)); |
1321 | 1318 | ||
1322 | if (!input_available_p(tty, 0)) { | 1319 | if (!input_available_p(tty, 0)) { |
1323 | if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { | 1320 | if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) { |
1324 | retval = -EIO; | 1321 | retval = -EIO; |
@@ -1355,7 +1352,7 @@ do_it_again: | |||
1355 | if (tty->icanon) { | 1352 | if (tty->icanon) { |
1356 | /* N.B. avoid overrun if nr == 0 */ | 1353 | /* N.B. avoid overrun if nr == 0 */ |
1357 | while (nr && tty->read_cnt) { | 1354 | while (nr && tty->read_cnt) { |
1358 | int eol; | 1355 | int eol; |
1359 | 1356 | ||
1360 | eol = test_and_clear_bit(tty->read_tail, | 1357 | eol = test_and_clear_bit(tty->read_tail, |
1361 | tty->read_flags); | 1358 | tty->read_flags); |
@@ -1427,7 +1424,7 @@ do_it_again: | |||
1427 | if (size) { | 1424 | if (size) { |
1428 | retval = size; | 1425 | retval = size; |
1429 | if (nr) | 1426 | if (nr) |
1430 | clear_bit(TTY_PUSH, &tty->flags); | 1427 | clear_bit(TTY_PUSH, &tty->flags); |
1431 | } else if (test_and_clear_bit(TTY_PUSH, &tty->flags)) | 1428 | } else if (test_and_clear_bit(TTY_PUSH, &tty->flags)) |
1432 | goto do_it_again; | 1429 | goto do_it_again; |
1433 | 1430 | ||
@@ -1450,9 +1447,9 @@ do_it_again: | |||
1450 | * | 1447 | * |
1451 | * This code must be sure never to sleep through a hangup. | 1448 | * This code must be sure never to sleep through a hangup. |
1452 | */ | 1449 | */ |
1453 | 1450 | ||
1454 | static ssize_t write_chan(struct tty_struct * tty, struct file * file, | 1451 | static ssize_t write_chan(struct tty_struct *tty, struct file *file, |
1455 | const unsigned char * buf, size_t nr) | 1452 | const unsigned char *buf, size_t nr) |
1456 | { | 1453 | { |
1457 | const unsigned char *b = buf; | 1454 | const unsigned char *b = buf; |
1458 | DECLARE_WAITQUEUE(wait, current); | 1455 | DECLARE_WAITQUEUE(wait, current); |
@@ -1542,8 +1539,9 @@ break_out: | |||
1542 | * recompute the new limits. Possibly set_termios should issue | 1539 | * recompute the new limits. Possibly set_termios should issue |
1543 | * a read wakeup to fix this bug. | 1540 | * a read wakeup to fix this bug. |
1544 | */ | 1541 | */ |
1545 | 1542 | ||
1546 | static unsigned int normal_poll(struct tty_struct * tty, struct file * file, poll_table *wait) | 1543 | static unsigned int normal_poll(struct tty_struct *tty, struct file *file, |
1544 | poll_table *wait) | ||
1547 | { | 1545 | { |
1548 | unsigned int mask = 0; | 1546 | unsigned int mask = 0; |
1549 | 1547 | ||
diff --git a/drivers/char/rocket.c b/drivers/char/rocket.c index 68c289fe2dc2..72f289279d8f 100644 --- a/drivers/char/rocket.c +++ b/drivers/char/rocket.c | |||
@@ -715,11 +715,10 @@ static void configure_r_port(struct r_port *info, | |||
715 | unsigned rocketMode; | 715 | unsigned rocketMode; |
716 | int bits, baud, divisor; | 716 | int bits, baud, divisor; |
717 | CHANNEL_t *cp; | 717 | CHANNEL_t *cp; |
718 | struct ktermios *t = info->tty->termios; | ||
718 | 719 | ||
719 | if (!info->tty || !info->tty->termios) | ||
720 | return; | ||
721 | cp = &info->channel; | 720 | cp = &info->channel; |
722 | cflag = info->tty->termios->c_cflag; | 721 | cflag = t->c_cflag; |
723 | 722 | ||
724 | /* Byte size and parity */ | 723 | /* Byte size and parity */ |
725 | if ((cflag & CSIZE) == CS8) { | 724 | if ((cflag & CSIZE) == CS8) { |
@@ -754,10 +753,7 @@ static void configure_r_port(struct r_port *info, | |||
754 | baud = 9600; | 753 | baud = 9600; |
755 | divisor = ((rp_baud_base[info->board] + (baud >> 1)) / baud) - 1; | 754 | divisor = ((rp_baud_base[info->board] + (baud >> 1)) / baud) - 1; |
756 | if ((divisor >= 8192 || divisor < 0) && old_termios) { | 755 | if ((divisor >= 8192 || divisor < 0) && old_termios) { |
757 | info->tty->termios->c_cflag &= ~CBAUD; | 756 | baud = tty_termios_baud_rate(old_termios); |
758 | info->tty->termios->c_cflag |= | ||
759 | (old_termios->c_cflag & CBAUD); | ||
760 | baud = tty_get_baud_rate(info->tty); | ||
761 | if (!baud) | 757 | if (!baud) |
762 | baud = 9600; | 758 | baud = 9600; |
763 | divisor = (rp_baud_base[info->board] / baud) - 1; | 759 | divisor = (rp_baud_base[info->board] / baud) - 1; |
@@ -769,6 +765,9 @@ static void configure_r_port(struct r_port *info, | |||
769 | info->cps = baud / bits; | 765 | info->cps = baud / bits; |
770 | sSetBaud(cp, divisor); | 766 | sSetBaud(cp, divisor); |
771 | 767 | ||
768 | /* FIXME: Should really back compute a baud rate from the divisor */ | ||
769 | tty_encode_baud_rate(info->tty, baud, baud); | ||
770 | |||
772 | if (cflag & CRTSCTS) { | 771 | if (cflag & CRTSCTS) { |
773 | info->intmask |= DELTA_CTS; | 772 | info->intmask |= DELTA_CTS; |
774 | sEnCTSFlowCtl(cp); | 773 | sEnCTSFlowCtl(cp); |
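configure_r_port() derives the clock divisor with rounding, divisor = (base + baud/2)/baud - 1, and the FIXME added above notes that the rate reported back to userspace should really be recomputed from that divisor as base/(divisor + 1). The arithmetic is shown below, using 460800 Hz as an assumed rp_baud_base value and a request that does not divide evenly:

#include <stdio.h>

int main(void)
{
	long base = 460800;	/* assumed rp_baud_base value for the example */
	long baud = 56000;	/* requested rate */

	long divisor = ((base + (baud >> 1)) / baud) - 1;
	long actual  = base / (divisor + 1);	/* what the FIXME suggests reporting */

	printf("requested %ld, divisor %ld, actual %ld baud\n",
	       baud, divisor, actual);	/* requested 56000, actual 57600 */
	return 0;
}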
@@ -1202,15 +1201,14 @@ static void rp_set_termios(struct tty_struct *tty, | |||
1202 | 1201 | ||
1203 | cflag = tty->termios->c_cflag; | 1202 | cflag = tty->termios->c_cflag; |
1204 | 1203 | ||
1205 | if (cflag == old_termios->c_cflag) | ||
1206 | return; | ||
1207 | |||
1208 | /* | 1204 | /* |
1209 | * This driver doesn't support CS5 or CS6 | 1205 | * This driver doesn't support CS5 or CS6 |
1210 | */ | 1206 | */ |
1211 | if (((cflag & CSIZE) == CS5) || ((cflag & CSIZE) == CS6)) | 1207 | if (((cflag & CSIZE) == CS5) || ((cflag & CSIZE) == CS6)) |
1212 | tty->termios->c_cflag = | 1208 | tty->termios->c_cflag = |
1213 | ((cflag & ~CSIZE) | (old_termios->c_cflag & CSIZE)); | 1209 | ((cflag & ~CSIZE) | (old_termios->c_cflag & CSIZE)); |
1210 | /* Or CMSPAR */ | ||
1211 | tty->termios->c_cflag &= ~CMSPAR; | ||
1214 | 1212 | ||
1215 | configure_r_port(info, old_termios); | 1213 | configure_r_port(info, old_termios); |
1216 | 1214 | ||
@@ -1401,6 +1399,9 @@ static int reset_rm2(struct r_port *info, void __user *arg) | |||
1401 | { | 1399 | { |
1402 | int reset; | 1400 | int reset; |
1403 | 1401 | ||
1402 | if (!capable(CAP_SYS_ADMIN)) | ||
1403 | return -EPERM; | ||
1404 | |||
1404 | if (copy_from_user(&reset, arg, sizeof (int))) | 1405 | if (copy_from_user(&reset, arg, sizeof (int))) |
1405 | return -EFAULT; | 1406 | return -EFAULT; |
1406 | if (reset) | 1407 | if (reset) |
diff --git a/drivers/char/tty_audit.c b/drivers/char/tty_audit.c index bacded0eefab..7722466e052f 100644 --- a/drivers/char/tty_audit.c +++ b/drivers/char/tty_audit.c | |||
@@ -27,7 +27,7 @@ static struct tty_audit_buf *tty_audit_buf_alloc(int major, int minor, | |||
27 | { | 27 | { |
28 | struct tty_audit_buf *buf; | 28 | struct tty_audit_buf *buf; |
29 | 29 | ||
30 | buf = kmalloc(sizeof (*buf), GFP_KERNEL); | 30 | buf = kmalloc(sizeof(*buf), GFP_KERNEL); |
31 | if (!buf) | 31 | if (!buf) |
32 | goto err; | 32 | goto err; |
33 | if (PAGE_SIZE != N_TTY_BUF_SIZE) | 33 | if (PAGE_SIZE != N_TTY_BUF_SIZE) |
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c index 79c86c47947f..613ec816ce60 100644 --- a/drivers/char/tty_io.c +++ b/drivers/char/tty_io.c | |||
@@ -19,7 +19,7 @@ | |||
19 | * Also restructured routines so that there is more of a separation | 19 | * Also restructured routines so that there is more of a separation |
20 | * between the high-level tty routines (tty_io.c and tty_ioctl.c) and | 20 | * between the high-level tty routines (tty_io.c and tty_ioctl.c) and |
21 | * the low-level tty routines (serial.c, pty.c, console.c). This | 21 | * the low-level tty routines (serial.c, pty.c, console.c). This |
22 | * makes for cleaner and more compact code. -TYT, 9/17/92 | 22 | * makes for cleaner and more compact code. -TYT, 9/17/92 |
23 | * | 23 | * |
24 | * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines | 24 | * Modified by Fred N. van Kempen, 01/29/93, to add line disciplines |
25 | * which can be dynamically activated and de-activated by the line | 25 | * which can be dynamically activated and de-activated by the line |
@@ -41,7 +41,7 @@ | |||
41 | * | 41 | * |
42 | * New TIOCLINUX variants added. | 42 | * New TIOCLINUX variants added. |
43 | * -- mj@k332.feld.cvut.cz, 19-Nov-95 | 43 | * -- mj@k332.feld.cvut.cz, 19-Nov-95 |
44 | * | 44 | * |
45 | * Restrict vt switching via ioctl() | 45 | * Restrict vt switching via ioctl() |
46 | * -- grif@cs.ucr.edu, 5-Dec-95 | 46 | * -- grif@cs.ucr.edu, 5-Dec-95 |
47 | * | 47 | * |
@@ -62,7 +62,8 @@ | |||
62 | * -- Russell King <rmk@arm.linux.org.uk> | 62 | * -- Russell King <rmk@arm.linux.org.uk> |
63 | * | 63 | * |
64 | * Move do_SAK() into process context. Less stack use in devfs functions. | 64 | * Move do_SAK() into process context. Less stack use in devfs functions. |
65 | * alloc_tty_struct() always uses kmalloc() -- Andrew Morton <andrewm@uow.edu.eu> 17Mar01 | 65 | * alloc_tty_struct() always uses kmalloc() |
66 | * -- Andrew Morton <andrewm@uow.edu.eu> 17Mar01 | ||
66 | */ | 67 | */ |
67 | 68 | ||
68 | #include <linux/types.h> | 69 | #include <linux/types.h> |
@@ -126,7 +127,7 @@ EXPORT_SYMBOL(tty_std_termios); | |||
126 | /* This list gets poked at by procfs and various bits of boot up code. This | 127 | /* This list gets poked at by procfs and various bits of boot up code. This |
127 | could do with some rationalisation such as pulling the tty proc function | 128 | could do with some rationalisation such as pulling the tty proc function |
128 | into this file */ | 129 | into this file */ |
129 | 130 | ||
130 | LIST_HEAD(tty_drivers); /* linked list of tty drivers */ | 131 | LIST_HEAD(tty_drivers); /* linked list of tty drivers */ |
131 | 132 | ||
132 | /* Mutex to protect creating and releasing a tty. This is shared with | 133 | /* Mutex to protect creating and releasing a tty. This is shared with |
@@ -136,7 +137,7 @@ EXPORT_SYMBOL(tty_mutex); | |||
136 | 137 | ||
137 | #ifdef CONFIG_UNIX98_PTYS | 138 | #ifdef CONFIG_UNIX98_PTYS |
138 | extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */ | 139 | extern struct tty_driver *ptm_driver; /* Unix98 pty masters; for /dev/ptmx */ |
139 | extern int pty_limit; /* Config limit on Unix98 ptys */ | 140 | extern int pty_limit; /* Config limit on Unix98 ptys */ |
140 | static DEFINE_IDR(allocated_ptys); | 141 | static DEFINE_IDR(allocated_ptys); |
141 | static DEFINE_MUTEX(allocated_ptys_lock); | 142 | static DEFINE_MUTEX(allocated_ptys_lock); |
142 | static int ptmx_open(struct inode *, struct file *); | 143 | static int ptmx_open(struct inode *, struct file *); |
@@ -146,19 +147,20 @@ static void initialize_tty_struct(struct tty_struct *tty); | |||
146 | 147 | ||
147 | static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *); | 148 | static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *); |
148 | static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *); | 149 | static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *); |
149 | ssize_t redirected_tty_write(struct file *, const char __user *, size_t, loff_t *); | 150 | ssize_t redirected_tty_write(struct file *, const char __user *, |
151 | size_t, loff_t *); | ||
150 | static unsigned int tty_poll(struct file *, poll_table *); | 152 | static unsigned int tty_poll(struct file *, poll_table *); |
151 | static int tty_open(struct inode *, struct file *); | 153 | static int tty_open(struct inode *, struct file *); |
152 | static int tty_release(struct inode *, struct file *); | 154 | static int tty_release(struct inode *, struct file *); |
153 | int tty_ioctl(struct inode * inode, struct file * file, | 155 | int tty_ioctl(struct inode *inode, struct file *file, |
154 | unsigned int cmd, unsigned long arg); | 156 | unsigned int cmd, unsigned long arg); |
155 | #ifdef CONFIG_COMPAT | 157 | #ifdef CONFIG_COMPAT |
156 | static long tty_compat_ioctl(struct file * file, unsigned int cmd, | 158 | static long tty_compat_ioctl(struct file *file, unsigned int cmd, |
157 | unsigned long arg); | 159 | unsigned long arg); |
158 | #else | 160 | #else |
159 | #define tty_compat_ioctl NULL | 161 | #define tty_compat_ioctl NULL |
160 | #endif | 162 | #endif |
161 | static int tty_fasync(int fd, struct file * filp, int on); | 163 | static int tty_fasync(int fd, struct file *filp, int on); |
162 | static void release_tty(struct tty_struct *tty, int idx); | 164 | static void release_tty(struct tty_struct *tty, int idx); |
163 | static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty); | 165 | static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty); |
164 | static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty); | 166 | static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty); |
@@ -244,7 +246,7 @@ static int check_tty_count(struct tty_struct *tty, const char *routine) | |||
244 | #ifdef CHECK_TTY_COUNT | 246 | #ifdef CHECK_TTY_COUNT |
245 | struct list_head *p; | 247 | struct list_head *p; |
246 | int count = 0; | 248 | int count = 0; |
247 | 249 | ||
248 | file_list_lock(); | 250 | file_list_lock(); |
249 | list_for_each(p, &tty->tty_files) { | 251 | list_for_each(p, &tty->tty_files) { |
250 | count++; | 252 | count++; |
@@ -281,11 +283,11 @@ static int check_tty_count(struct tty_struct *tty, const char *routine) | |||
281 | static void tty_buffer_free_all(struct tty_struct *tty) | 283 | static void tty_buffer_free_all(struct tty_struct *tty) |
282 | { | 284 | { |
283 | struct tty_buffer *thead; | 285 | struct tty_buffer *thead; |
284 | while((thead = tty->buf.head) != NULL) { | 286 | while ((thead = tty->buf.head) != NULL) { |
285 | tty->buf.head = thead->next; | 287 | tty->buf.head = thead->next; |
286 | kfree(thead); | 288 | kfree(thead); |
287 | } | 289 | } |
288 | while((thead = tty->buf.free) != NULL) { | 290 | while ((thead = tty->buf.free) != NULL) { |
289 | tty->buf.free = thead->next; | 291 | tty->buf.free = thead->next; |
290 | kfree(thead); | 292 | kfree(thead); |
291 | } | 293 | } |
@@ -331,7 +333,7 @@ static struct tty_buffer *tty_buffer_alloc(struct tty_struct *tty, size_t size) | |||
331 | if (tty->buf.memory_used + size > 65536) | 333 | if (tty->buf.memory_used + size > 65536) |
332 | return NULL; | 334 | return NULL; |
333 | p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC); | 335 | p = kmalloc(sizeof(struct tty_buffer) + 2 * size, GFP_ATOMIC); |
334 | if(p == NULL) | 336 | if (p == NULL) |
335 | return NULL; | 337 | return NULL; |
336 | p->used = 0; | 338 | p->used = 0; |
337 | p->size = size; | 339 | p->size = size; |
@@ -361,7 +363,7 @@ static void tty_buffer_free(struct tty_struct *tty, struct tty_buffer *b) | |||
361 | tty->buf.memory_used -= b->size; | 363 | tty->buf.memory_used -= b->size; |
362 | WARN_ON(tty->buf.memory_used < 0); | 364 | WARN_ON(tty->buf.memory_used < 0); |
363 | 365 | ||
364 | if(b->size >= 512) | 366 | if (b->size >= 512) |
365 | kfree(b); | 367 | kfree(b); |
366 | else { | 368 | else { |
367 | b->next = tty->buf.free; | 369 | b->next = tty->buf.free; |
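The hunk above keeps tty_buffer_free()'s recycling policy intact: buffers of 512 bytes or more go back to the allocator, smaller ones are pushed onto tty->buf.free for reuse. A minimal userspace sketch of that recycle-or-free decision follows; struct buf, buf_release() and free_list are hypothetical stand-ins, and no locking is shown.

#include <stdlib.h>

struct buf {
	struct buf *next;
	size_t size;
};

/* Hypothetical free-list head, standing in for tty->buf.free above. */
static struct buf *free_list;

static void buf_release(struct buf *b)
{
	if (b->size >= 512) {
		free(b);		/* large buffers go straight back */
	} else {
		b->next = free_list;	/* small ones are cached for reuse */
		free_list = b;
	}
}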
@@ -384,7 +386,7 @@ static void __tty_buffer_flush(struct tty_struct *tty) | |||
384 | { | 386 | { |
385 | struct tty_buffer *thead; | 387 | struct tty_buffer *thead; |
386 | 388 | ||
387 | while((thead = tty->buf.head) != NULL) { | 389 | while ((thead = tty->buf.head) != NULL) { |
388 | tty->buf.head = thead->next; | 390 | tty->buf.head = thead->next; |
389 | tty_buffer_free(tty, thead); | 391 | tty_buffer_free(tty, thead); |
390 | } | 392 | } |
@@ -436,9 +438,9 @@ static void tty_buffer_flush(struct tty_struct *tty) | |||
436 | static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size) | 438 | static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size) |
437 | { | 439 | { |
438 | struct tty_buffer **tbh = &tty->buf.free; | 440 | struct tty_buffer **tbh = &tty->buf.free; |
439 | while((*tbh) != NULL) { | 441 | while ((*tbh) != NULL) { |
440 | struct tty_buffer *t = *tbh; | 442 | struct tty_buffer *t = *tbh; |
441 | if(t->size >= size) { | 443 | if (t->size >= size) { |
442 | *tbh = t->next; | 444 | *tbh = t->next; |
443 | t->next = NULL; | 445 | t->next = NULL; |
444 | t->used = 0; | 446 | t->used = 0; |
@@ -450,7 +452,7 @@ static struct tty_buffer *tty_buffer_find(struct tty_struct *tty, size_t size) | |||
450 | tbh = &((*tbh)->next); | 452 | tbh = &((*tbh)->next); |
451 | } | 453 | } |
452 | /* Round the buffer size out */ | 454 | /* Round the buffer size out */ |
453 | size = (size + 0xFF) & ~ 0xFF; | 455 | size = (size + 0xFF) & ~0xFF; |
454 | return tty_buffer_alloc(tty, size); | 456 | return tty_buffer_alloc(tty, size); |
455 | /* Should possibly check if this fails for the largest buffer we | 457 | /* Should possibly check if this fails for the largest buffer we |
456 | have queued and recycle that ? */ | 458 | have queued and recycle that ? */ |
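The rounding in this hunk, (size + 0xFF) & ~0xFF, bumps the request up to the next multiple of 256 so recycled buffers come in a small number of standard sizes. A standalone check of what the expression produces (illustrative only, not part of the patch):

#include <stdio.h>

int main(void)
{
	size_t sizes[] = { 1, 255, 256, 300, 513 };
	int i;

	for (i = 0; i < 5; i++) {
		size_t rounded = (sizes[i] + 0xFF) & ~0xFF;
		printf("%zu -> %zu\n", sizes[i], rounded);
	}
	return 0;	/* prints 256, 256, 256, 512 and 768 */
}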
@@ -520,7 +522,7 @@ int tty_insert_flip_string(struct tty_struct *tty, const unsigned char *chars, | |||
520 | int space = tty_buffer_request_room(tty, size - copied); | 522 | int space = tty_buffer_request_room(tty, size - copied); |
521 | struct tty_buffer *tb = tty->buf.tail; | 523 | struct tty_buffer *tb = tty->buf.tail; |
522 | /* If there is no space then tb may be NULL */ | 524 | /* If there is no space then tb may be NULL */ |
523 | if(unlikely(space == 0)) | 525 | if (unlikely(space == 0)) |
524 | break; | 526 | break; |
525 | memcpy(tb->char_buf_ptr + tb->used, chars, space); | 527 | memcpy(tb->char_buf_ptr + tb->used, chars, space); |
526 | memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space); | 528 | memset(tb->flag_buf_ptr + tb->used, TTY_NORMAL, space); |
@@ -556,7 +558,7 @@ int tty_insert_flip_string_flags(struct tty_struct *tty, | |||
556 | int space = tty_buffer_request_room(tty, size - copied); | 558 | int space = tty_buffer_request_room(tty, size - copied); |
557 | struct tty_buffer *tb = tty->buf.tail; | 559 | struct tty_buffer *tb = tty->buf.tail; |
558 | /* If there is no space then tb may be NULL */ | 560 | /* If there is no space then tb may be NULL */ |
559 | if(unlikely(space == 0)) | 561 | if (unlikely(space == 0)) |
560 | break; | 562 | break; |
561 | memcpy(tb->char_buf_ptr + tb->used, chars, space); | 563 | memcpy(tb->char_buf_ptr + tb->used, chars, space); |
562 | memcpy(tb->flag_buf_ptr + tb->used, flags, space); | 564 | memcpy(tb->flag_buf_ptr + tb->used, flags, space); |
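Both tty_insert_flip_string() hunks above share the same chunked-copy shape: request room, copy whatever was granted, and stop early if nothing was. A userspace sketch of that loop; copy_in_chunks() and the request_room() callback are hypothetical, and request_room() is assumed never to grant more than it was asked for.

#include <stddef.h>
#include <string.h>

static size_t copy_in_chunks(char *dst, const char *src, size_t len,
			     size_t (*request_room)(size_t wanted))
{
	size_t copied = 0;

	while (copied < len) {
		size_t space = request_room(len - copied);

		if (space == 0)
			break;		/* no buffer available: stop early */
		memcpy(dst + copied, src + copied, space);
		copied += space;
	}
	return copied;			/* may be short of len */
}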
@@ -608,7 +610,8 @@ EXPORT_SYMBOL(tty_schedule_flip); | |||
608 | * Locking: May call functions taking tty->buf.lock | 610 | * Locking: May call functions taking tty->buf.lock |
609 | */ | 611 | */ |
610 | 612 | ||
611 | int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, size_t size) | 613 | int tty_prepare_flip_string(struct tty_struct *tty, unsigned char **chars, |
614 | size_t size) | ||
612 | { | 615 | { |
613 | int space = tty_buffer_request_room(tty, size); | 616 | int space = tty_buffer_request_room(tty, size); |
614 | if (likely(space)) { | 617 | if (likely(space)) { |
@@ -638,7 +641,8 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string); | |||
638 | * Locking: May call functions taking tty->buf.lock | 641 | * Locking: May call functions taking tty->buf.lock |
639 | */ | 642 | */ |
640 | 643 | ||
641 | int tty_prepare_flip_string_flags(struct tty_struct *tty, unsigned char **chars, char **flags, size_t size) | 644 | int tty_prepare_flip_string_flags(struct tty_struct *tty, |
645 | unsigned char **chars, char **flags, size_t size) | ||
642 | { | 646 | { |
643 | int space = tty_buffer_request_room(tty, size); | 647 | int space = tty_buffer_request_room(tty, size); |
644 | if (likely(space)) { | 648 | if (likely(space)) { |
@@ -660,12 +664,12 @@ EXPORT_SYMBOL_GPL(tty_prepare_flip_string_flags); | |||
660 | * @num: line discipline number | 664 | * @num: line discipline number |
661 | * | 665 | * |
662 | * This is probably overkill for real world processors but | 666 | * This is probably overkill for real world processors but |
663 | * they are not on hot paths so a little discipline won't do | 667 | * they are not on hot paths so a little discipline won't do |
664 | * any harm. | 668 | * any harm. |
665 | * | 669 | * |
666 | * Locking: takes termios_mutex | 670 | * Locking: takes termios_mutex |
667 | */ | 671 | */ |
668 | 672 | ||
669 | static void tty_set_termios_ldisc(struct tty_struct *tty, int num) | 673 | static void tty_set_termios_ldisc(struct tty_struct *tty, int num) |
670 | { | 674 | { |
671 | mutex_lock(&tty->termios_mutex); | 675 | mutex_lock(&tty->termios_mutex); |
@@ -678,10 +682,11 @@ static void tty_set_termios_ldisc(struct tty_struct *tty, int num) | |||
678 | * must be taken with irqs off because there are hangup path | 682 | * must be taken with irqs off because there are hangup path |
679 | * callers who will do ldisc lookups and cannot sleep. | 683 | * callers who will do ldisc lookups and cannot sleep. |
680 | */ | 684 | */ |
681 | 685 | ||
682 | static DEFINE_SPINLOCK(tty_ldisc_lock); | 686 | static DEFINE_SPINLOCK(tty_ldisc_lock); |
683 | static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); | 687 | static DECLARE_WAIT_QUEUE_HEAD(tty_ldisc_wait); |
684 | static struct tty_ldisc tty_ldiscs[NR_LDISCS]; /* line disc dispatch table */ | 688 | /* Line disc dispatch table */ |
689 | static struct tty_ldisc tty_ldiscs[NR_LDISCS]; | ||
685 | 690 | ||
686 | /** | 691 | /** |
687 | * tty_register_ldisc - install a line discipline | 692 | * tty_register_ldisc - install a line discipline |
@@ -700,17 +705,17 @@ int tty_register_ldisc(int disc, struct tty_ldisc *new_ldisc) | |||
700 | { | 705 | { |
701 | unsigned long flags; | 706 | unsigned long flags; |
702 | int ret = 0; | 707 | int ret = 0; |
703 | 708 | ||
704 | if (disc < N_TTY || disc >= NR_LDISCS) | 709 | if (disc < N_TTY || disc >= NR_LDISCS) |
705 | return -EINVAL; | 710 | return -EINVAL; |
706 | 711 | ||
707 | spin_lock_irqsave(&tty_ldisc_lock, flags); | 712 | spin_lock_irqsave(&tty_ldisc_lock, flags); |
708 | tty_ldiscs[disc] = *new_ldisc; | 713 | tty_ldiscs[disc] = *new_ldisc; |
709 | tty_ldiscs[disc].num = disc; | 714 | tty_ldiscs[disc].num = disc; |
710 | tty_ldiscs[disc].flags |= LDISC_FLAG_DEFINED; | 715 | tty_ldiscs[disc].flags |= LDISC_FLAG_DEFINED; |
711 | tty_ldiscs[disc].refcount = 0; | 716 | tty_ldiscs[disc].refcount = 0; |
712 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | 717 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); |
713 | 718 | ||
714 | return ret; | 719 | return ret; |
715 | } | 720 | } |
716 | EXPORT_SYMBOL(tty_register_ldisc); | 721 | EXPORT_SYMBOL(tty_register_ldisc); |
@@ -766,20 +771,18 @@ struct tty_ldisc *tty_ldisc_get(int disc) | |||
766 | 771 | ||
767 | if (disc < N_TTY || disc >= NR_LDISCS) | 772 | if (disc < N_TTY || disc >= NR_LDISCS) |
768 | return NULL; | 773 | return NULL; |
769 | 774 | ||
770 | spin_lock_irqsave(&tty_ldisc_lock, flags); | 775 | spin_lock_irqsave(&tty_ldisc_lock, flags); |
771 | 776 | ||
772 | ld = &tty_ldiscs[disc]; | 777 | ld = &tty_ldiscs[disc]; |
773 | /* Check the entry is defined */ | 778 | /* Check the entry is defined */ |
774 | if(ld->flags & LDISC_FLAG_DEFINED) | 779 | if (ld->flags & LDISC_FLAG_DEFINED) { |
775 | { | ||
776 | /* If the module is being unloaded we can't use it */ | 780 | /* If the module is being unloaded we can't use it */ |
777 | if (!try_module_get(ld->owner)) | 781 | if (!try_module_get(ld->owner)) |
778 | ld = NULL; | 782 | ld = NULL; |
779 | else /* lock it */ | 783 | else /* lock it */ |
780 | ld->refcount++; | 784 | ld->refcount++; |
781 | } | 785 | } else |
782 | else | ||
783 | ld = NULL; | 786 | ld = NULL; |
784 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | 787 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); |
785 | return ld; | 788 | return ld; |
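tty_ldisc_get() above combines two guards under one spinlock: the table entry must be defined, and try_module_get() must succeed so the owning module cannot be unloaded while a reference is out. A stripped-down sketch of that pin-then-count pattern; struct slot, slot_get() and slot_put() are hypothetical.

#include <linux/module.h>
#include <linux/spinlock.h>

struct slot {
	struct module *owner;
	int refcount;
};

static DEFINE_SPINLOCK(slot_lock);

static struct slot *slot_get(struct slot *s)
{
	unsigned long flags;

	spin_lock_irqsave(&slot_lock, flags);
	if (!try_module_get(s->owner))
		s = NULL;		/* owner is being unloaded */
	else
		s->refcount++;		/* pinned: safe to hand out */
	spin_unlock_irqrestore(&slot_lock, flags);
	return s;
}

static void slot_put(struct slot *s)
{
	unsigned long flags;

	spin_lock_irqsave(&slot_lock, flags);
	s->refcount--;
	module_put(s->owner);		/* allow unload again */
	spin_unlock_irqrestore(&slot_lock, flags);
}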
@@ -802,9 +805,9 @@ void tty_ldisc_put(int disc) | |||
802 | { | 805 | { |
803 | struct tty_ldisc *ld; | 806 | struct tty_ldisc *ld; |
804 | unsigned long flags; | 807 | unsigned long flags; |
805 | 808 | ||
806 | BUG_ON(disc < N_TTY || disc >= NR_LDISCS); | 809 | BUG_ON(disc < N_TTY || disc >= NR_LDISCS); |
807 | 810 | ||
808 | spin_lock_irqsave(&tty_ldisc_lock, flags); | 811 | spin_lock_irqsave(&tty_ldisc_lock, flags); |
809 | ld = &tty_ldiscs[disc]; | 812 | ld = &tty_ldiscs[disc]; |
810 | BUG_ON(ld->refcount == 0); | 813 | BUG_ON(ld->refcount == 0); |
@@ -812,7 +815,7 @@ void tty_ldisc_put(int disc) | |||
812 | module_put(ld->owner); | 815 | module_put(ld->owner); |
813 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | 816 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); |
814 | } | 817 | } |
815 | 818 | ||
816 | EXPORT_SYMBOL_GPL(tty_ldisc_put); | 819 | EXPORT_SYMBOL_GPL(tty_ldisc_put); |
817 | 820 | ||
818 | /** | 821 | /** |
@@ -851,11 +854,10 @@ static int tty_ldisc_try(struct tty_struct *tty) | |||
851 | unsigned long flags; | 854 | unsigned long flags; |
852 | struct tty_ldisc *ld; | 855 | struct tty_ldisc *ld; |
853 | int ret = 0; | 856 | int ret = 0; |
854 | 857 | ||
855 | spin_lock_irqsave(&tty_ldisc_lock, flags); | 858 | spin_lock_irqsave(&tty_ldisc_lock, flags); |
856 | ld = &tty->ldisc; | 859 | ld = &tty->ldisc; |
857 | if(test_bit(TTY_LDISC, &tty->flags)) | 860 | if (test_bit(TTY_LDISC, &tty->flags)) { |
858 | { | ||
859 | ld->refcount++; | 861 | ld->refcount++; |
860 | ret = 1; | 862 | ret = 1; |
861 | } | 863 | } |
@@ -867,8 +869,8 @@ static int tty_ldisc_try(struct tty_struct *tty) | |||
867 | * tty_ldisc_ref_wait - wait for the tty ldisc | 869 | * tty_ldisc_ref_wait - wait for the tty ldisc |
868 | * @tty: tty device | 870 | * @tty: tty device |
869 | * | 871 | * |
870 | * Dereference the line discipline for the terminal and take a | 872 | * Dereference the line discipline for the terminal and take a |
871 | * reference to it. If the line discipline is in flux then | 873 | * reference to it. If the line discipline is in flux then |
872 | * wait patiently until it changes. | 874 | * wait patiently until it changes. |
873 | * | 875 | * |
874 | * Note: Must not be called from an IRQ/timer context. The caller | 876 | * Note: Must not be called from an IRQ/timer context. The caller |
@@ -878,12 +880,12 @@ static int tty_ldisc_try(struct tty_struct *tty) | |||
878 | * | 880 | * |
879 | * Locking: call functions take tty_ldisc_lock | 881 | * Locking: call functions take tty_ldisc_lock |
880 | */ | 882 | */ |
881 | 883 | ||
882 | struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) | 884 | struct tty_ldisc *tty_ldisc_ref_wait(struct tty_struct *tty) |
883 | { | 885 | { |
884 | /* wait_event is a macro */ | 886 | /* wait_event is a macro */ |
885 | wait_event(tty_ldisc_wait, tty_ldisc_try(tty)); | 887 | wait_event(tty_ldisc_wait, tty_ldisc_try(tty)); |
886 | if(tty->ldisc.refcount == 0) | 888 | if (tty->ldisc.refcount == 0) |
887 | printk(KERN_ERR "tty_ldisc_ref_wait\n"); | 889 | printk(KERN_ERR "tty_ldisc_ref_wait\n"); |
888 | return &tty->ldisc; | 890 | return &tty->ldisc; |
889 | } | 891 | } |
@@ -894,16 +896,16 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref_wait); | |||
894 | * tty_ldisc_ref - get the tty ldisc | 896 | * tty_ldisc_ref - get the tty ldisc |
895 | * @tty: tty device | 897 | * @tty: tty device |
896 | * | 898 | * |
897 | * Dereference the line discipline for the terminal and take a | 899 | * Dereference the line discipline for the terminal and take a |
898 | * reference to it. If the line discipline is in flux then | 900 | * reference to it. If the line discipline is in flux then |
899 | * return NULL. Can be called from IRQ and timer functions. | 901 | * return NULL. Can be called from IRQ and timer functions. |
900 | * | 902 | * |
901 | * Locking: called functions take tty_ldisc_lock | 903 | * Locking: called functions take tty_ldisc_lock |
902 | */ | 904 | */ |
903 | 905 | ||
904 | struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty) | 906 | struct tty_ldisc *tty_ldisc_ref(struct tty_struct *tty) |
905 | { | 907 | { |
906 | if(tty_ldisc_try(tty)) | 908 | if (tty_ldisc_try(tty)) |
907 | return &tty->ldisc; | 909 | return &tty->ldisc; |
908 | return NULL; | 910 | return NULL; |
909 | } | 911 | } |
@@ -919,19 +921,19 @@ EXPORT_SYMBOL_GPL(tty_ldisc_ref); | |||
919 | * | 921 | * |
920 | * Locking: takes tty_ldisc_lock | 922 | * Locking: takes tty_ldisc_lock |
921 | */ | 923 | */ |
922 | 924 | ||
923 | void tty_ldisc_deref(struct tty_ldisc *ld) | 925 | void tty_ldisc_deref(struct tty_ldisc *ld) |
924 | { | 926 | { |
925 | unsigned long flags; | 927 | unsigned long flags; |
926 | 928 | ||
927 | BUG_ON(ld == NULL); | 929 | BUG_ON(ld == NULL); |
928 | 930 | ||
929 | spin_lock_irqsave(&tty_ldisc_lock, flags); | 931 | spin_lock_irqsave(&tty_ldisc_lock, flags); |
930 | if(ld->refcount == 0) | 932 | if (ld->refcount == 0) |
931 | printk(KERN_ERR "tty_ldisc_deref: no references.\n"); | 933 | printk(KERN_ERR "tty_ldisc_deref: no references.\n"); |
932 | else | 934 | else |
933 | ld->refcount--; | 935 | ld->refcount--; |
934 | if(ld->refcount == 0) | 936 | if (ld->refcount == 0) |
935 | wake_up(&tty_ldisc_wait); | 937 | wake_up(&tty_ldisc_wait); |
936 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | 938 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); |
937 | } | 939 | } |
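The ref/deref hunks above implement a small handshake: references are only granted while the TTY_LDISC bit is set, and the last tty_ldisc_deref() wakes anyone sleeping on tty_ldisc_wait. A minimal sketch of that handshake with hypothetical names (an int flag plays the role of the TTY_LDISC bit):

#include <linux/spinlock.h>
#include <linux/wait.h>

static DEFINE_SPINLOCK(ref_lock);
static DECLARE_WAIT_QUEUE_HEAD(ref_waitq);
static int refcount;
static int enabled;			/* stands in for the TTY_LDISC bit */

static int ref_try(void)
{
	unsigned long flags;
	int ok = 0;

	spin_lock_irqsave(&ref_lock, flags);
	if (enabled) {
		refcount++;
		ok = 1;
	}
	spin_unlock_irqrestore(&ref_lock, flags);
	return ok;
}

static void ref_wait_for(void)
{
	/* Sleeps until ref_try() succeeds; not for IRQ context. */
	wait_event(ref_waitq, ref_try());
}

static void ref_drop(void)
{
	unsigned long flags;

	spin_lock_irqsave(&ref_lock, flags);
	if (--refcount == 0)
		wake_up(&ref_waitq);	/* a pending ldisc change may proceed */
	spin_unlock_irqrestore(&ref_lock, flags);
}

static void ref_enable(void)
{
	enabled = 1;
	wake_up(&ref_waitq);		/* lets ref_wait_for() callers in */
}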
@@ -954,7 +956,7 @@ static void tty_ldisc_enable(struct tty_struct *tty) | |||
954 | set_bit(TTY_LDISC, &tty->flags); | 956 | set_bit(TTY_LDISC, &tty->flags); |
955 | wake_up(&tty_ldisc_wait); | 957 | wake_up(&tty_ldisc_wait); |
956 | } | 958 | } |
957 | 959 | ||
958 | /** | 960 | /** |
959 | * tty_set_ldisc - set line discipline | 961 | * tty_set_ldisc - set line discipline |
960 | * @tty: the terminal to set | 962 | * @tty: the terminal to set |
@@ -966,7 +968,7 @@ static void tty_ldisc_enable(struct tty_struct *tty) | |||
966 | * Locking: takes tty_ldisc_lock. | 968 | * Locking: takes tty_ldisc_lock. |
967 | * called functions take termios_mutex | 969 | * called functions take termios_mutex |
968 | */ | 970 | */ |
969 | 971 | ||
970 | static int tty_set_ldisc(struct tty_struct *tty, int ldisc) | 972 | static int tty_set_ldisc(struct tty_struct *tty, int ldisc) |
971 | { | 973 | { |
972 | int retval = 0; | 974 | int retval = 0; |
@@ -1022,7 +1024,7 @@ restart: | |||
1022 | 1024 | ||
1023 | spin_lock_irqsave(&tty_ldisc_lock, flags); | 1025 | spin_lock_irqsave(&tty_ldisc_lock, flags); |
1024 | if (tty->ldisc.refcount || (o_tty && o_tty->ldisc.refcount)) { | 1026 | if (tty->ldisc.refcount || (o_tty && o_tty->ldisc.refcount)) { |
1025 | if(tty->ldisc.refcount) { | 1027 | if (tty->ldisc.refcount) { |
1026 | /* Free the new ldisc we grabbed. Must drop the lock | 1028 | /* Free the new ldisc we grabbed. Must drop the lock |
1027 | first. */ | 1029 | first. */ |
1028 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | 1030 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); |
@@ -1031,14 +1033,14 @@ restart: | |||
1031 | * There are several reasons we may be busy, including | 1033 | * There are several reasons we may be busy, including |
1032 | * random momentary I/O traffic. We must therefore | 1034 | * random momentary I/O traffic. We must therefore |
1033 | * retry. We could distinguish between blocking ops | 1035 | * retry. We could distinguish between blocking ops |
1034 | * and retries if we made tty_ldisc_wait() smarter. That | 1036 | * and retries if we made tty_ldisc_wait() smarter. |
1035 | * is up for discussion. | 1037 | * That is up for discussion. |
1036 | */ | 1038 | */ |
1037 | if (wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0) | 1039 | if (wait_event_interruptible(tty_ldisc_wait, tty->ldisc.refcount == 0) < 0) |
1038 | return -ERESTARTSYS; | 1040 | return -ERESTARTSYS; |
1039 | goto restart; | 1041 | goto restart; |
1040 | } | 1042 | } |
1041 | if(o_tty && o_tty->ldisc.refcount) { | 1043 | if (o_tty && o_tty->ldisc.refcount) { |
1042 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | 1044 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); |
1043 | tty_ldisc_put(ldisc); | 1045 | tty_ldisc_put(ldisc); |
1044 | if (wait_event_interruptible(tty_ldisc_wait, o_tty->ldisc.refcount == 0) < 0) | 1046 | if (wait_event_interruptible(tty_ldisc_wait, o_tty->ldisc.refcount == 0) < 0) |
@@ -1046,9 +1048,10 @@ restart: | |||
1046 | goto restart; | 1048 | goto restart; |
1047 | } | 1049 | } |
1048 | } | 1050 | } |
1049 | 1051 | /* | |
1050 | /* if the TTY_LDISC bit is set, then we are racing against another ldisc change */ | 1052 | * If the TTY_LDISC bit is set, then we are racing against |
1051 | 1053 | * another ldisc change | |
1054 | */ | ||
1052 | if (!test_bit(TTY_LDISC, &tty->flags)) { | 1055 | if (!test_bit(TTY_LDISC, &tty->flags)) { |
1053 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | 1056 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); |
1054 | tty_ldisc_put(ldisc); | 1057 | tty_ldisc_put(ldisc); |
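The tty_set_ldisc() hunks above retry from scratch whenever a reference is still out or another change is racing: drop the lock, sleep until the count drains, then goto restart and re-validate everything. A bare sketch of that retry shape; the busy flag, wait queue and do_change() are hypothetical.

#include <linux/errno.h>
#include <linux/wait.h>

static DECLARE_WAIT_QUEUE_HEAD(busy_waitq);
static int busy;

static int do_change(void)
{
restart:
	if (busy) {
		/* interruptible: a signal aborts with -ERESTARTSYS */
		if (wait_event_interruptible(busy_waitq, !busy) < 0)
			return -ERESTARTSYS;
		goto restart;		/* conditions may have changed: re-check */
	}
	/* ... perform the change while nothing is busy ... */
	return 0;
}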
@@ -1072,7 +1075,6 @@ restart: | |||
1072 | /* | 1075 | /* |
1073 | * Wait for ->hangup_work and ->buf.work handlers to terminate | 1076 | * Wait for ->hangup_work and ->buf.work handlers to terminate |
1074 | */ | 1077 | */ |
1075 | |||
1076 | flush_scheduled_work(); | 1078 | flush_scheduled_work(); |
1077 | /* Shutdown the current discipline. */ | 1079 | /* Shutdown the current discipline. */ |
1078 | if (tty->ldisc.close) | 1080 | if (tty->ldisc.close) |
@@ -1106,21 +1108,21 @@ restart: | |||
1106 | /* At this point we hold a reference to the new ldisc and a | 1108 | /* At this point we hold a reference to the new ldisc and a |
1107 | a reference to the old ldisc. If we ended up flipping back | 1109 | a reference to the old ldisc. If we ended up flipping back |
1108 | to the existing ldisc we have two references to it */ | 1110 | to the existing ldisc we have two references to it */ |
1109 | 1111 | ||
1110 | if (tty->ldisc.num != o_ldisc.num && tty->driver->set_ldisc) | 1112 | if (tty->ldisc.num != o_ldisc.num && tty->driver->set_ldisc) |
1111 | tty->driver->set_ldisc(tty); | 1113 | tty->driver->set_ldisc(tty); |
1112 | 1114 | ||
1113 | tty_ldisc_put(o_ldisc.num); | 1115 | tty_ldisc_put(o_ldisc.num); |
1114 | 1116 | ||
1115 | /* | 1117 | /* |
1116 | * Allow ldisc referencing to occur as soon as the driver | 1118 | * Allow ldisc referencing to occur as soon as the driver |
1117 | * ldisc callback completes. | 1119 | * ldisc callback completes. |
1118 | */ | 1120 | */ |
1119 | 1121 | ||
1120 | tty_ldisc_enable(tty); | 1122 | tty_ldisc_enable(tty); |
1121 | if (o_tty) | 1123 | if (o_tty) |
1122 | tty_ldisc_enable(o_tty); | 1124 | tty_ldisc_enable(o_tty); |
1123 | 1125 | ||
1124 | /* Restart it in case no characters kick it off. Safe if | 1126 | /* Restart it in case no characters kick it off. Safe if |
1125 | already running */ | 1127 | already running */ |
1126 | if (work) | 1128 | if (work) |
@@ -1164,7 +1166,7 @@ static struct tty_driver *get_tty_driver(dev_t device, int *index) | |||
1164 | * Locking: none | 1166 | * Locking: none |
1165 | */ | 1167 | */ |
1166 | 1168 | ||
1167 | int tty_check_change(struct tty_struct * tty) | 1169 | int tty_check_change(struct tty_struct *tty) |
1168 | { | 1170 | { |
1169 | if (current->signal->tty != tty) | 1171 | if (current->signal->tty != tty) |
1170 | return 0; | 1172 | return 0; |
@@ -1185,31 +1187,31 @@ int tty_check_change(struct tty_struct * tty) | |||
1185 | 1187 | ||
1186 | EXPORT_SYMBOL(tty_check_change); | 1188 | EXPORT_SYMBOL(tty_check_change); |
1187 | 1189 | ||
1188 | static ssize_t hung_up_tty_read(struct file * file, char __user * buf, | 1190 | static ssize_t hung_up_tty_read(struct file *file, char __user *buf, |
1189 | size_t count, loff_t *ppos) | 1191 | size_t count, loff_t *ppos) |
1190 | { | 1192 | { |
1191 | return 0; | 1193 | return 0; |
1192 | } | 1194 | } |
1193 | 1195 | ||
1194 | static ssize_t hung_up_tty_write(struct file * file, const char __user * buf, | 1196 | static ssize_t hung_up_tty_write(struct file *file, const char __user *buf, |
1195 | size_t count, loff_t *ppos) | 1197 | size_t count, loff_t *ppos) |
1196 | { | 1198 | { |
1197 | return -EIO; | 1199 | return -EIO; |
1198 | } | 1200 | } |
1199 | 1201 | ||
1200 | /* No kernel lock held - none needed ;) */ | 1202 | /* No kernel lock held - none needed ;) */ |
1201 | static unsigned int hung_up_tty_poll(struct file * filp, poll_table * wait) | 1203 | static unsigned int hung_up_tty_poll(struct file *filp, poll_table *wait) |
1202 | { | 1204 | { |
1203 | return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM; | 1205 | return POLLIN | POLLOUT | POLLERR | POLLHUP | POLLRDNORM | POLLWRNORM; |
1204 | } | 1206 | } |
1205 | 1207 | ||
1206 | static int hung_up_tty_ioctl(struct inode * inode, struct file * file, | 1208 | static int hung_up_tty_ioctl(struct inode *inode, struct file *file, |
1207 | unsigned int cmd, unsigned long arg) | 1209 | unsigned int cmd, unsigned long arg) |
1208 | { | 1210 | { |
1209 | return cmd == TIOCSPGRP ? -ENOTTY : -EIO; | 1211 | return cmd == TIOCSPGRP ? -ENOTTY : -EIO; |
1210 | } | 1212 | } |
1211 | 1213 | ||
1212 | static long hung_up_tty_compat_ioctl(struct file * file, | 1214 | static long hung_up_tty_compat_ioctl(struct file *file, |
1213 | unsigned int cmd, unsigned long arg) | 1215 | unsigned int cmd, unsigned long arg) |
1214 | { | 1216 | { |
1215 | return cmd == TIOCSPGRP ? -ENOTTY : -EIO; | 1217 | return cmd == TIOCSPGRP ? -ENOTTY : -EIO; |
@@ -1274,15 +1276,15 @@ static struct file *redirect; | |||
1274 | * informs the line discipline if present that the driver is ready | 1276 | * informs the line discipline if present that the driver is ready |
1275 | * to receive more output data. | 1277 | * to receive more output data. |
1276 | */ | 1278 | */ |
1277 | 1279 | ||
1278 | void tty_wakeup(struct tty_struct *tty) | 1280 | void tty_wakeup(struct tty_struct *tty) |
1279 | { | 1281 | { |
1280 | struct tty_ldisc *ld; | 1282 | struct tty_ldisc *ld; |
1281 | 1283 | ||
1282 | if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) { | 1284 | if (test_bit(TTY_DO_WRITE_WAKEUP, &tty->flags)) { |
1283 | ld = tty_ldisc_ref(tty); | 1285 | ld = tty_ldisc_ref(tty); |
1284 | if(ld) { | 1286 | if (ld) { |
1285 | if(ld->write_wakeup) | 1287 | if (ld->write_wakeup) |
1286 | ld->write_wakeup(tty); | 1288 | ld->write_wakeup(tty); |
1287 | tty_ldisc_deref(ld); | 1289 | tty_ldisc_deref(ld); |
1288 | } | 1290 | } |
@@ -1299,12 +1301,12 @@ EXPORT_SYMBOL_GPL(tty_wakeup); | |||
1299 | * Flush the line discipline queue (if any) for this tty. If there | 1301 | * Flush the line discipline queue (if any) for this tty. If there |
1300 | * is no line discipline active this is a no-op. | 1302 | * is no line discipline active this is a no-op. |
1301 | */ | 1303 | */ |
1302 | 1304 | ||
1303 | void tty_ldisc_flush(struct tty_struct *tty) | 1305 | void tty_ldisc_flush(struct tty_struct *tty) |
1304 | { | 1306 | { |
1305 | struct tty_ldisc *ld = tty_ldisc_ref(tty); | 1307 | struct tty_ldisc *ld = tty_ldisc_ref(tty); |
1306 | if(ld) { | 1308 | if (ld) { |
1307 | if(ld->flush_buffer) | 1309 | if (ld->flush_buffer) |
1308 | ld->flush_buffer(tty); | 1310 | ld->flush_buffer(tty); |
1309 | tty_ldisc_deref(ld); | 1311 | tty_ldisc_deref(ld); |
1310 | } | 1312 | } |
@@ -1328,7 +1330,7 @@ static void tty_reset_termios(struct tty_struct *tty) | |||
1328 | tty->termios->c_ospeed = tty_termios_baud_rate(tty->termios); | 1330 | tty->termios->c_ospeed = tty_termios_baud_rate(tty->termios); |
1329 | mutex_unlock(&tty->termios_mutex); | 1331 | mutex_unlock(&tty->termios_mutex); |
1330 | } | 1332 | } |
1331 | 1333 | ||
1332 | /** | 1334 | /** |
1333 | * do_tty_hangup - actual handler for hangup events | 1335 | * do_tty_hangup - actual handler for hangup events |
1334 | * @work: tty device | 1336 | * @work: tty device |
@@ -1355,7 +1357,7 @@ static void do_tty_hangup(struct work_struct *work) | |||
1355 | { | 1357 | { |
1356 | struct tty_struct *tty = | 1358 | struct tty_struct *tty = |
1357 | container_of(work, struct tty_struct, hangup_work); | 1359 | container_of(work, struct tty_struct, hangup_work); |
1358 | struct file * cons_filp = NULL; | 1360 | struct file *cons_filp = NULL; |
1359 | struct file *filp, *f = NULL; | 1361 | struct file *filp, *f = NULL; |
1360 | struct task_struct *p; | 1362 | struct task_struct *p; |
1361 | struct tty_ldisc *ld; | 1363 | struct tty_ldisc *ld; |
@@ -1373,7 +1375,7 @@ static void do_tty_hangup(struct work_struct *work) | |||
1373 | redirect = NULL; | 1375 | redirect = NULL; |
1374 | } | 1376 | } |
1375 | spin_unlock(&redirect_lock); | 1377 | spin_unlock(&redirect_lock); |
1376 | 1378 | ||
1377 | check_tty_count(tty, "do_tty_hangup"); | 1379 | check_tty_count(tty, "do_tty_hangup"); |
1378 | file_list_lock(); | 1380 | file_list_lock(); |
1379 | /* This breaks for file handles being sent over AF_UNIX sockets ? */ | 1381 | /* This breaks for file handles being sent over AF_UNIX sockets ? */ |
@@ -1387,13 +1389,14 @@ static void do_tty_hangup(struct work_struct *work) | |||
1387 | filp->f_op = &hung_up_tty_fops; | 1389 | filp->f_op = &hung_up_tty_fops; |
1388 | } | 1390 | } |
1389 | file_list_unlock(); | 1391 | file_list_unlock(); |
1390 | 1392 | /* | |
1391 | /* FIXME! What are the locking issues here? This may be overdoing things.. | 1393 | * FIXME! What are the locking issues here? This may be overdoing |
1392 | * this question is especially important now that we've removed the irqlock. */ | 1394 | * things... This question is especially important now that we've |
1393 | 1395 | * removed the irqlock. | |
1396 | */ | ||
1394 | ld = tty_ldisc_ref(tty); | 1397 | ld = tty_ldisc_ref(tty); |
1395 | if(ld != NULL) /* We may have no line discipline at this point */ | 1398 | if (ld != NULL) { |
1396 | { | 1399 | /* We may have no line discipline at this point */ |
1397 | if (ld->flush_buffer) | 1400 | if (ld->flush_buffer) |
1398 | ld->flush_buffer(tty); | 1401 | ld->flush_buffer(tty); |
1399 | if (tty->driver->flush_buffer) | 1402 | if (tty->driver->flush_buffer) |
@@ -1404,26 +1407,24 @@ static void do_tty_hangup(struct work_struct *work) | |||
1404 | if (ld->hangup) | 1407 | if (ld->hangup) |
1405 | ld->hangup(tty); | 1408 | ld->hangup(tty); |
1406 | } | 1409 | } |
1407 | 1410 | /* | |
1408 | /* FIXME: Once we trust the LDISC code better we can wait here for | 1411 | * FIXME: Once we trust the LDISC code better we can wait here for |
1409 | ldisc completion and fix the driver call race */ | 1412 | * ldisc completion and fix the driver call race |
1410 | 1413 | */ | |
1411 | wake_up_interruptible(&tty->write_wait); | 1414 | wake_up_interruptible(&tty->write_wait); |
1412 | wake_up_interruptible(&tty->read_wait); | 1415 | wake_up_interruptible(&tty->read_wait); |
1413 | |||
1414 | /* | 1416 | /* |
1415 | * Shutdown the current line discipline, and reset it to | 1417 | * Shutdown the current line discipline, and reset it to |
1416 | * N_TTY. | 1418 | * N_TTY. |
1417 | */ | 1419 | */ |
1418 | if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) | 1420 | if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) |
1419 | tty_reset_termios(tty); | 1421 | tty_reset_termios(tty); |
1420 | |||
1421 | /* Defer ldisc switch */ | 1422 | /* Defer ldisc switch */ |
1422 | /* tty_deferred_ldisc_switch(N_TTY); | 1423 | /* tty_deferred_ldisc_switch(N_TTY); |
1423 | 1424 | ||
1424 | This should get done automatically when the port closes and | 1425 | This should get done automatically when the port closes and |
1425 | tty_release is called */ | 1426 | tty_release is called */ |
1426 | 1427 | ||
1427 | read_lock(&tasklist_lock); | 1428 | read_lock(&tasklist_lock); |
1428 | if (tty->session) { | 1429 | if (tty->session) { |
1429 | do_each_pid_task(tty->session, PIDTYPE_SID, p) { | 1430 | do_each_pid_task(tty->session, PIDTYPE_SID, p) { |
@@ -1451,10 +1452,10 @@ static void do_tty_hangup(struct work_struct *work) | |||
1451 | tty->pgrp = NULL; | 1452 | tty->pgrp = NULL; |
1452 | tty->ctrl_status = 0; | 1453 | tty->ctrl_status = 0; |
1453 | /* | 1454 | /* |
1454 | * If one of the devices matches a console pointer, we | 1455 | * If one of the devices matches a console pointer, we |
1455 | * cannot just call hangup() because that will cause | 1456 | * cannot just call hangup() because that will cause |
1456 | * tty->count and state->count to go out of sync. | 1457 | * tty->count and state->count to go out of sync. |
1457 | * So we just call close() the right number of times. | 1458 | * So we just call close() the right number of times. |
1458 | */ | 1459 | */ |
1459 | if (cons_filp) { | 1460 | if (cons_filp) { |
1460 | if (tty->driver->close) | 1461 | if (tty->driver->close) |
@@ -1462,12 +1463,12 @@ static void do_tty_hangup(struct work_struct *work) | |||
1462 | tty->driver->close(tty, cons_filp); | 1463 | tty->driver->close(tty, cons_filp); |
1463 | } else if (tty->driver->hangup) | 1464 | } else if (tty->driver->hangup) |
1464 | (tty->driver->hangup)(tty); | 1465 | (tty->driver->hangup)(tty); |
1465 | 1466 | /* | |
1466 | /* We don't want to have driver/ldisc interactions beyond | 1467 | * We don't want to have driver/ldisc interactions beyond |
1467 | the ones we did here. The driver layer expects no | 1468 | * the ones we did here. The driver layer expects no |
1468 | calls after ->hangup() from the ldisc side. However we | 1469 | * calls after ->hangup() from the ldisc side. However we |
1469 | can't yet guarantee all that */ | 1470 | * can't yet guarantee all that. |
1470 | 1471 | */ | |
1471 | set_bit(TTY_HUPPED, &tty->flags); | 1472 | set_bit(TTY_HUPPED, &tty->flags); |
1472 | if (ld) { | 1473 | if (ld) { |
1473 | tty_ldisc_enable(tty); | 1474 | tty_ldisc_enable(tty); |
@@ -1486,11 +1487,10 @@ static void do_tty_hangup(struct work_struct *work) | |||
1486 | * schedule a hangup sequence to run after this event. | 1487 | * schedule a hangup sequence to run after this event. |
1487 | */ | 1488 | */ |
1488 | 1489 | ||
1489 | void tty_hangup(struct tty_struct * tty) | 1490 | void tty_hangup(struct tty_struct *tty) |
1490 | { | 1491 | { |
1491 | #ifdef TTY_DEBUG_HANGUP | 1492 | #ifdef TTY_DEBUG_HANGUP |
1492 | char buf[64]; | 1493 | char buf[64]; |
1493 | |||
1494 | printk(KERN_DEBUG "%s hangup...\n", tty_name(tty, buf)); | 1494 | printk(KERN_DEBUG "%s hangup...\n", tty_name(tty, buf)); |
1495 | #endif | 1495 | #endif |
1496 | schedule_work(&tty->hangup_work); | 1496 | schedule_work(&tty->hangup_work); |
@@ -1507,7 +1507,7 @@ EXPORT_SYMBOL(tty_hangup); | |||
1507 | * is complete. That guarantee is necessary for security reasons. | 1507 | * is complete. That guarantee is necessary for security reasons. |
1508 | */ | 1508 | */ |
1509 | 1509 | ||
1510 | void tty_vhangup(struct tty_struct * tty) | 1510 | void tty_vhangup(struct tty_struct *tty) |
1511 | { | 1511 | { |
1512 | #ifdef TTY_DEBUG_HANGUP | 1512 | #ifdef TTY_DEBUG_HANGUP |
1513 | char buf[64]; | 1513 | char buf[64]; |
@@ -1516,6 +1516,7 @@ void tty_vhangup(struct tty_struct * tty) | |||
1516 | #endif | 1516 | #endif |
1517 | do_tty_hangup(&tty->hangup_work); | 1517 | do_tty_hangup(&tty->hangup_work); |
1518 | } | 1518 | } |
1519 | |||
1519 | EXPORT_SYMBOL(tty_vhangup); | 1520 | EXPORT_SYMBOL(tty_vhangup); |
1520 | 1521 | ||
1521 | /** | 1522 | /** |
@@ -1526,7 +1527,7 @@ EXPORT_SYMBOL(tty_vhangup); | |||
1526 | * loss | 1527 | * loss |
1527 | */ | 1528 | */ |
1528 | 1529 | ||
1529 | int tty_hung_up_p(struct file * filp) | 1530 | int tty_hung_up_p(struct file *filp) |
1530 | { | 1531 | { |
1531 | return (filp->f_op == &hung_up_tty_fops); | 1532 | return (filp->f_op == &hung_up_tty_fops); |
1532 | } | 1533 | } |
@@ -1534,8 +1535,12 @@ int tty_hung_up_p(struct file * filp) | |||
1534 | EXPORT_SYMBOL(tty_hung_up_p); | 1535 | EXPORT_SYMBOL(tty_hung_up_p); |
1535 | 1536 | ||
1536 | /** | 1537 | /** |
1537 | * is_tty - check whether file is a TTY | 1538 | * is_tty - check whether file is a TTY |
1539 | * @filp: file handle that may be a tty | ||
1540 | * | ||
1541 | * Check if the file handle is a tty handle. | ||
1538 | */ | 1542 | */ |
1543 | |||
1539 | int is_tty(struct file *filp) | 1544 | int is_tty(struct file *filp) |
1540 | { | 1545 | { |
1541 | return filp->f_op->read == tty_read | 1546 | return filp->f_op->read == tty_read |
@@ -1601,7 +1606,7 @@ void disassociate_ctty(int on_exit) | |||
1601 | put_pid(old_pgrp); | 1606 | put_pid(old_pgrp); |
1602 | } | 1607 | } |
1603 | mutex_unlock(&tty_mutex); | 1608 | mutex_unlock(&tty_mutex); |
1604 | unlock_kernel(); | 1609 | unlock_kernel(); |
1605 | return; | 1610 | return; |
1606 | } | 1611 | } |
1607 | if (tty_pgrp) { | 1612 | if (tty_pgrp) { |
@@ -1711,7 +1716,6 @@ void start_tty(struct tty_struct *tty) | |||
1711 | } | 1716 | } |
1712 | if (tty->driver->start) | 1717 | if (tty->driver->start) |
1713 | (tty->driver->start)(tty); | 1718 | (tty->driver->start)(tty); |
1714 | |||
1715 | /* If we have a running line discipline it may need kicking */ | 1719 | /* If we have a running line discipline it may need kicking */ |
1716 | tty_wakeup(tty); | 1720 | tty_wakeup(tty); |
1717 | } | 1721 | } |
@@ -1735,11 +1739,11 @@ EXPORT_SYMBOL(start_tty); | |||
1735 | * in new code. Multiple read calls may be outstanding in parallel. | 1739 | * in new code. Multiple read calls may be outstanding in parallel. |
1736 | */ | 1740 | */ |
1737 | 1741 | ||
1738 | static ssize_t tty_read(struct file * file, char __user * buf, size_t count, | 1742 | static ssize_t tty_read(struct file *file, char __user *buf, size_t count, |
1739 | loff_t *ppos) | 1743 | loff_t *ppos) |
1740 | { | 1744 | { |
1741 | int i; | 1745 | int i; |
1742 | struct tty_struct * tty; | 1746 | struct tty_struct *tty; |
1743 | struct inode *inode; | 1747 | struct inode *inode; |
1744 | struct tty_ldisc *ld; | 1748 | struct tty_ldisc *ld; |
1745 | 1749 | ||
@@ -1755,7 +1759,7 @@ static ssize_t tty_read(struct file * file, char __user * buf, size_t count, | |||
1755 | ld = tty_ldisc_ref_wait(tty); | 1759 | ld = tty_ldisc_ref_wait(tty); |
1756 | lock_kernel(); | 1760 | lock_kernel(); |
1757 | if (ld->read) | 1761 | if (ld->read) |
1758 | i = (ld->read)(tty,file,buf,count); | 1762 | i = (ld->read)(tty, file, buf, count); |
1759 | else | 1763 | else |
1760 | i = -EIO; | 1764 | i = -EIO; |
1761 | tty_ldisc_deref(ld); | 1765 | tty_ldisc_deref(ld); |
@@ -1795,7 +1799,7 @@ static inline ssize_t do_tty_write( | |||
1795 | { | 1799 | { |
1796 | ssize_t ret, written = 0; | 1800 | ssize_t ret, written = 0; |
1797 | unsigned int chunk; | 1801 | unsigned int chunk; |
1798 | 1802 | ||
1799 | ret = tty_write_lock(tty, file->f_flags & O_NDELAY); | 1803 | ret = tty_write_lock(tty, file->f_flags & O_NDELAY); |
1800 | if (ret < 0) | 1804 | if (ret < 0) |
1801 | return ret; | 1805 | return ret; |
@@ -1891,21 +1895,22 @@ out: | |||
1891 | * kernel lock for historical reasons. New code should not rely on this. | 1895 | * kernel lock for historical reasons. New code should not rely on this. |
1892 | */ | 1896 | */ |
1893 | 1897 | ||
1894 | static ssize_t tty_write(struct file * file, const char __user * buf, size_t count, | 1898 | static ssize_t tty_write(struct file *file, const char __user *buf, |
1895 | loff_t *ppos) | 1899 | size_t count, loff_t *ppos) |
1896 | { | 1900 | { |
1897 | struct tty_struct * tty; | 1901 | struct tty_struct *tty; |
1898 | struct inode *inode = file->f_path.dentry->d_inode; | 1902 | struct inode *inode = file->f_path.dentry->d_inode; |
1899 | ssize_t ret; | 1903 | ssize_t ret; |
1900 | struct tty_ldisc *ld; | 1904 | struct tty_ldisc *ld; |
1901 | 1905 | ||
1902 | tty = (struct tty_struct *)file->private_data; | 1906 | tty = (struct tty_struct *)file->private_data; |
1903 | if (tty_paranoia_check(tty, inode, "tty_write")) | 1907 | if (tty_paranoia_check(tty, inode, "tty_write")) |
1904 | return -EIO; | 1908 | return -EIO; |
1905 | if (!tty || !tty->driver->write || (test_bit(TTY_IO_ERROR, &tty->flags))) | 1909 | if (!tty || !tty->driver->write || |
1906 | return -EIO; | 1910 | (test_bit(TTY_IO_ERROR, &tty->flags))) |
1911 | return -EIO; | ||
1907 | 1912 | ||
1908 | ld = tty_ldisc_ref_wait(tty); | 1913 | ld = tty_ldisc_ref_wait(tty); |
1909 | if (!ld->write) | 1914 | if (!ld->write) |
1910 | ret = -EIO; | 1915 | ret = -EIO; |
1911 | else | 1916 | else |
@@ -1914,8 +1919,8 @@ static ssize_t tty_write(struct file * file, const char __user * buf, size_t cou | |||
1914 | return ret; | 1919 | return ret; |
1915 | } | 1920 | } |
1916 | 1921 | ||
1917 | ssize_t redirected_tty_write(struct file * file, const char __user * buf, size_t count, | 1922 | ssize_t redirected_tty_write(struct file *file, const char __user *buf, |
1918 | loff_t *ppos) | 1923 | size_t count, loff_t *ppos) |
1919 | { | 1924 | { |
1920 | struct file *p = NULL; | 1925 | struct file *p = NULL; |
1921 | 1926 | ||
@@ -1932,7 +1937,6 @@ ssize_t redirected_tty_write(struct file * file, const char __user * buf, size_t | |||
1932 | fput(p); | 1937 | fput(p); |
1933 | return res; | 1938 | return res; |
1934 | } | 1939 | } |
1935 | |||
1936 | return tty_write(file, buf, count, ppos); | 1940 | return tty_write(file, buf, count, ppos); |
1937 | } | 1941 | } |
1938 | 1942 | ||
@@ -1954,8 +1958,8 @@ static void pty_line_name(struct tty_driver *driver, int index, char *p) | |||
1954 | int i = index + driver->name_base; | 1958 | int i = index + driver->name_base; |
1955 | /* ->name is initialized to "ttyp", but "tty" is expected */ | 1959 | /* ->name is initialized to "ttyp", but "tty" is expected */ |
1956 | sprintf(p, "%s%c%x", | 1960 | sprintf(p, "%s%c%x", |
1957 | driver->subtype == PTY_TYPE_SLAVE ? "tty" : driver->name, | 1961 | driver->subtype == PTY_TYPE_SLAVE ? "tty" : driver->name, |
1958 | ptychar[i >> 4 & 0xf], i & 0xf); | 1962 | ptychar[i >> 4 & 0xf], i & 0xf); |
1959 | } | 1963 | } |
1960 | 1964 | ||
1961 | /** | 1965 | /** |
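pty_line_name() above derives a legacy BSD pty name from the index: the high nibble selects a letter, the low nibble becomes a hex digit (note that >> binds tighter than &). A standalone illustration; the exact ptychar set is assumed here to be the classic BSD letters and is not shown in this hunk.

#include <stdio.h>

int main(void)
{
	/* Assumed letter set -- the traditional BSD pty characters. */
	static const char ptychar[] = "pqrstuvwxyzabcde";
	int i;

	for (i = 0; i < 3; i++)
		printf("tty%c%x\n", ptychar[i >> 4 & 0xf], i & 0xf);
	return 0;	/* prints ttyp0, ttyp1, ttyp2 */
}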
@@ -2034,7 +2038,7 @@ static int init_dev(struct tty_driver *driver, int idx, | |||
2034 | * First time open is complex, especially for PTY devices. | 2038 | * First time open is complex, especially for PTY devices. |
2035 | * This code guarantees that either everything succeeds and the | 2039 | * This code guarantees that either everything succeeds and the |
2036 | * TTY is ready for operation, or else the table slots are vacated | 2040 | * TTY is ready for operation, or else the table slots are vacated |
2037 | * and the allocated memory released. (Except that the termios | 2041 | * and the allocated memory released. (Except that the termios |
2038 | * and locked termios may be retained.) | 2042 | * and locked termios may be retained.) |
2039 | */ | 2043 | */ |
2040 | 2044 | ||
@@ -2048,7 +2052,7 @@ static int init_dev(struct tty_driver *driver, int idx, | |||
2048 | ltp = o_ltp = NULL; | 2052 | ltp = o_ltp = NULL; |
2049 | 2053 | ||
2050 | tty = alloc_tty_struct(); | 2054 | tty = alloc_tty_struct(); |
2051 | if(!tty) | 2055 | if (!tty) |
2052 | goto fail_no_mem; | 2056 | goto fail_no_mem; |
2053 | initialize_tty_struct(tty); | 2057 | initialize_tty_struct(tty); |
2054 | tty->driver = driver; | 2058 | tty->driver = driver; |
@@ -2109,9 +2113,8 @@ static int init_dev(struct tty_driver *driver, int idx, | |||
2109 | /* | 2113 | /* |
2110 | * Everything allocated ... set up the o_tty structure. | 2114 | * Everything allocated ... set up the o_tty structure. |
2111 | */ | 2115 | */ |
2112 | if (!(driver->other->flags & TTY_DRIVER_DEVPTS_MEM)) { | 2116 | if (!(driver->other->flags & TTY_DRIVER_DEVPTS_MEM)) |
2113 | driver->other->ttys[idx] = o_tty; | 2117 | driver->other->ttys[idx] = o_tty; |
2114 | } | ||
2115 | if (!*o_tp_loc) | 2118 | if (!*o_tp_loc) |
2116 | *o_tp_loc = o_tp; | 2119 | *o_tp_loc = o_tp; |
2117 | if (!*o_ltp_loc) | 2120 | if (!*o_ltp_loc) |
@@ -2127,15 +2130,14 @@ static int init_dev(struct tty_driver *driver, int idx, | |||
2127 | o_tty->link = tty; | 2130 | o_tty->link = tty; |
2128 | } | 2131 | } |
2129 | 2132 | ||
2130 | /* | 2133 | /* |
2131 | * All structures have been allocated, so now we install them. | 2134 | * All structures have been allocated, so now we install them. |
2132 | * Failures after this point use release_tty to clean up, so | 2135 | * Failures after this point use release_tty to clean up, so |
2133 | * there's no need to null out the local pointers. | 2136 | * there's no need to null out the local pointers. |
2134 | */ | 2137 | */ |
2135 | if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM)) { | 2138 | if (!(driver->flags & TTY_DRIVER_DEVPTS_MEM)) |
2136 | driver->ttys[idx] = tty; | 2139 | driver->ttys[idx] = tty; |
2137 | } | 2140 | |
2138 | |||
2139 | if (!*tp_loc) | 2141 | if (!*tp_loc) |
2140 | *tp_loc = tp; | 2142 | *tp_loc = tp; |
2141 | if (!*ltp_loc) | 2143 | if (!*ltp_loc) |
@@ -2148,7 +2150,7 @@ static int init_dev(struct tty_driver *driver, int idx, | |||
2148 | driver->refcount++; | 2150 | driver->refcount++; |
2149 | tty->count++; | 2151 | tty->count++; |
2150 | 2152 | ||
2151 | /* | 2153 | /* |
2152 | * Structures all installed ... call the ldisc open routines. | 2154 | * Structures all installed ... call the ldisc open routines. |
2153 | * If we fail here just call release_tty to clean up. No need | 2155 | * If we fail here just call release_tty to clean up. No need |
2154 | * to decrement the use counts, as release_tty doesn't care. | 2156 | * to decrement the use counts, as release_tty doesn't care. |
@@ -2185,7 +2187,7 @@ fast_track: | |||
2185 | if (driver->type == TTY_DRIVER_TYPE_PTY && | 2187 | if (driver->type == TTY_DRIVER_TYPE_PTY && |
2186 | driver->subtype == PTY_TYPE_MASTER) { | 2188 | driver->subtype == PTY_TYPE_MASTER) { |
2187 | /* | 2189 | /* |
2188 | * special case for PTY masters: only one open permitted, | 2190 | * special case for PTY masters: only one open permitted, |
2189 | * and the slave side open count is incremented as well. | 2191 | * and the slave side open count is incremented as well. |
2190 | */ | 2192 | */ |
2191 | if (tty->count) { | 2193 | if (tty->count) { |
@@ -2198,11 +2200,11 @@ fast_track: | |||
2198 | tty->driver = driver; /* N.B. why do this every time?? */ | 2200 | tty->driver = driver; /* N.B. why do this every time?? */ |
2199 | 2201 | ||
2200 | /* FIXME */ | 2202 | /* FIXME */ |
2201 | if(!test_bit(TTY_LDISC, &tty->flags)) | 2203 | if (!test_bit(TTY_LDISC, &tty->flags)) |
2202 | printk(KERN_ERR "init_dev but no ldisc\n"); | 2204 | printk(KERN_ERR "init_dev but no ldisc\n"); |
2203 | success: | 2205 | success: |
2204 | *ret_tty = tty; | 2206 | *ret_tty = tty; |
2205 | 2207 | ||
2206 | /* All paths come through here to release the mutex */ | 2208 | /* All paths come through here to release the mutex */ |
2207 | end_init: | 2209 | end_init: |
2208 | return retval; | 2210 | return retval; |
@@ -2304,7 +2306,7 @@ static void release_tty(struct tty_struct *tty, int idx) | |||
2304 | * WSH 09/09/97: rewritten to avoid some nasty race conditions that could | 2306 | * WSH 09/09/97: rewritten to avoid some nasty race conditions that could |
2305 | * lead to double frees or releasing memory still in use. | 2307 | * lead to double frees or releasing memory still in use. |
2306 | */ | 2308 | */ |
2307 | static void release_dev(struct file * filp) | 2309 | static void release_dev(struct file *filp) |
2308 | { | 2310 | { |
2309 | struct tty_struct *tty, *o_tty; | 2311 | struct tty_struct *tty, *o_tty; |
2310 | int pty_master, tty_closing, o_tty_closing, do_sleep; | 2312 | int pty_master, tty_closing, o_tty_closing, do_sleep; |
@@ -2312,9 +2314,10 @@ static void release_dev(struct file * filp) | |||
2312 | int idx; | 2314 | int idx; |
2313 | char buf[64]; | 2315 | char buf[64]; |
2314 | unsigned long flags; | 2316 | unsigned long flags; |
2315 | 2317 | ||
2316 | tty = (struct tty_struct *)filp->private_data; | 2318 | tty = (struct tty_struct *)filp->private_data; |
2317 | if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "release_dev")) | 2319 | if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, |
2320 | "release_dev")) | ||
2318 | return; | 2321 | return; |
2319 | 2322 | ||
2320 | check_tty_count(tty, "release_dev"); | 2323 | check_tty_count(tty, "release_dev"); |
@@ -2374,7 +2377,7 @@ static void release_dev(struct file * filp) | |||
2374 | idx, tty->name); | 2377 | idx, tty->name); |
2375 | return; | 2378 | return; |
2376 | } | 2379 | } |
2377 | if (o_tty->termios_locked != | 2380 | if (o_tty->termios_locked != |
2378 | tty->driver->other->termios_locked[idx]) { | 2381 | tty->driver->other->termios_locked[idx]) { |
2379 | printk(KERN_DEBUG "release_dev: other->termios_locked[" | 2382 | printk(KERN_DEBUG "release_dev: other->termios_locked[" |
2380 | "%d] not o_termios_locked for (%s)\n", | 2383 | "%d] not o_termios_locked for (%s)\n", |
@@ -2410,7 +2413,7 @@ static void release_dev(struct file * filp) | |||
2410 | while (1) { | 2413 | while (1) { |
2411 | /* Guard against races with tty->count changes elsewhere and | 2414 | /* Guard against races with tty->count changes elsewhere and |
2412 | opens on /dev/tty */ | 2415 | opens on /dev/tty */ |
2413 | 2416 | ||
2414 | mutex_lock(&tty_mutex); | 2417 | mutex_lock(&tty_mutex); |
2415 | tty_closing = tty->count <= 1; | 2418 | tty_closing = tty->count <= 1; |
2416 | o_tty_closing = o_tty && | 2419 | o_tty_closing = o_tty && |
@@ -2444,11 +2447,11 @@ static void release_dev(struct file * filp) | |||
2444 | "active!\n", tty_name(tty, buf)); | 2447 | "active!\n", tty_name(tty, buf)); |
2445 | mutex_unlock(&tty_mutex); | 2448 | mutex_unlock(&tty_mutex); |
2446 | schedule(); | 2449 | schedule(); |
2447 | } | 2450 | } |
2448 | 2451 | ||
2449 | /* | 2452 | /* |
2450 | * The closing flags are now consistent with the open counts on | 2453 | * The closing flags are now consistent with the open counts on |
2451 | * both sides, and we've completed the last operation that could | 2454 | * both sides, and we've completed the last operation that could |
2452 | * block, so it's safe to proceed with closing. | 2455 | * block, so it's safe to proceed with closing. |
2453 | */ | 2456 | */ |
2454 | if (pty_master) { | 2457 | if (pty_master) { |
@@ -2464,7 +2467,7 @@ static void release_dev(struct file * filp) | |||
2464 | tty->count, tty_name(tty, buf)); | 2467 | tty->count, tty_name(tty, buf)); |
2465 | tty->count = 0; | 2468 | tty->count = 0; |
2466 | } | 2469 | } |
2467 | 2470 | ||
2468 | /* | 2471 | /* |
2469 | * We've decremented tty->count, so we need to remove this file | 2472 | * We've decremented tty->count, so we need to remove this file |
2470 | * descriptor off the tty->tty_files list; this serves two | 2473 | * descriptor off the tty->tty_files list; this serves two |
@@ -2484,9 +2487,9 @@ static void release_dev(struct file * filp) | |||
2484 | * case of a pty we may have to wait around for the other side | 2487 | * case of a pty we may have to wait around for the other side |
2485 | * to close, and TTY_CLOSING makes sure we can't be reopened. | 2488 | * to close, and TTY_CLOSING makes sure we can't be reopened. |
2486 | */ | 2489 | */ |
2487 | if(tty_closing) | 2490 | if (tty_closing) |
2488 | set_bit(TTY_CLOSING, &tty->flags); | 2491 | set_bit(TTY_CLOSING, &tty->flags); |
2489 | if(o_tty_closing) | 2492 | if (o_tty_closing) |
2490 | set_bit(TTY_CLOSING, &o_tty->flags); | 2493 | set_bit(TTY_CLOSING, &o_tty->flags); |
2491 | 2494 | ||
2492 | /* | 2495 | /* |
@@ -2507,7 +2510,7 @@ static void release_dev(struct file * filp) | |||
2507 | /* check whether both sides are closing ... */ | 2510 | /* check whether both sides are closing ... */ |
2508 | if (!tty_closing || (o_tty && !o_tty_closing)) | 2511 | if (!tty_closing || (o_tty && !o_tty_closing)) |
2509 | return; | 2512 | return; |
2510 | 2513 | ||
2511 | #ifdef TTY_DEBUG_HANGUP | 2514 | #ifdef TTY_DEBUG_HANGUP |
2512 | printk(KERN_DEBUG "freeing tty structure..."); | 2515 | printk(KERN_DEBUG "freeing tty structure..."); |
2513 | #endif | 2516 | #endif |
@@ -2522,17 +2525,16 @@ static void release_dev(struct file * filp) | |||
2522 | /* | 2525 | /* |
2523 | * Wait for ->hangup_work and ->buf.work handlers to terminate | 2526 | * Wait for ->hangup_work and ->buf.work handlers to terminate |
2524 | */ | 2527 | */ |
2525 | 2528 | ||
2526 | flush_scheduled_work(); | 2529 | flush_scheduled_work(); |
2527 | 2530 | ||
2528 | /* | 2531 | /* |
2529 | * Wait for any short term users (we know they are just driver | 2532 | * Wait for any short term users (we know they are just driver |
2530 | * side waiters as the file is closing so user count on the file | 2533 | * side waiters as the file is closing so user count on the file |
2531 | * side is zero. | 2534 | * side is zero. |
2532 | */ | 2535 | */ |
2533 | spin_lock_irqsave(&tty_ldisc_lock, flags); | 2536 | spin_lock_irqsave(&tty_ldisc_lock, flags); |
2534 | while(tty->ldisc.refcount) | 2537 | while (tty->ldisc.refcount) { |
2535 | { | ||
2536 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); | 2538 | spin_unlock_irqrestore(&tty_ldisc_lock, flags); |
2537 | wait_event(tty_ldisc_wait, tty->ldisc.refcount == 0); | 2539 | wait_event(tty_ldisc_wait, tty->ldisc.refcount == 0); |
2538 | spin_lock_irqsave(&tty_ldisc_lock, flags); | 2540 | spin_lock_irqsave(&tty_ldisc_lock, flags); |
@@ -2547,12 +2549,12 @@ static void release_dev(struct file * filp) | |||
2547 | if (tty->ldisc.close) | 2549 | if (tty->ldisc.close) |
2548 | (tty->ldisc.close)(tty); | 2550 | (tty->ldisc.close)(tty); |
2549 | tty_ldisc_put(tty->ldisc.num); | 2551 | tty_ldisc_put(tty->ldisc.num); |
2550 | 2552 | ||
2551 | /* | 2553 | /* |
2552 | * Switch the line discipline back | 2554 | * Switch the line discipline back |
2553 | */ | 2555 | */ |
2554 | tty_ldisc_assign(tty, tty_ldisc_get(N_TTY)); | 2556 | tty_ldisc_assign(tty, tty_ldisc_get(N_TTY)); |
2555 | tty_set_termios_ldisc(tty,N_TTY); | 2557 | tty_set_termios_ldisc(tty, N_TTY); |
2556 | if (o_tty) { | 2558 | if (o_tty) { |
2557 | /* FIXME: could o_tty be in setldisc here ? */ | 2559 | /* FIXME: could o_tty be in setldisc here ? */ |
2558 | clear_bit(TTY_LDISC, &o_tty->flags); | 2560 | clear_bit(TTY_LDISC, &o_tty->flags); |
@@ -2560,7 +2562,7 @@ static void release_dev(struct file * filp) | |||
2560 | (o_tty->ldisc.close)(o_tty); | 2562 | (o_tty->ldisc.close)(o_tty); |
2561 | tty_ldisc_put(o_tty->ldisc.num); | 2563 | tty_ldisc_put(o_tty->ldisc.num); |
2562 | tty_ldisc_assign(o_tty, tty_ldisc_get(N_TTY)); | 2564 | tty_ldisc_assign(o_tty, tty_ldisc_get(N_TTY)); |
2563 | tty_set_termios_ldisc(o_tty,N_TTY); | 2565 | tty_set_termios_ldisc(o_tty, N_TTY); |
2564 | } | 2566 | } |
2565 | /* | 2567 | /* |
2566 | * The release_tty function takes care of the details of clearing | 2568 | * The release_tty function takes care of the details of clearing |
@@ -2600,7 +2602,7 @@ static void release_dev(struct file * filp) | |||
2600 | * ->siglock protects ->signal/->sighand | 2602 | * ->siglock protects ->signal/->sighand |
2601 | */ | 2603 | */ |
2602 | 2604 | ||
2603 | static int tty_open(struct inode * inode, struct file * filp) | 2605 | static int tty_open(struct inode *inode, struct file *filp) |
2604 | { | 2606 | { |
2605 | struct tty_struct *tty; | 2607 | struct tty_struct *tty; |
2606 | int noctty, retval; | 2608 | int noctty, retval; |
@@ -2610,15 +2612,15 @@ static int tty_open(struct inode * inode, struct file * filp) | |||
2610 | unsigned short saved_flags = filp->f_flags; | 2612 | unsigned short saved_flags = filp->f_flags; |
2611 | 2613 | ||
2612 | nonseekable_open(inode, filp); | 2614 | nonseekable_open(inode, filp); |
2613 | 2615 | ||
2614 | retry_open: | 2616 | retry_open: |
2615 | noctty = filp->f_flags & O_NOCTTY; | 2617 | noctty = filp->f_flags & O_NOCTTY; |
2616 | index = -1; | 2618 | index = -1; |
2617 | retval = 0; | 2619 | retval = 0; |
2618 | 2620 | ||
2619 | mutex_lock(&tty_mutex); | 2621 | mutex_lock(&tty_mutex); |
2620 | 2622 | ||
2621 | if (device == MKDEV(TTYAUX_MAJOR,0)) { | 2623 | if (device == MKDEV(TTYAUX_MAJOR, 0)) { |
2622 | tty = get_current_tty(); | 2624 | tty = get_current_tty(); |
2623 | if (!tty) { | 2625 | if (!tty) { |
2624 | mutex_unlock(&tty_mutex); | 2626 | mutex_unlock(&tty_mutex); |
@@ -2631,7 +2633,7 @@ retry_open: | |||
2631 | goto got_driver; | 2633 | goto got_driver; |
2632 | } | 2634 | } |
2633 | #ifdef CONFIG_VT | 2635 | #ifdef CONFIG_VT |
2634 | if (device == MKDEV(TTY_MAJOR,0)) { | 2636 | if (device == MKDEV(TTY_MAJOR, 0)) { |
2635 | extern struct tty_driver *console_driver; | 2637 | extern struct tty_driver *console_driver; |
2636 | driver = console_driver; | 2638 | driver = console_driver; |
2637 | index = fg_console; | 2639 | index = fg_console; |
@@ -2639,7 +2641,7 @@ retry_open: | |||
2639 | goto got_driver; | 2641 | goto got_driver; |
2640 | } | 2642 | } |
2641 | #endif | 2643 | #endif |
2642 | if (device == MKDEV(TTYAUX_MAJOR,1)) { | 2644 | if (device == MKDEV(TTYAUX_MAJOR, 1)) { |
2643 | driver = console_device(&index); | 2645 | driver = console_device(&index); |
2644 | if (driver) { | 2646 | if (driver) { |
2645 | /* Don't let /dev/console block */ | 2647 | /* Don't let /dev/console block */ |
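The three MKDEV() checks above dispatch the conventional special nodes: /dev/tty (the caller's controlling tty), /dev/tty0 (the active virtual console) and /dev/console. A hypothetical userspace check of those device numbers, purely for illustration:

        #include <stdio.h>
        #include <sys/stat.h>
        #include <sys/sysmacros.h>

        int main(void)
        {
                struct stat st;

                if (stat("/dev/tty", &st) == 0)         /* expect 5:0 */
                        printf("/dev/tty      %u:%u\n",
                               major(st.st_rdev), minor(st.st_rdev));
                if (stat("/dev/console", &st) == 0)     /* expect 5:1 */
                        printf("/dev/console  %u:%u\n",
                               major(st.st_rdev), minor(st.st_rdev));
                return 0;
        }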
@@ -2679,7 +2681,8 @@ got_driver: | |||
2679 | } | 2681 | } |
2680 | filp->f_flags = saved_flags; | 2682 | filp->f_flags = saved_flags; |
2681 | 2683 | ||
2682 | if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) && !capable(CAP_SYS_ADMIN)) | 2684 | if (!retval && test_bit(TTY_EXCLUSIVE, &tty->flags) && |
2685 | !capable(CAP_SYS_ADMIN)) | ||
2683 | retval = -EBUSY; | 2686 | retval = -EBUSY; |
2684 | 2687 | ||
2685 | if (retval) { | 2688 | if (retval) { |
@@ -2723,11 +2726,11 @@ got_driver: | |||
2723 | * Allocate a unix98 pty master device from the ptmx driver. | 2726 | * Allocate a unix98 pty master device from the ptmx driver. |
2724 | * | 2727 | * |
2725 | * Locking: tty_mutex protects theinit_dev work. tty->count should | 2728 | * Locking: tty_mutex protects theinit_dev work. tty->count should |
2726 | protect the rest. | 2729 | * protect the rest. |
2727 | * allocated_ptys_lock handles the list of free pty numbers | 2730 | * allocated_ptys_lock handles the list of free pty numbers |
2728 | */ | 2731 | */ |
2729 | 2732 | ||
2730 | static int ptmx_open(struct inode * inode, struct file * filp) | 2733 | static int ptmx_open(struct inode *inode, struct file *filp) |
2731 | { | 2734 | { |
2732 | struct tty_struct *tty; | 2735 | struct tty_struct *tty; |
2733 | int retval; | 2736 | int retval; |
@@ -2759,7 +2762,7 @@ static int ptmx_open(struct inode * inode, struct file * filp) | |||
2759 | mutex_lock(&tty_mutex); | 2762 | mutex_lock(&tty_mutex); |
2760 | retval = init_dev(ptm_driver, index, &tty); | 2763 | retval = init_dev(ptm_driver, index, &tty); |
2761 | mutex_unlock(&tty_mutex); | 2764 | mutex_unlock(&tty_mutex); |
2762 | 2765 | ||
2763 | if (retval) | 2766 | if (retval) |
2764 | goto out; | 2767 | goto out; |
2765 | 2768 | ||
@@ -2800,7 +2803,7 @@ out: | |||
2800 | * Takes bkl. See release_dev | 2803 | * Takes bkl. See release_dev |
2801 | */ | 2804 | */ |
2802 | 2805 | ||
2803 | static int tty_release(struct inode * inode, struct file * filp) | 2806 | static int tty_release(struct inode *inode, struct file *filp) |
2804 | { | 2807 | { |
2805 | lock_kernel(); | 2808 | lock_kernel(); |
2806 | release_dev(filp); | 2809 | release_dev(filp); |
@@ -2820,16 +2823,16 @@ static int tty_release(struct inode * inode, struct file * filp) | |||
2820 | * may be re-entered freely by other callers. | 2823 | * may be re-entered freely by other callers. |
2821 | */ | 2824 | */ |
2822 | 2825 | ||
2823 | static unsigned int tty_poll(struct file * filp, poll_table * wait) | 2826 | static unsigned int tty_poll(struct file *filp, poll_table *wait) |
2824 | { | 2827 | { |
2825 | struct tty_struct * tty; | 2828 | struct tty_struct *tty; |
2826 | struct tty_ldisc *ld; | 2829 | struct tty_ldisc *ld; |
2827 | int ret = 0; | 2830 | int ret = 0; |
2828 | 2831 | ||
2829 | tty = (struct tty_struct *)filp->private_data; | 2832 | tty = (struct tty_struct *)filp->private_data; |
2830 | if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll")) | 2833 | if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_poll")) |
2831 | return 0; | 2834 | return 0; |
2832 | 2835 | ||
2833 | ld = tty_ldisc_ref_wait(tty); | 2836 | ld = tty_ldisc_ref_wait(tty); |
2834 | if (ld->poll) | 2837 | if (ld->poll) |
2835 | ret = (ld->poll)(tty, filp, wait); | 2838 | ret = (ld->poll)(tty, filp, wait); |
@@ -2837,15 +2840,15 @@ static unsigned int tty_poll(struct file * filp, poll_table * wait) | |||
2837 | return ret; | 2840 | return ret; |
2838 | } | 2841 | } |
2839 | 2842 | ||
2840 | static int tty_fasync(int fd, struct file * filp, int on) | 2843 | static int tty_fasync(int fd, struct file *filp, int on) |
2841 | { | 2844 | { |
2842 | struct tty_struct * tty; | 2845 | struct tty_struct *tty; |
2843 | int retval; | 2846 | int retval; |
2844 | 2847 | ||
2845 | tty = (struct tty_struct *)filp->private_data; | 2848 | tty = (struct tty_struct *)filp->private_data; |
2846 | if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync")) | 2849 | if (tty_paranoia_check(tty, filp->f_path.dentry->d_inode, "tty_fasync")) |
2847 | return 0; | 2850 | return 0; |
2848 | 2851 | ||
2849 | retval = fasync_helper(fd, filp, on, &tty->fasync); | 2852 | retval = fasync_helper(fd, filp, on, &tty->fasync); |
2850 | if (retval <= 0) | 2853 | if (retval <= 0) |
2851 | return retval; | 2854 | return retval; |
@@ -2893,7 +2896,7 @@ static int tiocsti(struct tty_struct *tty, char __user *p) | |||
2893 | { | 2896 | { |
2894 | char ch, mbz = 0; | 2897 | char ch, mbz = 0; |
2895 | struct tty_ldisc *ld; | 2898 | struct tty_ldisc *ld; |
2896 | 2899 | ||
2897 | if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN)) | 2900 | if ((current->signal->tty != tty) && !capable(CAP_SYS_ADMIN)) |
2898 | return -EPERM; | 2901 | return -EPERM; |
2899 | if (get_user(ch, p)) | 2902 | if (get_user(ch, p)) |
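tiocsti() injects a character into the tty input stream; the check above restricts it to the owning tty or CAP_SYS_ADMIN. A minimal userspace sketch (function name illustrative):

        #include <sys/ioctl.h>

        static int fake_input(int fd, char c)
        {
                /* fails with EPERM unless fd is the caller's controlling
                 * tty or the caller has CAP_SYS_ADMIN */
                return ioctl(fd, TIOCSTI, &c);
        }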
@@ -2915,7 +2918,7 @@ static int tiocsti(struct tty_struct *tty, char __user *p) | |||
2915 | * is consistent. | 2918 | * is consistent. |
2916 | */ | 2919 | */ |
2917 | 2920 | ||
2918 | static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg) | 2921 | static int tiocgwinsz(struct tty_struct *tty, struct winsize __user *arg) |
2919 | { | 2922 | { |
2920 | int err; | 2923 | int err; |
2921 | 2924 | ||
@@ -2944,7 +2947,7 @@ static int tiocgwinsz(struct tty_struct *tty, struct winsize __user * arg) | |||
2944 | */ | 2947 | */ |
2945 | 2948 | ||
2946 | static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, | 2949 | static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, |
2947 | struct winsize __user * arg) | 2950 | struct winsize __user *arg) |
2948 | { | 2951 | { |
2949 | struct winsize tmp_ws; | 2952 | struct winsize tmp_ws; |
2950 | 2953 | ||
@@ -2960,7 +2963,7 @@ static int tiocswinsz(struct tty_struct *tty, struct tty_struct *real_tty, | |||
2960 | if (vc_lock_resize(tty->driver_data, tmp_ws.ws_col, | 2963 | if (vc_lock_resize(tty->driver_data, tmp_ws.ws_col, |
2961 | tmp_ws.ws_row)) { | 2964 | tmp_ws.ws_row)) { |
2962 | mutex_unlock(&tty->termios_mutex); | 2965 | mutex_unlock(&tty->termios_mutex); |
2963 | return -ENXIO; | 2966 | return -ENXIO; |
2964 | } | 2967 | } |
2965 | } | 2968 | } |
2966 | #endif | 2969 | #endif |
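For orientation, the userspace view of the winsize ioctls handled above; a successful set also results in SIGWINCH being delivered to the foreground job. Sketch only, names illustrative:

        #include <sys/ioctl.h>

        static int grow_rows(int fd)
        {
                struct winsize ws;

                if (ioctl(fd, TIOCGWINSZ, &ws) < 0)     /* tiocgwinsz() */
                        return -1;
                ws.ws_row += 1;
                return ioctl(fd, TIOCSWINSZ, &ws);      /* tiocswinsz() */
        }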
@@ -3070,7 +3073,7 @@ static int tiocsctty(struct tty_struct *tty, int arg) | |||
3070 | * This tty is already the controlling | 3073 | * This tty is already the controlling |
3071 | * tty for another session group! | 3074 | * tty for another session group! |
3072 | */ | 3075 | */ |
3073 | if ((arg == 1) && capable(CAP_SYS_ADMIN)) { | 3076 | if (arg == 1 && capable(CAP_SYS_ADMIN)) { |
3074 | /* | 3077 | /* |
3075 | * Steal it away | 3078 | * Steal it away |
3076 | */ | 3079 | */ |
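The capability test above is what lets a privileged caller steal a controlling tty that already belongs to another session; from userspace that is simply (sketch):

        #include <sys/ioctl.h>

        static int take_ctty(int fd)
        {
                /* arg 1 requests the steal; needs CAP_SYS_ADMIN */
                return ioctl(fd, TIOCSCTTY, 1);
        }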
@@ -3303,14 +3306,14 @@ static int tty_tiocmset(struct tty_struct *tty, struct file *file, unsigned int | |||
3303 | /* | 3306 | /* |
3304 | * Split this up, as gcc can choke on it otherwise.. | 3307 | * Split this up, as gcc can choke on it otherwise.. |
3305 | */ | 3308 | */ |
3306 | int tty_ioctl(struct inode * inode, struct file * file, | 3309 | int tty_ioctl(struct inode *inode, struct file *file, |
3307 | unsigned int cmd, unsigned long arg) | 3310 | unsigned int cmd, unsigned long arg) |
3308 | { | 3311 | { |
3309 | struct tty_struct *tty, *real_tty; | 3312 | struct tty_struct *tty, *real_tty; |
3310 | void __user *p = (void __user *)arg; | 3313 | void __user *p = (void __user *)arg; |
3311 | int retval; | 3314 | int retval; |
3312 | struct tty_ldisc *ld; | 3315 | struct tty_ldisc *ld; |
3313 | 3316 | ||
3314 | tty = (struct tty_struct *)file->private_data; | 3317 | tty = (struct tty_struct *)file->private_data; |
3315 | if (tty_paranoia_check(tty, inode, "tty_ioctl")) | 3318 | if (tty_paranoia_check(tty, inode, "tty_ioctl")) |
3316 | return -EINVAL; | 3319 | return -EINVAL; |
@@ -3326,13 +3329,13 @@ int tty_ioctl(struct inode * inode, struct file * file, | |||
3326 | * Break handling by driver | 3329 | * Break handling by driver |
3327 | */ | 3330 | */ |
3328 | if (!tty->driver->break_ctl) { | 3331 | if (!tty->driver->break_ctl) { |
3329 | switch(cmd) { | 3332 | switch (cmd) { |
3330 | case TIOCSBRK: | 3333 | case TIOCSBRK: |
3331 | case TIOCCBRK: | 3334 | case TIOCCBRK: |
3332 | if (tty->driver->ioctl) | 3335 | if (tty->driver->ioctl) |
3333 | return tty->driver->ioctl(tty, file, cmd, arg); | 3336 | return tty->driver->ioctl(tty, file, cmd, arg); |
3334 | return -EINVAL; | 3337 | return -EINVAL; |
3335 | 3338 | ||
3336 | /* These two ioctl's always return success; even if */ | 3339 | /* These two ioctl's always return success; even if */ |
3337 | /* the driver doesn't support them. */ | 3340 | /* the driver doesn't support them. */ |
3338 | case TCSBRK: | 3341 | case TCSBRK: |
@@ -3354,7 +3357,7 @@ int tty_ioctl(struct inode * inode, struct file * file, | |||
3354 | case TIOCSBRK: | 3357 | case TIOCSBRK: |
3355 | case TIOCCBRK: | 3358 | case TIOCCBRK: |
3356 | case TCSBRK: | 3359 | case TCSBRK: |
3357 | case TCSBRKP: | 3360 | case TCSBRKP: |
3358 | retval = tty_check_change(tty); | 3361 | retval = tty_check_change(tty); |
3359 | if (retval) | 3362 | if (retval) |
3360 | return retval; | 3363 | return retval; |
@@ -3367,81 +3370,80 @@ int tty_ioctl(struct inode * inode, struct file * file, | |||
3367 | } | 3370 | } |
3368 | 3371 | ||
3369 | switch (cmd) { | 3372 | switch (cmd) { |
3370 | case TIOCSTI: | 3373 | case TIOCSTI: |
3371 | return tiocsti(tty, p); | 3374 | return tiocsti(tty, p); |
3372 | case TIOCGWINSZ: | 3375 | case TIOCGWINSZ: |
3373 | return tiocgwinsz(tty, p); | 3376 | return tiocgwinsz(tty, p); |
3374 | case TIOCSWINSZ: | 3377 | case TIOCSWINSZ: |
3375 | return tiocswinsz(tty, real_tty, p); | 3378 | return tiocswinsz(tty, real_tty, p); |
3376 | case TIOCCONS: | 3379 | case TIOCCONS: |
3377 | return real_tty!=tty ? -EINVAL : tioccons(file); | 3380 | return real_tty != tty ? -EINVAL : tioccons(file); |
3378 | case FIONBIO: | 3381 | case FIONBIO: |
3379 | return fionbio(file, p); | 3382 | return fionbio(file, p); |
3380 | case TIOCEXCL: | 3383 | case TIOCEXCL: |
3381 | set_bit(TTY_EXCLUSIVE, &tty->flags); | 3384 | set_bit(TTY_EXCLUSIVE, &tty->flags); |
3382 | return 0; | 3385 | return 0; |
3383 | case TIOCNXCL: | 3386 | case TIOCNXCL: |
3384 | clear_bit(TTY_EXCLUSIVE, &tty->flags); | 3387 | clear_bit(TTY_EXCLUSIVE, &tty->flags); |
3385 | return 0; | 3388 | return 0; |
3386 | case TIOCNOTTY: | 3389 | case TIOCNOTTY: |
3387 | if (current->signal->tty != tty) | 3390 | if (current->signal->tty != tty) |
3388 | return -ENOTTY; | 3391 | return -ENOTTY; |
3389 | no_tty(); | 3392 | no_tty(); |
3390 | return 0; | 3393 | return 0; |
3391 | case TIOCSCTTY: | 3394 | case TIOCSCTTY: |
3392 | return tiocsctty(tty, arg); | 3395 | return tiocsctty(tty, arg); |
3393 | case TIOCGPGRP: | 3396 | case TIOCGPGRP: |
3394 | return tiocgpgrp(tty, real_tty, p); | 3397 | return tiocgpgrp(tty, real_tty, p); |
3395 | case TIOCSPGRP: | 3398 | case TIOCSPGRP: |
3396 | return tiocspgrp(tty, real_tty, p); | 3399 | return tiocspgrp(tty, real_tty, p); |
3397 | case TIOCGSID: | 3400 | case TIOCGSID: |
3398 | return tiocgsid(tty, real_tty, p); | 3401 | return tiocgsid(tty, real_tty, p); |
3399 | case TIOCGETD: | 3402 | case TIOCGETD: |
3400 | /* FIXME: check this is ok */ | 3403 | /* FIXME: check this is ok */ |
3401 | return put_user(tty->ldisc.num, (int __user *)p); | 3404 | return put_user(tty->ldisc.num, (int __user *)p); |
3402 | case TIOCSETD: | 3405 | case TIOCSETD: |
3403 | return tiocsetd(tty, p); | 3406 | return tiocsetd(tty, p); |
3404 | #ifdef CONFIG_VT | 3407 | #ifdef CONFIG_VT |
3405 | case TIOCLINUX: | 3408 | case TIOCLINUX: |
3406 | return tioclinux(tty, arg); | 3409 | return tioclinux(tty, arg); |
3407 | #endif | 3410 | #endif |
3408 | /* | 3411 | /* |
3409 | * Break handling | 3412 | * Break handling |
3410 | */ | 3413 | */ |
3411 | case TIOCSBRK: /* Turn break on, unconditionally */ | 3414 | case TIOCSBRK: /* Turn break on, unconditionally */ |
3412 | tty->driver->break_ctl(tty, -1); | 3415 | tty->driver->break_ctl(tty, -1); |
3413 | return 0; | 3416 | return 0; |
3414 | |||
3415 | case TIOCCBRK: /* Turn break off, unconditionally */ | ||
3416 | tty->driver->break_ctl(tty, 0); | ||
3417 | return 0; | ||
3418 | case TCSBRK: /* SVID version: non-zero arg --> no break */ | ||
3419 | /* non-zero arg means wait for all output data | ||
3420 | * to be sent (performed above) but don't send break. | ||
3421 | * This is used by the tcdrain() termios function. | ||
3422 | */ | ||
3423 | if (!arg) | ||
3424 | return send_break(tty, 250); | ||
3425 | return 0; | ||
3426 | case TCSBRKP: /* support for POSIX tcsendbreak() */ | ||
3427 | return send_break(tty, arg ? arg*100 : 250); | ||
3428 | |||
3429 | case TIOCMGET: | ||
3430 | return tty_tiocmget(tty, file, p); | ||
3431 | 3417 | ||
3432 | case TIOCMSET: | 3418 | case TIOCCBRK: /* Turn break off, unconditionally */ |
3433 | case TIOCMBIC: | 3419 | tty->driver->break_ctl(tty, 0); |
3434 | case TIOCMBIS: | 3420 | return 0; |
3435 | return tty_tiocmset(tty, file, cmd, p); | 3421 | case TCSBRK: /* SVID version: non-zero arg --> no break */ |
3436 | case TCFLSH: | 3422 | /* non-zero arg means wait for all output data |
3437 | switch (arg) { | 3423 | * to be sent (performed above) but don't send break. |
3438 | case TCIFLUSH: | 3424 | * This is used by the tcdrain() termios function. |
3439 | case TCIOFLUSH: | 3425 | */ |
3440 | /* flush tty buffer and allow ldisc to process ioctl */ | 3426 | if (!arg) |
3441 | tty_buffer_flush(tty); | 3427 | return send_break(tty, 250); |
3442 | break; | 3428 | return 0; |
3443 | } | 3429 | case TCSBRKP: /* support for POSIX tcsendbreak() */ |
3430 | return send_break(tty, arg ? arg*100 : 250); | ||
3431 | |||
3432 | case TIOCMGET: | ||
3433 | return tty_tiocmget(tty, file, p); | ||
3434 | case TIOCMSET: | ||
3435 | case TIOCMBIC: | ||
3436 | case TIOCMBIS: | ||
3437 | return tty_tiocmset(tty, file, cmd, p); | ||
3438 | case TCFLSH: | ||
3439 | switch (arg) { | ||
3440 | case TCIFLUSH: | ||
3441 | case TCIOFLUSH: | ||
3442 | /* flush tty buffer and allow ldisc to process ioctl */ | ||
3443 | tty_buffer_flush(tty); | ||
3444 | break; | 3444 | break; |
3445 | } | ||
3446 | break; | ||
3445 | } | 3447 | } |
3446 | if (tty->driver->ioctl) { | 3448 | if (tty->driver->ioctl) { |
3447 | retval = (tty->driver->ioctl)(tty, file, cmd, arg); | 3449 | retval = (tty->driver->ioctl)(tty, file, cmd, arg); |
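The reflowed break cases keep the old semantics: TCSBRK with a zero argument sends a ~250 ms break, a non-zero argument only drains output (the tcdrain() path), and TCSBRKP takes its duration in 100 ms units. Userspace sketch, illustration only:

        #include <sys/ioctl.h>
        #include <termios.h>

        static int send_break_ms(int fd, int ms)
        {
                /* 0 falls back to the kernel's default ~250 ms */
                return ioctl(fd, TCSBRKP, ms / 100);
        }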
@@ -3460,7 +3462,7 @@ int tty_ioctl(struct inode * inode, struct file * file, | |||
3460 | } | 3462 | } |
3461 | 3463 | ||
3462 | #ifdef CONFIG_COMPAT | 3464 | #ifdef CONFIG_COMPAT |
3463 | static long tty_compat_ioctl(struct file * file, unsigned int cmd, | 3465 | static long tty_compat_ioctl(struct file *file, unsigned int cmd, |
3464 | unsigned long arg) | 3466 | unsigned long arg) |
3465 | { | 3467 | { |
3466 | struct inode *inode = file->f_dentry->d_inode; | 3468 | struct inode *inode = file->f_dentry->d_inode; |
@@ -3491,7 +3493,7 @@ static long tty_compat_ioctl(struct file * file, unsigned int cmd, | |||
3491 | * prevent trojan horses by killing all processes associated with this | 3493 | * prevent trojan horses by killing all processes associated with this |
3492 | * tty when the user hits the "Secure Attention Key". Required for | 3494 | * tty when the user hits the "Secure Attention Key". Required for |
3493 | * super-paranoid applications --- see the Orange Book for more details. | 3495 | * super-paranoid applications --- see the Orange Book for more details. |
3494 | * | 3496 | * |
3495 | * This code could be nicer; ideally it should send a HUP, wait a few | 3497 | * This code could be nicer; ideally it should send a HUP, wait a few |
3496 | * seconds, then send a INT, and then a KILL signal. But you then | 3498 | * seconds, then send a INT, and then a KILL signal. But you then |
3497 | * have to coordinate with the init process, since all processes associated | 3499 | * have to coordinate with the init process, since all processes associated |
@@ -3515,16 +3517,16 @@ void __do_SAK(struct tty_struct *tty) | |||
3515 | int i; | 3517 | int i; |
3516 | struct file *filp; | 3518 | struct file *filp; |
3517 | struct fdtable *fdt; | 3519 | struct fdtable *fdt; |
3518 | 3520 | ||
3519 | if (!tty) | 3521 | if (!tty) |
3520 | return; | 3522 | return; |
3521 | session = tty->session; | 3523 | session = tty->session; |
3522 | 3524 | ||
3523 | tty_ldisc_flush(tty); | 3525 | tty_ldisc_flush(tty); |
3524 | 3526 | ||
3525 | if (tty->driver->flush_buffer) | 3527 | if (tty->driver->flush_buffer) |
3526 | tty->driver->flush_buffer(tty); | 3528 | tty->driver->flush_buffer(tty); |
3527 | 3529 | ||
3528 | read_lock(&tasklist_lock); | 3530 | read_lock(&tasklist_lock); |
3529 | /* Kill the entire session */ | 3531 | /* Kill the entire session */ |
3530 | do_each_pid_task(session, PIDTYPE_SID, p) { | 3532 | do_each_pid_task(session, PIDTYPE_SID, p) { |
@@ -3552,7 +3554,7 @@ void __do_SAK(struct tty_struct *tty) | |||
3552 | */ | 3554 | */ |
3553 | spin_lock(&p->files->file_lock); | 3555 | spin_lock(&p->files->file_lock); |
3554 | fdt = files_fdtable(p->files); | 3556 | fdt = files_fdtable(p->files); |
3555 | for (i=0; i < fdt->max_fds; i++) { | 3557 | for (i = 0; i < fdt->max_fds; i++) { |
3556 | filp = fcheck_files(p->files, i); | 3558 | filp = fcheck_files(p->files, i); |
3557 | if (!filp) | 3559 | if (!filp) |
3558 | continue; | 3560 | continue; |
@@ -3606,7 +3608,7 @@ EXPORT_SYMBOL(do_SAK); | |||
3606 | * while invoking the line discipline receive_buf method. The | 3608 | * while invoking the line discipline receive_buf method. The |
3607 | * receive_buf method is single threaded for each tty instance. | 3609 | * receive_buf method is single threaded for each tty instance. |
3608 | */ | 3610 | */ |
3609 | 3611 | ||
3610 | static void flush_to_ldisc(struct work_struct *work) | 3612 | static void flush_to_ldisc(struct work_struct *work) |
3611 | { | 3613 | { |
3612 | struct tty_struct *tty = | 3614 | struct tty_struct *tty = |
@@ -3622,7 +3624,8 @@ static void flush_to_ldisc(struct work_struct *work) | |||
3622 | return; | 3624 | return; |
3623 | 3625 | ||
3624 | spin_lock_irqsave(&tty->buf.lock, flags); | 3626 | spin_lock_irqsave(&tty->buf.lock, flags); |
3625 | set_bit(TTY_FLUSHING, &tty->flags); /* So we know a flush is running */ | 3627 | /* So we know a flush is running */ |
3628 | set_bit(TTY_FLUSHING, &tty->flags); | ||
3626 | head = tty->buf.head; | 3629 | head = tty->buf.head; |
3627 | if (head != NULL) { | 3630 | if (head != NULL) { |
3628 | tty->buf.head = NULL; | 3631 | tty->buf.head = NULL; |
@@ -3795,7 +3798,8 @@ struct device *tty_register_device(struct tty_driver *driver, unsigned index, | |||
3795 | 3798 | ||
3796 | void tty_unregister_device(struct tty_driver *driver, unsigned index) | 3799 | void tty_unregister_device(struct tty_driver *driver, unsigned index) |
3797 | { | 3800 | { |
3798 | device_destroy(tty_class, MKDEV(driver->major, driver->minor_start) + index); | 3801 | device_destroy(tty_class, |
3802 | MKDEV(driver->major, driver->minor_start) + index); | ||
3799 | } | 3803 | } |
3800 | 3804 | ||
3801 | EXPORT_SYMBOL(tty_register_device); | 3805 | EXPORT_SYMBOL(tty_register_device); |
@@ -3859,7 +3863,7 @@ EXPORT_SYMBOL(tty_set_operations); | |||
3859 | int tty_register_driver(struct tty_driver *driver) | 3863 | int tty_register_driver(struct tty_driver *driver) |
3860 | { | 3864 | { |
3861 | int error; | 3865 | int error; |
3862 | int i; | 3866 | int i; |
3863 | dev_t dev; | 3867 | dev_t dev; |
3864 | void **p = NULL; | 3868 | void **p = NULL; |
3865 | 3869 | ||
@@ -3873,8 +3877,8 @@ int tty_register_driver(struct tty_driver *driver) | |||
3873 | } | 3877 | } |
3874 | 3878 | ||
3875 | if (!driver->major) { | 3879 | if (!driver->major) { |
3876 | error = alloc_chrdev_region(&dev, driver->minor_start, driver->num, | 3880 | error = alloc_chrdev_region(&dev, driver->minor_start, |
3877 | driver->name); | 3881 | driver->num, driver->name); |
3878 | if (!error) { | 3882 | if (!error) { |
3879 | driver->major = MAJOR(dev); | 3883 | driver->major = MAJOR(dev); |
3880 | driver->minor_start = MINOR(dev); | 3884 | driver->minor_start = MINOR(dev); |
@@ -3891,7 +3895,8 @@ int tty_register_driver(struct tty_driver *driver) | |||
3891 | if (p) { | 3895 | if (p) { |
3892 | driver->ttys = (struct tty_struct **)p; | 3896 | driver->ttys = (struct tty_struct **)p; |
3893 | driver->termios = (struct ktermios **)(p + driver->num); | 3897 | driver->termios = (struct ktermios **)(p + driver->num); |
3894 | driver->termios_locked = (struct ktermios **)(p + driver->num * 2); | 3898 | driver->termios_locked = (struct ktermios **) |
3899 | (p + driver->num * 2); | ||
3895 | } else { | 3900 | } else { |
3896 | driver->ttys = NULL; | 3901 | driver->ttys = NULL; |
3897 | driver->termios = NULL; | 3902 | driver->termios = NULL; |
@@ -3911,13 +3916,13 @@ int tty_register_driver(struct tty_driver *driver) | |||
3911 | 3916 | ||
3912 | if (!driver->put_char) | 3917 | if (!driver->put_char) |
3913 | driver->put_char = tty_default_put_char; | 3918 | driver->put_char = tty_default_put_char; |
3914 | 3919 | ||
3915 | mutex_lock(&tty_mutex); | 3920 | mutex_lock(&tty_mutex); |
3916 | list_add(&driver->tty_drivers, &tty_drivers); | 3921 | list_add(&driver->tty_drivers, &tty_drivers); |
3917 | mutex_unlock(&tty_mutex); | 3922 | mutex_unlock(&tty_mutex); |
3918 | 3923 | ||
3919 | if ( !(driver->flags & TTY_DRIVER_DYNAMIC_DEV) ) { | 3924 | if (!(driver->flags & TTY_DRIVER_DYNAMIC_DEV)) { |
3920 | for(i = 0; i < driver->num; i++) | 3925 | for (i = 0; i < driver->num; i++) |
3921 | tty_register_device(driver, i, NULL); | 3926 | tty_register_device(driver, i, NULL); |
3922 | } | 3927 | } |
3923 | proc_tty_register_driver(driver); | 3928 | proc_tty_register_driver(driver); |
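For context, a hypothetical, heavily abbreviated user of this registration path; the field set and names are illustrative of the 2.6.24-era API and are not taken from this patch:

        #include <linux/module.h>
        #include <linux/init.h>
        #include <linux/tty.h>

        static int ex_open(struct tty_struct *tty, struct file *filp)
        {
                return 0;
        }

        static void ex_close(struct tty_struct *tty, struct file *filp)
        {
        }

        static int ex_write(struct tty_struct *tty,
                            const unsigned char *buf, int count)
        {
                return count;   /* pretend everything was consumed */
        }

        static struct tty_operations ex_ops = {
                .open  = ex_open,
                .close = ex_close,
                .write = ex_write,
        };

        static struct tty_driver *ex_drv;

        static int __init ex_init(void)
        {
                ex_drv = alloc_tty_driver(1);
                if (!ex_drv)
                        return -ENOMEM;
                ex_drv->owner = THIS_MODULE;
                ex_drv->driver_name = "ex";
                ex_drv->name = "ttyEX";
                ex_drv->type = TTY_DRIVER_TYPE_SERIAL;
                ex_drv->init_termios = tty_std_termios;
                /* major left 0 so tty_register_driver() goes through
                 * alloc_chrdev_region() as shown above */
                tty_set_operations(ex_drv, &ex_ops);
                return tty_register_driver(ex_drv);
        }
        module_init(ex_init);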
@@ -4037,7 +4042,7 @@ void __init console_init(void) | |||
4037 | (void) tty_register_ldisc(N_TTY, &tty_ldisc_N_TTY); | 4042 | (void) tty_register_ldisc(N_TTY, &tty_ldisc_N_TTY); |
4038 | 4043 | ||
4039 | /* | 4044 | /* |
4040 | * set up the console device so that later boot sequences can | 4045 | * set up the console device so that later boot sequences can |
4041 | * inform about problems etc.. | 4046 | * inform about problems etc.. |
4042 | */ | 4047 | */ |
4043 | call = __con_initcall_start; | 4048 | call = __con_initcall_start; |
diff --git a/drivers/char/tty_ioctl.c b/drivers/char/tty_ioctl.c index d4b6d64e858b..f95a80b2265f 100644 --- a/drivers/char/tty_ioctl.c +++ b/drivers/char/tty_ioctl.c | |||
@@ -50,11 +50,11 @@ | |||
50 | * Locking: none | 50 | * Locking: none |
51 | */ | 51 | */ |
52 | 52 | ||
53 | void tty_wait_until_sent(struct tty_struct * tty, long timeout) | 53 | void tty_wait_until_sent(struct tty_struct *tty, long timeout) |
54 | { | 54 | { |
55 | #ifdef TTY_DEBUG_WAIT_UNTIL_SENT | 55 | #ifdef TTY_DEBUG_WAIT_UNTIL_SENT |
56 | char buf[64]; | 56 | char buf[64]; |
57 | 57 | ||
58 | printk(KERN_DEBUG "%s wait until sent...\n", tty_name(tty, buf)); | 58 | printk(KERN_DEBUG "%s wait until sent...\n", tty_name(tty, buf)); |
59 | #endif | 59 | #endif |
60 | if (!tty->driver->chars_in_buffer) | 60 | if (!tty->driver->chars_in_buffer) |
@@ -67,7 +67,6 @@ void tty_wait_until_sent(struct tty_struct * tty, long timeout) | |||
67 | if (tty->driver->wait_until_sent) | 67 | if (tty->driver->wait_until_sent) |
68 | tty->driver->wait_until_sent(tty, timeout); | 68 | tty->driver->wait_until_sent(tty, timeout); |
69 | } | 69 | } |
70 | |||
71 | EXPORT_SYMBOL(tty_wait_until_sent); | 70 | EXPORT_SYMBOL(tty_wait_until_sent); |
72 | 71 | ||
73 | static void unset_locked_termios(struct ktermios *termios, | 72 | static void unset_locked_termios(struct ktermios *termios, |
@@ -75,8 +74,8 @@ static void unset_locked_termios(struct ktermios *termios, | |||
75 | struct ktermios *locked) | 74 | struct ktermios *locked) |
76 | { | 75 | { |
77 | int i; | 76 | int i; |
78 | 77 | ||
79 | #define NOSET_MASK(x,y,z) (x = ((x) & ~(z)) | ((y) & (z))) | 78 | #define NOSET_MASK(x, y, z) (x = ((x) & ~(z)) | ((y) & (z))) |
80 | 79 | ||
81 | if (!locked) { | 80 | if (!locked) { |
82 | printk(KERN_WARNING "Warning?!? termios_locked is NULL.\n"); | 81 | printk(KERN_WARNING "Warning?!? termios_locked is NULL.\n"); |
@@ -88,7 +87,7 @@ static void unset_locked_termios(struct ktermios *termios, | |||
88 | NOSET_MASK(termios->c_cflag, old->c_cflag, locked->c_cflag); | 87 | NOSET_MASK(termios->c_cflag, old->c_cflag, locked->c_cflag); |
89 | NOSET_MASK(termios->c_lflag, old->c_lflag, locked->c_lflag); | 88 | NOSET_MASK(termios->c_lflag, old->c_lflag, locked->c_lflag); |
90 | termios->c_line = locked->c_line ? old->c_line : termios->c_line; | 89 | termios->c_line = locked->c_line ? old->c_line : termios->c_line; |
91 | for (i=0; i < NCCS; i++) | 90 | for (i = 0; i < NCCS; i++) |
92 | termios->c_cc[i] = locked->c_cc[i] ? | 91 | termios->c_cc[i] = locked->c_cc[i] ? |
93 | old->c_cc[i] : termios->c_cc[i]; | 92 | old->c_cc[i] : termios->c_cc[i]; |
94 | /* FIXME: What should we do for i/ospeed */ | 93 | /* FIXME: What should we do for i/ospeed */ |
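A worked example of the NOSET_MASK() macro reformatted above: bits set in the lock mask keep their old value, everything else takes the newly requested value (illustration only):

        unsigned int x = 0xF0;  /* requested flags */
        unsigned int y = 0x0F;  /* old flags       */
        unsigned int z = 0x03;  /* locked bits     */

        NOSET_MASK(x, y, z);    /* x == 0xF3: low two bits kept from y */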
@@ -163,7 +162,6 @@ speed_t tty_termios_baud_rate(struct ktermios *termios) | |||
163 | } | 162 | } |
164 | return baud_table[cbaud]; | 163 | return baud_table[cbaud]; |
165 | } | 164 | } |
166 | |||
167 | EXPORT_SYMBOL(tty_termios_baud_rate); | 165 | EXPORT_SYMBOL(tty_termios_baud_rate); |
168 | 166 | ||
169 | /** | 167 | /** |
@@ -203,7 +201,6 @@ speed_t tty_termios_input_baud_rate(struct ktermios *termios) | |||
203 | return tty_termios_baud_rate(termios); | 201 | return tty_termios_baud_rate(termios); |
204 | #endif | 202 | #endif |
205 | } | 203 | } |
206 | |||
207 | EXPORT_SYMBOL(tty_termios_input_baud_rate); | 204 | EXPORT_SYMBOL(tty_termios_input_baud_rate); |
208 | 205 | ||
209 | /** | 206 | /** |
@@ -338,7 +335,6 @@ speed_t tty_get_baud_rate(struct tty_struct *tty) | |||
338 | 335 | ||
339 | return baud; | 336 | return baud; |
340 | } | 337 | } |
341 | |||
342 | EXPORT_SYMBOL(tty_get_baud_rate); | 338 | EXPORT_SYMBOL(tty_get_baud_rate); |
343 | 339 | ||
344 | /** | 340 | /** |
@@ -361,7 +357,6 @@ void tty_termios_copy_hw(struct ktermios *new, struct ktermios *old) | |||
361 | new->c_ispeed = old->c_ispeed; | 357 | new->c_ispeed = old->c_ispeed; |
362 | new->c_ospeed = old->c_ospeed; | 358 | new->c_ospeed = old->c_ospeed; |
363 | } | 359 | } |
364 | |||
365 | EXPORT_SYMBOL(tty_termios_copy_hw); | 360 | EXPORT_SYMBOL(tty_termios_copy_hw); |
366 | 361 | ||
367 | /** | 362 | /** |
@@ -395,16 +390,16 @@ EXPORT_SYMBOL(tty_termios_hw_change); | |||
395 | * Locking: termios_sem | 390 | * Locking: termios_sem |
396 | */ | 391 | */ |
397 | 392 | ||
398 | static void change_termios(struct tty_struct * tty, struct ktermios * new_termios) | 393 | static void change_termios(struct tty_struct *tty, struct ktermios *new_termios) |
399 | { | 394 | { |
400 | int canon_change; | 395 | int canon_change; |
401 | struct ktermios old_termios = *tty->termios; | 396 | struct ktermios old_termios = *tty->termios; |
402 | struct tty_ldisc *ld; | 397 | struct tty_ldisc *ld; |
403 | 398 | ||
404 | /* | 399 | /* |
405 | * Perform the actual termios internal changes under lock. | 400 | * Perform the actual termios internal changes under lock. |
406 | */ | 401 | */ |
407 | 402 | ||
408 | 403 | ||
409 | /* FIXME: we need to decide on some locking/ordering semantics | 404 | /* FIXME: we need to decide on some locking/ordering semantics |
410 | for the set_termios notification eventually */ | 405 | for the set_termios notification eventually */ |
@@ -419,7 +414,7 @@ static void change_termios(struct tty_struct * tty, struct ktermios * new_termio | |||
419 | tty->canon_data = 0; | 414 | tty->canon_data = 0; |
420 | tty->erasing = 0; | 415 | tty->erasing = 0; |
421 | } | 416 | } |
422 | 417 | ||
423 | /* This bit should be in the ldisc code */ | 418 | /* This bit should be in the ldisc code */ |
424 | if (canon_change && !L_ICANON(tty) && tty->read_cnt) | 419 | if (canon_change && !L_ICANON(tty) && tty->read_cnt) |
425 | /* Get characters left over from canonical mode. */ | 420 | /* Get characters left over from canonical mode. */ |
@@ -442,7 +437,7 @@ static void change_termios(struct tty_struct * tty, struct ktermios * new_termio | |||
442 | wake_up_interruptible(&tty->link->read_wait); | 437 | wake_up_interruptible(&tty->link->read_wait); |
443 | } | 438 | } |
444 | } | 439 | } |
445 | 440 | ||
446 | if (tty->driver->set_termios) | 441 | if (tty->driver->set_termios) |
447 | (*tty->driver->set_termios)(tty, &old_termios); | 442 | (*tty->driver->set_termios)(tty, &old_termios); |
448 | else | 443 | else |
@@ -470,7 +465,7 @@ static void change_termios(struct tty_struct * tty, struct ktermios * new_termio | |||
470 | * Called functions take ldisc and termios_sem locks | 465 | * Called functions take ldisc and termios_sem locks |
471 | */ | 466 | */ |
472 | 467 | ||
473 | static int set_termios(struct tty_struct * tty, void __user *arg, int opt) | 468 | static int set_termios(struct tty_struct *tty, void __user *arg, int opt) |
474 | { | 469 | { |
475 | struct ktermios tmp_termios; | 470 | struct ktermios tmp_termios; |
476 | struct tty_ldisc *ld; | 471 | struct tty_ldisc *ld; |
@@ -501,19 +496,19 @@ static int set_termios(struct tty_struct * tty, void __user *arg, int opt) | |||
501 | return -EFAULT; | 496 | return -EFAULT; |
502 | #endif | 497 | #endif |
503 | 498 | ||
504 | /* If old style Bfoo values are used then load c_ispeed/c_ospeed with the real speed | 499 | /* If old style Bfoo values are used then load c_ispeed/c_ospeed |
505 | so its unconditionally usable */ | 500 | * with the real speed so its unconditionally usable */ |
506 | tmp_termios.c_ispeed = tty_termios_input_baud_rate(&tmp_termios); | 501 | tmp_termios.c_ispeed = tty_termios_input_baud_rate(&tmp_termios); |
507 | tmp_termios.c_ospeed = tty_termios_baud_rate(&tmp_termios); | 502 | tmp_termios.c_ospeed = tty_termios_baud_rate(&tmp_termios); |
508 | 503 | ||
509 | ld = tty_ldisc_ref(tty); | 504 | ld = tty_ldisc_ref(tty); |
510 | 505 | ||
511 | if (ld != NULL) { | 506 | if (ld != NULL) { |
512 | if ((opt & TERMIOS_FLUSH) && ld->flush_buffer) | 507 | if ((opt & TERMIOS_FLUSH) && ld->flush_buffer) |
513 | ld->flush_buffer(tty); | 508 | ld->flush_buffer(tty); |
514 | tty_ldisc_deref(ld); | 509 | tty_ldisc_deref(ld); |
515 | } | 510 | } |
516 | 511 | ||
517 | if (opt & TERMIOS_WAIT) { | 512 | if (opt & TERMIOS_WAIT) { |
518 | tty_wait_until_sent(tty, 0); | 513 | tty_wait_until_sent(tty, 0); |
519 | if (signal_pending(current)) | 514 | if (signal_pending(current)) |
@@ -529,14 +524,14 @@ static int set_termios(struct tty_struct * tty, void __user *arg, int opt) | |||
529 | return 0; | 524 | return 0; |
530 | } | 525 | } |
531 | 526 | ||
532 | static int get_termio(struct tty_struct * tty, struct termio __user * termio) | 527 | static int get_termio(struct tty_struct *tty, struct termio __user *termio) |
533 | { | 528 | { |
534 | if (kernel_termios_to_user_termio(termio, tty->termios)) | 529 | if (kernel_termios_to_user_termio(termio, tty->termios)) |
535 | return -EFAULT; | 530 | return -EFAULT; |
536 | return 0; | 531 | return 0; |
537 | } | 532 | } |
538 | 533 | ||
539 | static unsigned long inq_canon(struct tty_struct * tty) | 534 | static unsigned long inq_canon(struct tty_struct *tty) |
540 | { | 535 | { |
541 | int nr, head, tail; | 536 | int nr, head, tail; |
542 | 537 | ||
@@ -561,7 +556,7 @@ static unsigned long inq_canon(struct tty_struct * tty) | |||
561 | * | 556 | * |
562 | * The "sg_flags" translation is a joke.. | 557 | * The "sg_flags" translation is a joke.. |
563 | */ | 558 | */ |
564 | static int get_sgflags(struct tty_struct * tty) | 559 | static int get_sgflags(struct tty_struct *tty) |
565 | { | 560 | { |
566 | int flags = 0; | 561 | int flags = 0; |
567 | 562 | ||
@@ -579,7 +574,7 @@ static int get_sgflags(struct tty_struct * tty) | |||
579 | return flags; | 574 | return flags; |
580 | } | 575 | } |
581 | 576 | ||
582 | static int get_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb) | 577 | static int get_sgttyb(struct tty_struct *tty, struct sgttyb __user *sgttyb) |
583 | { | 578 | { |
584 | struct sgttyb tmp; | 579 | struct sgttyb tmp; |
585 | 580 | ||
@@ -590,11 +585,11 @@ static int get_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb) | |||
590 | tmp.sg_kill = tty->termios->c_cc[VKILL]; | 585 | tmp.sg_kill = tty->termios->c_cc[VKILL]; |
591 | tmp.sg_flags = get_sgflags(tty); | 586 | tmp.sg_flags = get_sgflags(tty); |
592 | mutex_unlock(&tty->termios_mutex); | 587 | mutex_unlock(&tty->termios_mutex); |
593 | 588 | ||
594 | return copy_to_user(sgttyb, &tmp, sizeof(tmp)) ? -EFAULT : 0; | 589 | return copy_to_user(sgttyb, &tmp, sizeof(tmp)) ? -EFAULT : 0; |
595 | } | 590 | } |
596 | 591 | ||
597 | static void set_sgflags(struct ktermios * termios, int flags) | 592 | static void set_sgflags(struct ktermios *termios, int flags) |
598 | { | 593 | { |
599 | termios->c_iflag = ICRNL | IXON; | 594 | termios->c_iflag = ICRNL | IXON; |
600 | termios->c_oflag = 0; | 595 | termios->c_oflag = 0; |
@@ -631,7 +626,7 @@ static void set_sgflags(struct ktermios * termios, int flags) | |||
631 | * Locking: termios_sem | 626 | * Locking: termios_sem |
632 | */ | 627 | */ |
633 | 628 | ||
634 | static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb) | 629 | static int set_sgttyb(struct tty_struct *tty, struct sgttyb __user *sgttyb) |
635 | { | 630 | { |
636 | int retval; | 631 | int retval; |
637 | struct sgttyb tmp; | 632 | struct sgttyb tmp; |
@@ -640,7 +635,7 @@ static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb) | |||
640 | retval = tty_check_change(tty); | 635 | retval = tty_check_change(tty); |
641 | if (retval) | 636 | if (retval) |
642 | return retval; | 637 | return retval; |
643 | 638 | ||
644 | if (copy_from_user(&tmp, sgttyb, sizeof(tmp))) | 639 | if (copy_from_user(&tmp, sgttyb, sizeof(tmp))) |
645 | return -EFAULT; | 640 | return -EFAULT; |
646 | 641 | ||
@@ -651,7 +646,8 @@ static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb) | |||
651 | set_sgflags(&termios, tmp.sg_flags); | 646 | set_sgflags(&termios, tmp.sg_flags); |
652 | /* Try and encode into Bfoo format */ | 647 | /* Try and encode into Bfoo format */ |
653 | #ifdef BOTHER | 648 | #ifdef BOTHER |
654 | tty_termios_encode_baud_rate(&termios, termios.c_ispeed, termios.c_ospeed); | 649 | tty_termios_encode_baud_rate(&termios, termios.c_ispeed, |
650 | termios.c_ospeed); | ||
655 | #endif | 651 | #endif |
656 | mutex_unlock(&tty->termios_mutex); | 652 | mutex_unlock(&tty->termios_mutex); |
657 | change_termios(tty, &termios); | 653 | change_termios(tty, &termios); |
@@ -660,7 +656,7 @@ static int set_sgttyb(struct tty_struct * tty, struct sgttyb __user * sgttyb) | |||
660 | #endif | 656 | #endif |
661 | 657 | ||
662 | #ifdef TIOCGETC | 658 | #ifdef TIOCGETC |
663 | static int get_tchars(struct tty_struct * tty, struct tchars __user * tchars) | 659 | static int get_tchars(struct tty_struct *tty, struct tchars __user *tchars) |
664 | { | 660 | { |
665 | struct tchars tmp; | 661 | struct tchars tmp; |
666 | 662 | ||
@@ -673,7 +669,7 @@ static int get_tchars(struct tty_struct * tty, struct tchars __user * tchars) | |||
673 | return copy_to_user(tchars, &tmp, sizeof(tmp)) ? -EFAULT : 0; | 669 | return copy_to_user(tchars, &tmp, sizeof(tmp)) ? -EFAULT : 0; |
674 | } | 670 | } |
675 | 671 | ||
676 | static int set_tchars(struct tty_struct * tty, struct tchars __user * tchars) | 672 | static int set_tchars(struct tty_struct *tty, struct tchars __user *tchars) |
677 | { | 673 | { |
678 | struct tchars tmp; | 674 | struct tchars tmp; |
679 | 675 | ||
@@ -690,20 +686,22 @@ static int set_tchars(struct tty_struct * tty, struct tchars __user * tchars) | |||
690 | #endif | 686 | #endif |
691 | 687 | ||
692 | #ifdef TIOCGLTC | 688 | #ifdef TIOCGLTC |
693 | static int get_ltchars(struct tty_struct * tty, struct ltchars __user * ltchars) | 689 | static int get_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars) |
694 | { | 690 | { |
695 | struct ltchars tmp; | 691 | struct ltchars tmp; |
696 | 692 | ||
697 | tmp.t_suspc = tty->termios->c_cc[VSUSP]; | 693 | tmp.t_suspc = tty->termios->c_cc[VSUSP]; |
698 | tmp.t_dsuspc = tty->termios->c_cc[VSUSP]; /* what is dsuspc anyway? */ | 694 | /* what is dsuspc anyway? */ |
695 | tmp.t_dsuspc = tty->termios->c_cc[VSUSP]; | ||
699 | tmp.t_rprntc = tty->termios->c_cc[VREPRINT]; | 696 | tmp.t_rprntc = tty->termios->c_cc[VREPRINT]; |
700 | tmp.t_flushc = tty->termios->c_cc[VEOL2]; /* what is flushc anyway? */ | 697 | /* what is flushc anyway? */ |
698 | tmp.t_flushc = tty->termios->c_cc[VEOL2]; | ||
701 | tmp.t_werasc = tty->termios->c_cc[VWERASE]; | 699 | tmp.t_werasc = tty->termios->c_cc[VWERASE]; |
702 | tmp.t_lnextc = tty->termios->c_cc[VLNEXT]; | 700 | tmp.t_lnextc = tty->termios->c_cc[VLNEXT]; |
703 | return copy_to_user(ltchars, &tmp, sizeof(tmp)) ? -EFAULT : 0; | 701 | return copy_to_user(ltchars, &tmp, sizeof(tmp)) ? -EFAULT : 0; |
704 | } | 702 | } |
705 | 703 | ||
706 | static int set_ltchars(struct tty_struct * tty, struct ltchars __user * ltchars) | 704 | static int set_ltchars(struct tty_struct *tty, struct ltchars __user *ltchars) |
707 | { | 705 | { |
708 | struct ltchars tmp; | 706 | struct ltchars tmp; |
709 | 707 | ||
@@ -711,9 +709,11 @@ static int set_ltchars(struct tty_struct * tty, struct ltchars __user * ltchars) | |||
711 | return -EFAULT; | 709 | return -EFAULT; |
712 | 710 | ||
713 | tty->termios->c_cc[VSUSP] = tmp.t_suspc; | 711 | tty->termios->c_cc[VSUSP] = tmp.t_suspc; |
714 | tty->termios->c_cc[VEOL2] = tmp.t_dsuspc; /* what is dsuspc anyway? */ | 712 | /* what is dsuspc anyway? */ |
713 | tty->termios->c_cc[VEOL2] = tmp.t_dsuspc; | ||
715 | tty->termios->c_cc[VREPRINT] = tmp.t_rprntc; | 714 | tty->termios->c_cc[VREPRINT] = tmp.t_rprntc; |
716 | tty->termios->c_cc[VEOL2] = tmp.t_flushc; /* what is flushc anyway? */ | 715 | /* what is flushc anyway? */ |
716 | tty->termios->c_cc[VEOL2] = tmp.t_flushc; | ||
717 | tty->termios->c_cc[VWERASE] = tmp.t_werasc; | 717 | tty->termios->c_cc[VWERASE] = tmp.t_werasc; |
718 | tty->termios->c_cc[VLNEXT] = tmp.t_lnextc; | 718 | tty->termios->c_cc[VLNEXT] = tmp.t_lnextc; |
719 | return 0; | 719 | return 0; |
@@ -761,10 +761,10 @@ static int send_prio_char(struct tty_struct *tty, char ch) | |||
761 | * consistent mode setting. | 761 | * consistent mode setting. |
762 | */ | 762 | */ |
763 | 763 | ||
764 | int tty_mode_ioctl(struct tty_struct * tty, struct file *file, | 764 | int tty_mode_ioctl(struct tty_struct *tty, struct file *file, |
765 | unsigned int cmd, unsigned long arg) | 765 | unsigned int cmd, unsigned long arg) |
766 | { | 766 | { |
767 | struct tty_struct * real_tty; | 767 | struct tty_struct *real_tty; |
768 | void __user *p = (void __user *)arg; | 768 | void __user *p = (void __user *)arg; |
769 | 769 | ||
770 | if (tty->driver->type == TTY_DRIVER_TYPE_PTY && | 770 | if (tty->driver->type == TTY_DRIVER_TYPE_PTY && |
@@ -775,100 +775,100 @@ int tty_mode_ioctl(struct tty_struct * tty, struct file *file, | |||
775 | 775 | ||
776 | switch (cmd) { | 776 | switch (cmd) { |
777 | #ifdef TIOCGETP | 777 | #ifdef TIOCGETP |
778 | case TIOCGETP: | 778 | case TIOCGETP: |
779 | return get_sgttyb(real_tty, (struct sgttyb __user *) arg); | 779 | return get_sgttyb(real_tty, (struct sgttyb __user *) arg); |
780 | case TIOCSETP: | 780 | case TIOCSETP: |
781 | case TIOCSETN: | 781 | case TIOCSETN: |
782 | return set_sgttyb(real_tty, (struct sgttyb __user *) arg); | 782 | return set_sgttyb(real_tty, (struct sgttyb __user *) arg); |
783 | #endif | 783 | #endif |
784 | #ifdef TIOCGETC | 784 | #ifdef TIOCGETC |
785 | case TIOCGETC: | 785 | case TIOCGETC: |
786 | return get_tchars(real_tty, p); | 786 | return get_tchars(real_tty, p); |
787 | case TIOCSETC: | 787 | case TIOCSETC: |
788 | return set_tchars(real_tty, p); | 788 | return set_tchars(real_tty, p); |
789 | #endif | 789 | #endif |
790 | #ifdef TIOCGLTC | 790 | #ifdef TIOCGLTC |
791 | case TIOCGLTC: | 791 | case TIOCGLTC: |
792 | return get_ltchars(real_tty, p); | 792 | return get_ltchars(real_tty, p); |
793 | case TIOCSLTC: | 793 | case TIOCSLTC: |
794 | return set_ltchars(real_tty, p); | 794 | return set_ltchars(real_tty, p); |
795 | #endif | 795 | #endif |
796 | case TCSETSF: | 796 | case TCSETSF: |
797 | return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT | TERMIOS_OLD); | 797 | return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT | TERMIOS_OLD); |
798 | case TCSETSW: | 798 | case TCSETSW: |
799 | return set_termios(real_tty, p, TERMIOS_WAIT | TERMIOS_OLD); | 799 | return set_termios(real_tty, p, TERMIOS_WAIT | TERMIOS_OLD); |
800 | case TCSETS: | 800 | case TCSETS: |
801 | return set_termios(real_tty, p, TERMIOS_OLD); | 801 | return set_termios(real_tty, p, TERMIOS_OLD); |
802 | #ifndef TCGETS2 | 802 | #ifndef TCGETS2 |
803 | case TCGETS: | 803 | case TCGETS: |
804 | if (kernel_termios_to_user_termios((struct termios __user *)arg, real_tty->termios)) | 804 | if (kernel_termios_to_user_termios((struct termios __user *)arg, real_tty->termios)) |
805 | return -EFAULT; | 805 | return -EFAULT; |
806 | return 0; | 806 | return 0; |
807 | #else | 807 | #else |
808 | case TCGETS: | 808 | case TCGETS: |
809 | if (kernel_termios_to_user_termios_1((struct termios __user *)arg, real_tty->termios)) | 809 | if (kernel_termios_to_user_termios_1((struct termios __user *)arg, real_tty->termios)) |
810 | return -EFAULT; | 810 | return -EFAULT; |
811 | return 0; | 811 | return 0; |
812 | case TCGETS2: | 812 | case TCGETS2: |
813 | if (kernel_termios_to_user_termios((struct termios2 __user *)arg, real_tty->termios)) | 813 | if (kernel_termios_to_user_termios((struct termios2 __user *)arg, real_tty->termios)) |
814 | return -EFAULT; | 814 | return -EFAULT; |
815 | return 0; | 815 | return 0; |
816 | case TCSETSF2: | 816 | case TCSETSF2: |
817 | return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT); | 817 | return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT); |
818 | case TCSETSW2: | 818 | case TCSETSW2: |
819 | return set_termios(real_tty, p, TERMIOS_WAIT); | 819 | return set_termios(real_tty, p, TERMIOS_WAIT); |
820 | case TCSETS2: | 820 | case TCSETS2: |
821 | return set_termios(real_tty, p, 0); | 821 | return set_termios(real_tty, p, 0); |
822 | #endif | 822 | #endif |
823 | case TCGETA: | 823 | case TCGETA: |
824 | return get_termio(real_tty, p); | 824 | return get_termio(real_tty, p); |
825 | case TCSETAF: | 825 | case TCSETAF: |
826 | return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT | TERMIOS_TERMIO); | 826 | return set_termios(real_tty, p, TERMIOS_FLUSH | TERMIOS_WAIT | TERMIOS_TERMIO); |
827 | case TCSETAW: | 827 | case TCSETAW: |
828 | return set_termios(real_tty, p, TERMIOS_WAIT | TERMIOS_TERMIO); | 828 | return set_termios(real_tty, p, TERMIOS_WAIT | TERMIOS_TERMIO); |
829 | case TCSETA: | 829 | case TCSETA: |
830 | return set_termios(real_tty, p, TERMIOS_TERMIO); | 830 | return set_termios(real_tty, p, TERMIOS_TERMIO); |
831 | #ifndef TCGETS2 | 831 | #ifndef TCGETS2 |
832 | case TIOCGLCKTRMIOS: | 832 | case TIOCGLCKTRMIOS: |
833 | if (kernel_termios_to_user_termios((struct termios __user *)arg, real_tty->termios_locked)) | 833 | if (kernel_termios_to_user_termios((struct termios __user *)arg, real_tty->termios_locked)) |
834 | return -EFAULT; | 834 | return -EFAULT; |
835 | return 0; | 835 | return 0; |
836 | 836 | case TIOCSLCKTRMIOS: | |
837 | case TIOCSLCKTRMIOS: | 837 | if (!capable(CAP_SYS_ADMIN)) |
838 | if (!capable(CAP_SYS_ADMIN)) | 838 | return -EPERM; |
839 | return -EPERM; | 839 | if (user_termios_to_kernel_termios(real_tty->termios_locked, |
840 | if (user_termios_to_kernel_termios(real_tty->termios_locked, (struct termios __user *) arg)) | 840 | (struct termios __user *) arg)) |
841 | return -EFAULT; | 841 | return -EFAULT; |
842 | return 0; | 842 | return 0; |
843 | #else | 843 | #else |
844 | case TIOCGLCKTRMIOS: | 844 | case TIOCGLCKTRMIOS: |
845 | if (kernel_termios_to_user_termios_1((struct termios __user *)arg, real_tty->termios_locked)) | 845 | if (kernel_termios_to_user_termios_1((struct termios __user *)arg, real_tty->termios_locked)) |
846 | return -EFAULT; | 846 | return -EFAULT; |
847 | return 0; | 847 | return 0; |
848 | 848 | case TIOCSLCKTRMIOS: | |
849 | case TIOCSLCKTRMIOS: | 849 | if (!capable(CAP_SYS_ADMIN)) |
850 | if (!capable(CAP_SYS_ADMIN)) | 850 | return -EPERM; |
851 | return -EPERM; | 851 | if (user_termios_to_kernel_termios_1(real_tty->termios_locked, |
852 | if (user_termios_to_kernel_termios_1(real_tty->termios_locked, (struct termios __user *) arg)) | 852 | (struct termios __user *) arg)) |
853 | return -EFAULT; | 853 | return -EFAULT; |
854 | return 0; | 854 | return 0; |
855 | #endif | 855 | #endif |
856 | case TIOCGSOFTCAR: | 856 | case TIOCGSOFTCAR: |
857 | return put_user(C_CLOCAL(tty) ? 1 : 0, (int __user *)arg); | 857 | return put_user(C_CLOCAL(tty) ? 1 : 0, |
858 | case TIOCSSOFTCAR: | 858 | (int __user *)arg); |
859 | if (get_user(arg, (unsigned int __user *) arg)) | 859 | case TIOCSSOFTCAR: |
860 | return -EFAULT; | 860 | if (get_user(arg, (unsigned int __user *) arg)) |
861 | mutex_lock(&tty->termios_mutex); | 861 | return -EFAULT; |
862 | tty->termios->c_cflag = | 862 | mutex_lock(&tty->termios_mutex); |
863 | ((tty->termios->c_cflag & ~CLOCAL) | | 863 | tty->termios->c_cflag = |
864 | (arg ? CLOCAL : 0)); | 864 | ((tty->termios->c_cflag & ~CLOCAL) | |
865 | mutex_unlock(&tty->termios_mutex); | 865 | (arg ? CLOCAL : 0)); |
866 | return 0; | 866 | mutex_unlock(&tty->termios_mutex); |
867 | default: | 867 | return 0; |
868 | return -ENOIOCTLCMD; | 868 | default: |
869 | return -ENOIOCTLCMD; | ||
869 | } | 870 | } |
870 | } | 871 | } |
871 | |||
872 | EXPORT_SYMBOL_GPL(tty_mode_ioctl); | 872 | EXPORT_SYMBOL_GPL(tty_mode_ioctl); |
873 | 873 | ||
874 | int tty_perform_flush(struct tty_struct *tty, unsigned long arg) | 874 | int tty_perform_flush(struct tty_struct *tty, unsigned long arg) |
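For orientation, how the termios set/get cases above are normally reached from userspace; the glibc mapping is TCSANOW -> TCSETS, TCSADRAIN -> TCSETSW, TCSAFLUSH -> TCSETSF. Sketch only, names illustrative:

        #include <termios.h>

        static int make_raw(int fd)
        {
                struct termios t;

                if (tcgetattr(fd, &t) < 0)              /* TCGETS */
                        return -1;
                cfmakeraw(&t);
                return tcsetattr(fd, TCSAFLUSH, &t);    /* TCSETSF */
        }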
@@ -899,13 +899,12 @@ int tty_perform_flush(struct tty_struct *tty, unsigned long arg) | |||
899 | tty_ldisc_deref(ld); | 899 | tty_ldisc_deref(ld); |
900 | return 0; | 900 | return 0; |
901 | } | 901 | } |
902 | |||
903 | EXPORT_SYMBOL_GPL(tty_perform_flush); | 902 | EXPORT_SYMBOL_GPL(tty_perform_flush); |
904 | 903 | ||
905 | int n_tty_ioctl(struct tty_struct * tty, struct file * file, | 904 | int n_tty_ioctl(struct tty_struct *tty, struct file *file, |
906 | unsigned int cmd, unsigned long arg) | 905 | unsigned int cmd, unsigned long arg) |
907 | { | 906 | { |
908 | struct tty_struct * real_tty; | 907 | struct tty_struct *real_tty; |
909 | int retval; | 908 | int retval; |
910 | 909 | ||
911 | if (tty->driver->type == TTY_DRIVER_TYPE_PTY && | 910 | if (tty->driver->type == TTY_DRIVER_TYPE_PTY && |
@@ -915,68 +914,67 @@ int n_tty_ioctl(struct tty_struct * tty, struct file * file, | |||
915 | real_tty = tty; | 914 | real_tty = tty; |
916 | 915 | ||
917 | switch (cmd) { | 916 | switch (cmd) { |
918 | case TCXONC: | 917 | case TCXONC: |
919 | retval = tty_check_change(tty); | 918 | retval = tty_check_change(tty); |
920 | if (retval) | 919 | if (retval) |
921 | return retval; | 920 | return retval; |
922 | switch (arg) { | 921 | switch (arg) { |
923 | case TCOOFF: | 922 | case TCOOFF: |
924 | if (!tty->flow_stopped) { | 923 | if (!tty->flow_stopped) { |
925 | tty->flow_stopped = 1; | 924 | tty->flow_stopped = 1; |
926 | stop_tty(tty); | 925 | stop_tty(tty); |
927 | } | ||
928 | break; | ||
929 | case TCOON: | ||
930 | if (tty->flow_stopped) { | ||
931 | tty->flow_stopped = 0; | ||
932 | start_tty(tty); | ||
933 | } | ||
934 | break; | ||
935 | case TCIOFF: | ||
936 | if (STOP_CHAR(tty) != __DISABLED_CHAR) | ||
937 | return send_prio_char(tty, STOP_CHAR(tty)); | ||
938 | break; | ||
939 | case TCION: | ||
940 | if (START_CHAR(tty) != __DISABLED_CHAR) | ||
941 | return send_prio_char(tty, START_CHAR(tty)); | ||
942 | break; | ||
943 | default: | ||
944 | return -EINVAL; | ||
945 | } | 926 | } |
946 | return 0; | 927 | break; |
947 | case TCFLSH: | 928 | case TCOON: |
948 | return tty_perform_flush(tty, arg); | 929 | if (tty->flow_stopped) { |
949 | case TIOCOUTQ: | 930 | tty->flow_stopped = 0; |
950 | return put_user(tty->driver->chars_in_buffer ? | 931 | start_tty(tty); |
951 | tty->driver->chars_in_buffer(tty) : 0, | 932 | } |
952 | (int __user *) arg); | 933 | break; |
953 | case TIOCINQ: | 934 | case TCIOFF: |
954 | retval = tty->read_cnt; | 935 | if (STOP_CHAR(tty) != __DISABLED_CHAR) |
955 | if (L_ICANON(tty)) | 936 | return send_prio_char(tty, STOP_CHAR(tty)); |
956 | retval = inq_canon(tty); | 937 | break; |
957 | return put_user(retval, (unsigned int __user *) arg); | 938 | case TCION: |
958 | case TIOCPKT: | 939 | if (START_CHAR(tty) != __DISABLED_CHAR) |
959 | { | 940 | return send_prio_char(tty, START_CHAR(tty)); |
960 | int pktmode; | 941 | break; |
961 | |||
962 | if (tty->driver->type != TTY_DRIVER_TYPE_PTY || | ||
963 | tty->driver->subtype != PTY_TYPE_MASTER) | ||
964 | return -ENOTTY; | ||
965 | if (get_user(pktmode, (int __user *) arg)) | ||
966 | return -EFAULT; | ||
967 | if (pktmode) { | ||
968 | if (!tty->packet) { | ||
969 | tty->packet = 1; | ||
970 | tty->link->ctrl_status = 0; | ||
971 | } | ||
972 | } else | ||
973 | tty->packet = 0; | ||
974 | return 0; | ||
975 | } | ||
976 | default: | 942 | default: |
977 | /* Try the mode commands */ | 943 | return -EINVAL; |
978 | return tty_mode_ioctl(tty, file, cmd, arg); | ||
979 | } | 944 | } |
945 | return 0; | ||
946 | case TCFLSH: | ||
947 | return tty_perform_flush(tty, arg); | ||
948 | case TIOCOUTQ: | ||
949 | return put_user(tty->driver->chars_in_buffer ? | ||
950 | tty->driver->chars_in_buffer(tty) : 0, | ||
951 | (int __user *) arg); | ||
952 | case TIOCINQ: | ||
953 | retval = tty->read_cnt; | ||
954 | if (L_ICANON(tty)) | ||
955 | retval = inq_canon(tty); | ||
956 | return put_user(retval, (unsigned int __user *) arg); | ||
957 | case TIOCPKT: | ||
958 | { | ||
959 | int pktmode; | ||
960 | |||
961 | if (tty->driver->type != TTY_DRIVER_TYPE_PTY || | ||
962 | tty->driver->subtype != PTY_TYPE_MASTER) | ||
963 | return -ENOTTY; | ||
964 | if (get_user(pktmode, (int __user *) arg)) | ||
965 | return -EFAULT; | ||
966 | if (pktmode) { | ||
967 | if (!tty->packet) { | ||
968 | tty->packet = 1; | ||
969 | tty->link->ctrl_status = 0; | ||
970 | } | ||
971 | } else | ||
972 | tty->packet = 0; | ||
973 | return 0; | ||
974 | } | ||
975 | default: | ||
976 | /* Try the mode commands */ | ||
977 | return tty_mode_ioctl(tty, file, cmd, arg); | ||
978 | } | ||
980 | } | 979 | } |
981 | |||
982 | EXPORT_SYMBOL(n_tty_ioctl); | 980 | EXPORT_SYMBOL(n_tty_ioctl); |
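The TCXONC cases re-indented above are what tcflow() ends up calling; a minimal sketch of the userspace side:

        #include <termios.h>

        static int pause_output(int fd)
        {
                return tcflow(fd, TCOOFF);      /* TCXONC/TCOOFF: stop_tty() */
        }

        static int resume_output(int fd)
        {
                return tcflow(fd, TCOON);       /* TCXONC/TCOON: start_tty() */
        }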
diff --git a/drivers/dca/dca-sysfs.c b/drivers/dca/dca-sysfs.c index 24a263b6844c..011328faa5f2 100644 --- a/drivers/dca/dca-sysfs.c +++ b/drivers/dca/dca-sysfs.c | |||
@@ -12,10 +12,10 @@ static spinlock_t dca_idr_lock; | |||
12 | 12 | ||
13 | int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot) | 13 | int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot) |
14 | { | 14 | { |
15 | struct class_device *cd; | 15 | struct device *cd; |
16 | 16 | ||
17 | cd = class_device_create(dca_class, dca->cd, MKDEV(0, slot + 1), | 17 | cd = device_create(dca_class, dca->cd, MKDEV(0, slot + 1), |
18 | dev, "requester%d", slot); | 18 | "requester%d", slot); |
19 | if (IS_ERR(cd)) | 19 | if (IS_ERR(cd)) |
20 | return PTR_ERR(cd); | 20 | return PTR_ERR(cd); |
21 | return 0; | 21 | return 0; |
@@ -23,12 +23,12 @@ int dca_sysfs_add_req(struct dca_provider *dca, struct device *dev, int slot) | |||
23 | 23 | ||
24 | void dca_sysfs_remove_req(struct dca_provider *dca, int slot) | 24 | void dca_sysfs_remove_req(struct dca_provider *dca, int slot) |
25 | { | 25 | { |
26 | class_device_destroy(dca_class, MKDEV(0, slot + 1)); | 26 | device_destroy(dca_class, MKDEV(0, slot + 1)); |
27 | } | 27 | } |
28 | 28 | ||
29 | int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev) | 29 | int dca_sysfs_add_provider(struct dca_provider *dca, struct device *dev) |
30 | { | 30 | { |
31 | struct class_device *cd; | 31 | struct device *cd; |
32 | int err = 0; | 32 | int err = 0; |
33 | 33 | ||
34 | idr_try_again: | 34 | idr_try_again: |
@@ -46,8 +46,7 @@ idr_try_again: | |||
46 | return err; | 46 | return err; |
47 | } | 47 | } |
48 | 48 | ||
49 | cd = class_device_create(dca_class, NULL, MKDEV(0, 0), | 49 | cd = device_create(dca_class, dev, MKDEV(0, 0), "dca%d", dca->id); |
50 | dev, "dca%d", dca->id); | ||
51 | if (IS_ERR(cd)) { | 50 | if (IS_ERR(cd)) { |
52 | spin_lock(&dca_idr_lock); | 51 | spin_lock(&dca_idr_lock); |
53 | idr_remove(&dca_idr, dca->id); | 52 | idr_remove(&dca_idr, dca->id); |
@@ -60,7 +59,7 @@ idr_try_again: | |||
60 | 59 | ||
61 | void dca_sysfs_remove_provider(struct dca_provider *dca) | 60 | void dca_sysfs_remove_provider(struct dca_provider *dca) |
62 | { | 61 | { |
63 | class_device_unregister(dca->cd); | 62 | device_unregister(dca->cd); |
64 | dca->cd = NULL; | 63 | dca->cd = NULL; |
65 | spin_lock(&dca_idr_lock); | 64 | spin_lock(&dca_idr_lock); |
66 | idr_remove(&dca_idr, dca->id); | 65 | idr_remove(&dca_idr, dca->id); |
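The hunks above follow the generic class_device -> struct device conversion; a sketch of the pattern, assuming the device_create() prototype visible in the new code (no drvdata argument in this era):

        #include <linux/device.h>

        static struct device *ex_add_node(struct class *cls,
                                          struct device *parent,
                                          dev_t devt, int id)
        {
                return device_create(cls, parent, devt, "example%d", id);
        }

        static void ex_del_node(struct class *cls, dev_t devt)
        {
                device_destroy(cls, devt);
        }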
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c index 1412d7bcdbd1..653265a40b7f 100644 --- a/drivers/firmware/dmi_scan.c +++ b/drivers/firmware/dmi_scan.c | |||
@@ -250,6 +250,28 @@ static void __init dmi_save_ipmi_device(const struct dmi_header *dm) | |||
250 | list_add(&dev->list, &dmi_devices); | 250 | list_add(&dev->list, &dmi_devices); |
251 | } | 251 | } |
252 | 252 | ||
253 | static void __init dmi_save_extended_devices(const struct dmi_header *dm) | ||
254 | { | ||
255 | const u8 *d = (u8*) dm + 5; | ||
256 | struct dmi_device *dev; | ||
257 | |||
258 | /* Skip disabled device */ | ||
259 | if ((*d & 0x80) == 0) | ||
260 | return; | ||
261 | |||
262 | dev = dmi_alloc(sizeof(*dev)); | ||
263 | if (!dev) { | ||
264 | printk(KERN_ERR "dmi_save_extended_devices: out of memory.\n"); | ||
265 | return; | ||
266 | } | ||
267 | |||
268 | dev->type = *d-- & 0x7f; | ||
269 | dev->name = dmi_string(dm, *d); | ||
270 | dev->device_data = NULL; | ||
271 | |||
272 | list_add(&dev->list, &dmi_devices); | ||
273 | } | ||
274 | |||
253 | /* | 275 | /* |
254 | * Process a DMI table entry. Right now all we care about are the BIOS | 276 | * Process a DMI table entry. Right now all we care about are the BIOS |
255 | * and machine entries. For 2.5 we should pull the smbus controller info | 277 | * and machine entries. For 2.5 we should pull the smbus controller info |
@@ -292,6 +314,9 @@ static void __init dmi_decode(const struct dmi_header *dm) | |||
292 | break; | 314 | break; |
293 | case 38: /* IPMI Device Information */ | 315 | case 38: /* IPMI Device Information */ |
294 | dmi_save_ipmi_device(dm); | 316 | dmi_save_ipmi_device(dm); |
317 | break; | ||
318 | case 41: /* Onboard Devices Extended Information */ | ||
319 | dmi_save_extended_devices(dm); | ||
295 | } | 320 | } |
296 | } | 321 | } |
297 | 322 | ||
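
The new type-41 handler above stores each enabled "Onboard Devices Extended Information" entry in the same dmi_devices list that the existing type-10 and type-38 handlers feed, so existing consumers can reach the new records through the usual dmi_find_device() lookup. A minimal, hypothetical caller is sketched below; the iteration pattern is the standard one, and DMI_DEV_TYPE_ETHERNET is used on the assumption that the type-41 device-type codes match the type-10 ones (both follow the SMBIOS onboard-device type table).

    #include <linux/kernel.h>
    #include <linux/dmi.h>

    /* Hypothetical example: list every enabled onboard Ethernet device the
     * firmware reported, whether it came from a type 10 or a type 41 record. */
    static void __init list_onboard_ethernet(void)
    {
        const struct dmi_device *dev = NULL;

        while ((dev = dmi_find_device(DMI_DEV_TYPE_ETHERNET, NULL, dev)))
            printk(KERN_INFO "onboard ethernet: %s\n", dev->name);
    }
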
diff --git a/drivers/ide/ppc/mpc8xx.c b/drivers/ide/ppc/mpc8xx.c index 06190b1c4ec5..38fbfb8d5445 100644 --- a/drivers/ide/ppc/mpc8xx.c +++ b/drivers/ide/ppc/mpc8xx.c | |||
@@ -17,7 +17,6 @@ | |||
17 | #include <linux/ptrace.h> | 17 | #include <linux/ptrace.h> |
18 | #include <linux/slab.h> | 18 | #include <linux/slab.h> |
19 | #include <linux/user.h> | 19 | #include <linux/user.h> |
20 | #include <linux/a.out.h> | ||
21 | #include <linux/tty.h> | 20 | #include <linux/tty.h> |
22 | #include <linux/major.h> | 21 | #include <linux/major.h> |
23 | #include <linux/interrupt.h> | 22 | #include <linux/interrupt.h> |
diff --git a/drivers/infiniband/hw/mlx4/cq.c b/drivers/infiniband/hw/mlx4/cq.c index 7950aa6e8184..7360bbafbe84 100644 --- a/drivers/infiniband/hw/mlx4/cq.c +++ b/drivers/infiniband/hw/mlx4/cq.c | |||
@@ -64,13 +64,7 @@ static void mlx4_ib_cq_event(struct mlx4_cq *cq, enum mlx4_event type) | |||
64 | 64 | ||
65 | static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n) | 65 | static void *get_cqe_from_buf(struct mlx4_ib_cq_buf *buf, int n) |
66 | { | 66 | { |
67 | int offset = n * sizeof (struct mlx4_cqe); | 67 | return mlx4_buf_offset(&buf->buf, n * sizeof (struct mlx4_cqe)); |
68 | |||
69 | if (buf->buf.nbufs == 1) | ||
70 | return buf->buf.u.direct.buf + offset; | ||
71 | else | ||
72 | return buf->buf.u.page_list[offset >> PAGE_SHIFT].buf + | ||
73 | (offset & (PAGE_SIZE - 1)); | ||
74 | } | 68 | } |
75 | 69 | ||
76 | static void *get_cqe(struct mlx4_ib_cq *cq, int n) | 70 | static void *get_cqe(struct mlx4_ib_cq *cq, int n) |
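
The open-coded direct-buffer/page-list arithmetic deleted here (and from the equivalent qp.c and srq.c helpers later in this diff) moves into a mlx4_buf_offset() helper provided by the mlx4 core, which is not part of this section. Judging only from the removed lines, that helper presumably looks roughly like the sketch below; the authoritative definition lives in the mlx4 core headers and may differ, for instance if the companion core patch also reworks struct mlx4_buf.

    /* Sketch reconstructed from the removed call-site logic, not the
     * real definition from the mlx4 core headers. */
    static inline void *mlx4_buf_offset(struct mlx4_buf *buf, int offset)
    {
        if (buf->nbufs == 1)
            return buf->u.direct.buf + offset;
        else
            return buf->u.page_list[offset >> PAGE_SHIFT].buf +
                   (offset & (PAGE_SIZE - 1));
    }
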
@@ -332,6 +326,12 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, | |||
332 | is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == | 326 | is_error = (cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == |
333 | MLX4_CQE_OPCODE_ERROR; | 327 | MLX4_CQE_OPCODE_ERROR; |
334 | 328 | ||
329 | if (unlikely((cqe->owner_sr_opcode & MLX4_CQE_OPCODE_MASK) == MLX4_OPCODE_NOP && | ||
330 | is_send)) { | ||
331 | printk(KERN_WARNING "Completion for NOP opcode detected!\n"); | ||
332 | return -EINVAL; | ||
333 | } | ||
334 | |||
335 | if (!*cur_qp || | 335 | if (!*cur_qp || |
336 | (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) { | 336 | (be32_to_cpu(cqe->my_qpn) & 0xffffff) != (*cur_qp)->mqp.qpn) { |
337 | /* | 337 | /* |
@@ -354,8 +354,10 @@ static int mlx4_ib_poll_one(struct mlx4_ib_cq *cq, | |||
354 | 354 | ||
355 | if (is_send) { | 355 | if (is_send) { |
356 | wq = &(*cur_qp)->sq; | 356 | wq = &(*cur_qp)->sq; |
357 | wqe_ctr = be16_to_cpu(cqe->wqe_index); | 357 | if (!(*cur_qp)->sq_signal_bits) { |
358 | wq->tail += (u16) (wqe_ctr - (u16) wq->tail); | 358 | wqe_ctr = be16_to_cpu(cqe->wqe_index); |
359 | wq->tail += (u16) (wqe_ctr - (u16) wq->tail); | ||
360 | } | ||
359 | wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; | 361 | wc->wr_id = wq->wrid[wq->tail & (wq->wqe_cnt - 1)]; |
360 | ++wq->tail; | 362 | ++wq->tail; |
361 | } else if ((*cur_qp)->ibqp.srq) { | 363 | } else if ((*cur_qp)->ibqp.srq) { |
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h index 28697653a370..3726e451a327 100644 --- a/drivers/infiniband/hw/mlx4/mlx4_ib.h +++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h | |||
@@ -120,6 +120,8 @@ struct mlx4_ib_qp { | |||
120 | 120 | ||
121 | u32 doorbell_qpn; | 121 | u32 doorbell_qpn; |
122 | __be32 sq_signal_bits; | 122 | __be32 sq_signal_bits; |
123 | unsigned sq_next_wqe; | ||
124 | int sq_max_wqes_per_wr; | ||
123 | int sq_spare_wqes; | 125 | int sq_spare_wqes; |
124 | struct mlx4_ib_wq sq; | 126 | struct mlx4_ib_wq sq; |
125 | 127 | ||
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c index 8cba9c532e64..958e205b6d7c 100644 --- a/drivers/infiniband/hw/mlx4/qp.c +++ b/drivers/infiniband/hw/mlx4/qp.c | |||
@@ -30,6 +30,8 @@ | |||
30 | * SOFTWARE. | 30 | * SOFTWARE. |
31 | */ | 31 | */ |
32 | 32 | ||
33 | #include <linux/log2.h> | ||
34 | |||
33 | #include <rdma/ib_cache.h> | 35 | #include <rdma/ib_cache.h> |
34 | #include <rdma/ib_pack.h> | 36 | #include <rdma/ib_pack.h> |
35 | 37 | ||
@@ -96,11 +98,7 @@ static int is_qp0(struct mlx4_ib_dev *dev, struct mlx4_ib_qp *qp) | |||
96 | 98 | ||
97 | static void *get_wqe(struct mlx4_ib_qp *qp, int offset) | 99 | static void *get_wqe(struct mlx4_ib_qp *qp, int offset) |
98 | { | 100 | { |
99 | if (qp->buf.nbufs == 1) | 101 | return mlx4_buf_offset(&qp->buf, offset); |
100 | return qp->buf.u.direct.buf + offset; | ||
101 | else | ||
102 | return qp->buf.u.page_list[offset >> PAGE_SHIFT].buf + | ||
103 | (offset & (PAGE_SIZE - 1)); | ||
104 | } | 102 | } |
105 | 103 | ||
106 | static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) | 104 | static void *get_recv_wqe(struct mlx4_ib_qp *qp, int n) |
@@ -115,16 +113,87 @@ static void *get_send_wqe(struct mlx4_ib_qp *qp, int n) | |||
115 | 113 | ||
116 | /* | 114 | /* |
117 | * Stamp a SQ WQE so that it is invalid if prefetched by marking the | 115 | * Stamp a SQ WQE so that it is invalid if prefetched by marking the |
118 | * first four bytes of every 64 byte chunk with 0xffffffff, except for | 116 | * first four bytes of every 64 byte chunk with |
119 | * the very first chunk of the WQE. | 117 | 0x7FFFFFFF | (invalid_ownership_value << 31). |
118 | * | ||
119 | * When the max work request size is less than or equal to the WQE | ||
120 | * basic block size, as an optimization, we can stamp all WQEs with | ||
121 | * 0xffffffff, and skip the very first chunk of each WQE. | ||
120 | */ | 122 | */ |
121 | static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n) | 123 | static void stamp_send_wqe(struct mlx4_ib_qp *qp, int n, int size) |
122 | { | 124 | { |
123 | u32 *wqe = get_send_wqe(qp, n); | 125 | u32 *wqe; |
124 | int i; | 126 | int i; |
127 | int s; | ||
128 | int ind; | ||
129 | void *buf; | ||
130 | __be32 stamp; | ||
131 | |||
132 | s = roundup(size, 1U << qp->sq.wqe_shift); | ||
133 | if (qp->sq_max_wqes_per_wr > 1) { | ||
134 | for (i = 0; i < s; i += 64) { | ||
135 | ind = (i >> qp->sq.wqe_shift) + n; | ||
136 | stamp = ind & qp->sq.wqe_cnt ? cpu_to_be32(0x7fffffff) : | ||
137 | cpu_to_be32(0xffffffff); | ||
138 | buf = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); | ||
139 | wqe = buf + (i & ((1 << qp->sq.wqe_shift) - 1)); | ||
140 | *wqe = stamp; | ||
141 | } | ||
142 | } else { | ||
143 | buf = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); | ||
144 | for (i = 64; i < s; i += 64) { | ||
145 | wqe = buf + i; | ||
146 | *wqe = 0xffffffff; | ||
147 | } | ||
148 | } | ||
149 | } | ||
150 | |||
151 | static void post_nop_wqe(struct mlx4_ib_qp *qp, int n, int size) | ||
152 | { | ||
153 | struct mlx4_wqe_ctrl_seg *ctrl; | ||
154 | struct mlx4_wqe_inline_seg *inl; | ||
155 | void *wqe; | ||
156 | int s; | ||
157 | |||
158 | ctrl = wqe = get_send_wqe(qp, n & (qp->sq.wqe_cnt - 1)); | ||
159 | s = sizeof(struct mlx4_wqe_ctrl_seg); | ||
125 | 160 | ||
126 | for (i = 16; i < 1 << (qp->sq.wqe_shift - 2); i += 16) | 161 | if (qp->ibqp.qp_type == IB_QPT_UD) { |
127 | wqe[i] = 0xffffffff; | 162 | struct mlx4_wqe_datagram_seg *dgram = wqe + sizeof *ctrl; |
163 | struct mlx4_av *av = (struct mlx4_av *)dgram->av; | ||
164 | memset(dgram, 0, sizeof *dgram); | ||
165 | av->port_pd = cpu_to_be32((qp->port << 24) | to_mpd(qp->ibqp.pd)->pdn); | ||
166 | s += sizeof(struct mlx4_wqe_datagram_seg); | ||
167 | } | ||
168 | |||
169 | /* Pad the remainder of the WQE with an inline data segment. */ | ||
170 | if (size > s) { | ||
171 | inl = wqe + s; | ||
172 | inl->byte_count = cpu_to_be32(1 << 31 | (size - s - sizeof *inl)); | ||
173 | } | ||
174 | ctrl->srcrb_flags = 0; | ||
175 | ctrl->fence_size = size / 16; | ||
176 | /* | ||
177 | * Make sure descriptor is fully written before setting ownership bit | ||
178 | * (because HW can start executing as soon as we do). | ||
179 | */ | ||
180 | wmb(); | ||
181 | |||
182 | ctrl->owner_opcode = cpu_to_be32(MLX4_OPCODE_NOP | MLX4_WQE_CTRL_NEC) | | ||
183 | (n & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); | ||
184 | |||
185 | stamp_send_wqe(qp, n + qp->sq_spare_wqes, size); | ||
186 | } | ||
187 | |||
188 | /* Post NOP WQE to prevent wrap-around in the middle of WR */ | ||
189 | static inline unsigned pad_wraparound(struct mlx4_ib_qp *qp, int ind) | ||
190 | { | ||
191 | unsigned s = qp->sq.wqe_cnt - (ind & (qp->sq.wqe_cnt - 1)); | ||
192 | if (unlikely(s < qp->sq_max_wqes_per_wr)) { | ||
193 | post_nop_wqe(qp, ind, s << qp->sq.wqe_shift); | ||
194 | ind += s; | ||
195 | } | ||
196 | return ind; | ||
128 | } | 197 | } |
129 | 198 | ||
130 | static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) | 199 | static void mlx4_ib_qp_event(struct mlx4_qp *qp, enum mlx4_event type) |
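
To make the stamping rule above concrete: wqe_cnt is a power of two, so bit log2(wqe_cnt) of an absolute send-queue index flips on every pass through the ring, and the driver derives the ownership bit of each valid WQE from that same index bit. A stamp therefore only has to carry the opposite value in bit 31 to look invalid to a hardware prefetch. A stand-alone toy of that calculation (wqe_cnt and the indices are invented):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        unsigned wqe_cnt = 16;          /* power of two, as in the driver */
        unsigned idx[] = { 5, 21 };     /* absolute send-queue indices */
        int i;

        for (i = 0; i < 2; i++) {
            /* on odd passes through the ring valid WQEs carry owner bit 1,
             * so the "invalid" stamp must leave bit 31 clear, and vice versa */
            uint32_t stamp = (idx[i] & wqe_cnt) ? 0x7fffffffu : 0xffffffffu;
            printf("index %u -> stamp 0x%08x\n", idx[i], stamp);
        }
        return 0;
    }
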
@@ -241,6 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |||
241 | static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | 310 | static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, |
242 | enum ib_qp_type type, struct mlx4_ib_qp *qp) | 311 | enum ib_qp_type type, struct mlx4_ib_qp *qp) |
243 | { | 312 | { |
313 | int s; | ||
314 | |||
244 | /* Sanity check SQ size before proceeding */ | 315 | /* Sanity check SQ size before proceeding */ |
245 | if (cap->max_send_wr > dev->dev->caps.max_wqes || | 316 | if (cap->max_send_wr > dev->dev->caps.max_wqes || |
246 | cap->max_send_sge > dev->dev->caps.max_sq_sg || | 317 | cap->max_send_sge > dev->dev->caps.max_sq_sg || |
@@ -256,20 +327,74 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |||
256 | cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg) | 327 | cap->max_send_sge + 2 > dev->dev->caps.max_sq_sg) |
257 | return -EINVAL; | 328 | return -EINVAL; |
258 | 329 | ||
259 | qp->sq.wqe_shift = ilog2(roundup_pow_of_two(max(cap->max_send_sge * | 330 | s = max(cap->max_send_sge * sizeof (struct mlx4_wqe_data_seg), |
260 | sizeof (struct mlx4_wqe_data_seg), | 331 | cap->max_inline_data + sizeof (struct mlx4_wqe_inline_seg)) + |
261 | cap->max_inline_data + | 332 | send_wqe_overhead(type); |
262 | sizeof (struct mlx4_wqe_inline_seg)) + | ||
263 | send_wqe_overhead(type))); | ||
264 | qp->sq.max_gs = ((1 << qp->sq.wqe_shift) - send_wqe_overhead(type)) / | ||
265 | sizeof (struct mlx4_wqe_data_seg); | ||
266 | 333 | ||
267 | /* | 334 | /* |
268 | * We need to leave 2 KB + 1 WQE of headroom in the SQ to | 335 | * Hermon supports shrinking WQEs, such that a single work |
269 | * allow HW to prefetch. | 336 | * request can include multiple units of 1 << wqe_shift. This |
337 | * way, work requests can differ in size, and do not have to | ||
338 | * be a power of 2 in size, saving memory and speeding up send | ||
339 | * WR posting. Unfortunately, if we do this then the | ||
340 | * wqe_index field in CQEs can't be used to look up the WR ID | ||
341 | * anymore, so we do this only if selective signaling is off. | ||
342 | * | ||
343 | * Further, on 32-bit platforms, we can't use vmap() to make | ||
344 | * the QP buffer virtually contiguous. Thus we have to use | ||
345 | * constant-sized WRs to make sure a WR is always fully within | ||
346 | * a single page-sized chunk. | ||
347 | * | ||
348 | * Finally, we use NOP work requests to pad the end of the | ||
349 | * work queue, to avoid wrap-around in the middle of WR. We | ||
350 | * set NEC bit to avoid getting completions with error for | ||
351 | * these NOP WRs, but since NEC is only supported starting | ||
352 | * with firmware 2.2.232, we use constant-sized WRs for older | ||
353 | * firmware. | ||
354 | * | ||
355 | * And, since MLX QPs only support SEND, we use constant-sized | ||
356 | * WRs in this case. | ||
357 | * | ||
358 | * We look for the smallest value of wqe_shift such that the | ||
359 | * resulting number of wqes does not exceed device | ||
360 | * capabilities. | ||
361 | * | ||
362 | * We set WQE size to at least 64 bytes, so that stamping | ||
363 | * invalidates each WQE. | ||
270 | */ | 364 | */ |
271 | qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + 1; | 365 | if (dev->dev->caps.fw_ver >= MLX4_FW_VER_WQE_CTRL_NEC && |
272 | qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr + qp->sq_spare_wqes); | 366 | qp->sq_signal_bits && BITS_PER_LONG == 64 && |
367 | type != IB_QPT_SMI && type != IB_QPT_GSI) | ||
368 | qp->sq.wqe_shift = ilog2(64); | ||
369 | else | ||
370 | qp->sq.wqe_shift = ilog2(roundup_pow_of_two(s)); | ||
371 | |||
372 | for (;;) { | ||
373 | if (1 << qp->sq.wqe_shift > dev->dev->caps.max_sq_desc_sz) | ||
374 | return -EINVAL; | ||
375 | |||
376 | qp->sq_max_wqes_per_wr = DIV_ROUND_UP(s, 1U << qp->sq.wqe_shift); | ||
377 | |||
378 | /* | ||
379 | * We need to leave 2 KB + 1 WR of headroom in the SQ to | ||
380 | * allow HW to prefetch. | ||
381 | */ | ||
382 | qp->sq_spare_wqes = (2048 >> qp->sq.wqe_shift) + qp->sq_max_wqes_per_wr; | ||
383 | qp->sq.wqe_cnt = roundup_pow_of_two(cap->max_send_wr * | ||
384 | qp->sq_max_wqes_per_wr + | ||
385 | qp->sq_spare_wqes); | ||
386 | |||
387 | if (qp->sq.wqe_cnt <= dev->dev->caps.max_wqes) | ||
388 | break; | ||
389 | |||
390 | if (qp->sq_max_wqes_per_wr <= 1) | ||
391 | return -EINVAL; | ||
392 | |||
393 | ++qp->sq.wqe_shift; | ||
394 | } | ||
395 | |||
396 | qp->sq.max_gs = ((qp->sq_max_wqes_per_wr << qp->sq.wqe_shift) - | ||
397 | send_wqe_overhead(type)) / sizeof (struct mlx4_wqe_data_seg); | ||
273 | 398 | ||
274 | qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + | 399 | qp->buf_size = (qp->rq.wqe_cnt << qp->rq.wqe_shift) + |
275 | (qp->sq.wqe_cnt << qp->sq.wqe_shift); | 400 | (qp->sq.wqe_cnt << qp->sq.wqe_shift); |
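
The comment block and loop above pick the smallest wqe_shift whose resulting (power-of-two) wqe_cnt still fits under the device limit, falling back to a larger basic block when it does not. The user-space toy below models only the WQE-shrinking path (the one that starts from 64-byte blocks); every capability number in it is invented, and the real code additionally gives up once a WR already fits in a single WQE.

    #include <stdio.h>

    int main(void)
    {
        unsigned s = 200;             /* bytes needed by the largest WR (invented) */
        unsigned max_send_wr = 512;   /* requested queue depth (invented) */
        unsigned max_wqes = 16384;    /* device cap on WQEs per QP (invented) */
        unsigned max_desc_sz = 1008;  /* device cap on descriptor size (invented) */
        unsigned shift, wqes_per_wr, spare, cnt;

        for (shift = 6; (1u << shift) <= max_desc_sz; shift++) {
            wqes_per_wr = (s + (1u << shift) - 1) >> shift;   /* DIV_ROUND_UP */
            spare = (2048 >> shift) + wqes_per_wr;            /* prefetch headroom */
            for (cnt = 1; cnt < max_send_wr * wqes_per_wr + spare; cnt <<= 1)
                ;                                             /* roundup_pow_of_two */
            if (cnt <= max_wqes) {
                printf("wqe_shift=%u wqes_per_wr=%u wqe_cnt=%u max_post=%u\n",
                       shift, wqes_per_wr, cnt, (cnt - spare) / wqes_per_wr);
                return 0;
            }
        }
        return 1;   /* nothing fits; the kernel path returns -EINVAL */
    }
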
@@ -281,7 +406,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap, | |||
281 | qp->sq.offset = 0; | 406 | qp->sq.offset = 0; |
282 | } | 407 | } |
283 | 408 | ||
284 | cap->max_send_wr = qp->sq.max_post = qp->sq.wqe_cnt - qp->sq_spare_wqes; | 409 | cap->max_send_wr = qp->sq.max_post = |
410 | (qp->sq.wqe_cnt - qp->sq_spare_wqes) / qp->sq_max_wqes_per_wr; | ||
285 | cap->max_send_sge = qp->sq.max_gs; | 411 | cap->max_send_sge = qp->sq.max_gs; |
286 | /* We don't support inline sends for kernel QPs (yet) */ | 412 | /* We don't support inline sends for kernel QPs (yet) */ |
287 | cap->max_inline_data = 0; | 413 | cap->max_inline_data = 0; |
@@ -327,6 +453,12 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
327 | qp->rq.tail = 0; | 453 | qp->rq.tail = 0; |
328 | qp->sq.head = 0; | 454 | qp->sq.head = 0; |
329 | qp->sq.tail = 0; | 455 | qp->sq.tail = 0; |
456 | qp->sq_next_wqe = 0; | ||
457 | |||
458 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) | ||
459 | qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); | ||
460 | else | ||
461 | qp->sq_signal_bits = 0; | ||
330 | 462 | ||
331 | err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp); | 463 | err = set_rq_size(dev, &init_attr->cap, !!pd->uobject, !!init_attr->srq, qp); |
332 | if (err) | 464 | if (err) |
@@ -417,11 +549,6 @@ static int create_qp_common(struct mlx4_ib_dev *dev, struct ib_pd *pd, | |||
417 | */ | 549 | */ |
418 | qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); | 550 | qp->doorbell_qpn = swab32(qp->mqp.qpn << 8); |
419 | 551 | ||
420 | if (init_attr->sq_sig_type == IB_SIGNAL_ALL_WR) | ||
421 | qp->sq_signal_bits = cpu_to_be32(MLX4_WQE_CTRL_CQ_UPDATE); | ||
422 | else | ||
423 | qp->sq_signal_bits = 0; | ||
424 | |||
425 | qp->mqp.event = mlx4_ib_qp_event; | 552 | qp->mqp.event = mlx4_ib_qp_event; |
426 | 553 | ||
427 | return 0; | 554 | return 0; |
@@ -916,7 +1043,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
916 | ctrl = get_send_wqe(qp, i); | 1043 | ctrl = get_send_wqe(qp, i); |
917 | ctrl->owner_opcode = cpu_to_be32(1 << 31); | 1044 | ctrl->owner_opcode = cpu_to_be32(1 << 31); |
918 | 1045 | ||
919 | stamp_send_wqe(qp, i); | 1046 | stamp_send_wqe(qp, i, 1 << qp->sq.wqe_shift); |
920 | } | 1047 | } |
921 | } | 1048 | } |
922 | 1049 | ||
@@ -969,6 +1096,7 @@ static int __mlx4_ib_modify_qp(struct ib_qp *ibqp, | |||
969 | qp->rq.tail = 0; | 1096 | qp->rq.tail = 0; |
970 | qp->sq.head = 0; | 1097 | qp->sq.head = 0; |
971 | qp->sq.tail = 0; | 1098 | qp->sq.tail = 0; |
1099 | qp->sq_next_wqe = 0; | ||
972 | if (!ibqp->srq) | 1100 | if (!ibqp->srq) |
973 | *qp->db.db = 0; | 1101 | *qp->db.db = 0; |
974 | } | 1102 | } |
@@ -1278,13 +1406,14 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1278 | unsigned long flags; | 1406 | unsigned long flags; |
1279 | int nreq; | 1407 | int nreq; |
1280 | int err = 0; | 1408 | int err = 0; |
1281 | int ind; | 1409 | unsigned ind; |
1282 | int size; | 1410 | int uninitialized_var(stamp); |
1411 | int uninitialized_var(size); | ||
1283 | int i; | 1412 | int i; |
1284 | 1413 | ||
1285 | spin_lock_irqsave(&qp->sq.lock, flags); | 1414 | spin_lock_irqsave(&qp->sq.lock, flags); |
1286 | 1415 | ||
1287 | ind = qp->sq.head; | 1416 | ind = qp->sq_next_wqe; |
1288 | 1417 | ||
1289 | for (nreq = 0; wr; ++nreq, wr = wr->next) { | 1418 | for (nreq = 0; wr; ++nreq, wr = wr->next) { |
1290 | if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { | 1419 | if (mlx4_wq_overflow(&qp->sq, nreq, qp->ibqp.send_cq)) { |
@@ -1300,7 +1429,7 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1300 | } | 1429 | } |
1301 | 1430 | ||
1302 | ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); | 1431 | ctrl = wqe = get_send_wqe(qp, ind & (qp->sq.wqe_cnt - 1)); |
1303 | qp->sq.wrid[ind & (qp->sq.wqe_cnt - 1)] = wr->wr_id; | 1432 | qp->sq.wrid[(qp->sq.head + nreq) & (qp->sq.wqe_cnt - 1)] = wr->wr_id; |
1304 | 1433 | ||
1305 | ctrl->srcrb_flags = | 1434 | ctrl->srcrb_flags = |
1306 | (wr->send_flags & IB_SEND_SIGNALED ? | 1435 | (wr->send_flags & IB_SEND_SIGNALED ? |
@@ -1413,16 +1542,23 @@ int mlx4_ib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, | |||
1413 | ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | | 1542 | ctrl->owner_opcode = mlx4_ib_opcode[wr->opcode] | |
1414 | (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); | 1543 | (ind & qp->sq.wqe_cnt ? cpu_to_be32(1 << 31) : 0); |
1415 | 1544 | ||
1545 | stamp = ind + qp->sq_spare_wqes; | ||
1546 | ind += DIV_ROUND_UP(size * 16, 1U << qp->sq.wqe_shift); | ||
1547 | |||
1416 | /* | 1548 | /* |
1417 | * We can improve latency by not stamping the last | 1549 | * We can improve latency by not stamping the last |
1418 | * send queue WQE until after ringing the doorbell, so | 1550 | * send queue WQE until after ringing the doorbell, so |
1419 | * only stamp here if there are still more WQEs to post. | 1551 | * only stamp here if there are still more WQEs to post. |
1552 | * | ||
1553 | * Same optimization applies to padding with NOP wqe | ||
1554 | * in case of WQE shrinking (used to prevent wrap-around | ||
1555 | * in the middle of WR). | ||
1420 | */ | 1556 | */ |
1421 | if (wr->next) | 1557 | if (wr->next) { |
1422 | stamp_send_wqe(qp, (ind + qp->sq_spare_wqes) & | 1558 | stamp_send_wqe(qp, stamp, size * 16); |
1423 | (qp->sq.wqe_cnt - 1)); | 1559 | ind = pad_wraparound(qp, ind); |
1560 | } | ||
1424 | 1561 | ||
1425 | ++ind; | ||
1426 | } | 1562 | } |
1427 | 1563 | ||
1428 | out: | 1564 | out: |
@@ -1444,8 +1580,10 @@ out: | |||
1444 | */ | 1580 | */ |
1445 | mmiowb(); | 1581 | mmiowb(); |
1446 | 1582 | ||
1447 | stamp_send_wqe(qp, (ind + qp->sq_spare_wqes - 1) & | 1583 | stamp_send_wqe(qp, stamp, size * 16); |
1448 | (qp->sq.wqe_cnt - 1)); | 1584 | |
1585 | ind = pad_wraparound(qp, ind); | ||
1586 | qp->sq_next_wqe = ind; | ||
1449 | } | 1587 | } |
1450 | 1588 | ||
1451 | spin_unlock_irqrestore(&qp->sq.lock, flags); | 1589 | spin_unlock_irqrestore(&qp->sq.lock, flags); |
diff --git a/drivers/infiniband/hw/mlx4/srq.c b/drivers/infiniband/hw/mlx4/srq.c index e7e9a3d0dac3..beaa3b06cf58 100644 --- a/drivers/infiniband/hw/mlx4/srq.c +++ b/drivers/infiniband/hw/mlx4/srq.c | |||
@@ -38,13 +38,7 @@ | |||
38 | 38 | ||
39 | static void *get_wqe(struct mlx4_ib_srq *srq, int n) | 39 | static void *get_wqe(struct mlx4_ib_srq *srq, int n) |
40 | { | 40 | { |
41 | int offset = n << srq->msrq.wqe_shift; | 41 | return mlx4_buf_offset(&srq->buf, n << srq->msrq.wqe_shift); |
42 | |||
43 | if (srq->buf.nbufs == 1) | ||
44 | return srq->buf.u.direct.buf + offset; | ||
45 | else | ||
46 | return srq->buf.u.page_list[offset >> PAGE_SHIFT].buf + | ||
47 | (offset & (PAGE_SIZE - 1)); | ||
48 | } | 42 | } |
49 | 43 | ||
50 | static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type) | 44 | static void mlx4_ib_srq_event(struct mlx4_srq *srq, enum mlx4_event type) |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h index fe250c60607d..f9b7caa54143 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib.h +++ b/drivers/infiniband/ulp/ipoib/ipoib.h | |||
@@ -143,7 +143,7 @@ struct ipoib_rx_buf { | |||
143 | 143 | ||
144 | struct ipoib_tx_buf { | 144 | struct ipoib_tx_buf { |
145 | struct sk_buff *skb; | 145 | struct sk_buff *skb; |
146 | u64 mapping; | 146 | u64 mapping[MAX_SKB_FRAGS + 1]; |
147 | }; | 147 | }; |
148 | 148 | ||
149 | struct ib_cm_id; | 149 | struct ib_cm_id; |
@@ -296,7 +296,7 @@ struct ipoib_dev_priv { | |||
296 | struct ipoib_tx_buf *tx_ring; | 296 | struct ipoib_tx_buf *tx_ring; |
297 | unsigned tx_head; | 297 | unsigned tx_head; |
298 | unsigned tx_tail; | 298 | unsigned tx_tail; |
299 | struct ib_sge tx_sge; | 299 | struct ib_sge tx_sge[MAX_SKB_FRAGS + 1]; |
300 | struct ib_send_wr tx_wr; | 300 | struct ib_send_wr tx_wr; |
301 | unsigned tx_outstanding; | 301 | unsigned tx_outstanding; |
302 | 302 | ||
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c index 1818f958c250..7dd2ec473d24 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c | |||
@@ -634,8 +634,8 @@ static inline int post_send(struct ipoib_dev_priv *priv, | |||
634 | { | 634 | { |
635 | struct ib_send_wr *bad_wr; | 635 | struct ib_send_wr *bad_wr; |
636 | 636 | ||
637 | priv->tx_sge.addr = addr; | 637 | priv->tx_sge[0].addr = addr; |
638 | priv->tx_sge.length = len; | 638 | priv->tx_sge[0].length = len; |
639 | 639 | ||
640 | priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; | 640 | priv->tx_wr.wr_id = wr_id | IPOIB_OP_CM; |
641 | 641 | ||
@@ -676,7 +676,7 @@ void ipoib_cm_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_cm_ | |||
676 | return; | 676 | return; |
677 | } | 677 | } |
678 | 678 | ||
679 | tx_req->mapping = addr; | 679 | tx_req->mapping[0] = addr; |
680 | 680 | ||
681 | if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), | 681 | if (unlikely(post_send(priv, tx, tx->tx_head & (ipoib_sendq_size - 1), |
682 | addr, skb->len))) { | 682 | addr, skb->len))) { |
@@ -715,7 +715,7 @@ void ipoib_cm_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) | |||
715 | 715 | ||
716 | tx_req = &tx->tx_ring[wr_id]; | 716 | tx_req = &tx->tx_ring[wr_id]; |
717 | 717 | ||
718 | ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, DMA_TO_DEVICE); | 718 | ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len, DMA_TO_DEVICE); |
719 | 719 | ||
720 | /* FIXME: is this right? Shouldn't we only increment on success? */ | 720 | /* FIXME: is this right? Shouldn't we only increment on success? */ |
721 | ++dev->stats.tx_packets; | 721 | ++dev->stats.tx_packets; |
@@ -1110,7 +1110,7 @@ timeout: | |||
1110 | 1110 | ||
1111 | while ((int) p->tx_tail - (int) p->tx_head < 0) { | 1111 | while ((int) p->tx_tail - (int) p->tx_head < 0) { |
1112 | tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)]; | 1112 | tx_req = &p->tx_ring[p->tx_tail & (ipoib_sendq_size - 1)]; |
1113 | ib_dma_unmap_single(priv->ca, tx_req->mapping, tx_req->skb->len, | 1113 | ib_dma_unmap_single(priv->ca, tx_req->mapping[0], tx_req->skb->len, |
1114 | DMA_TO_DEVICE); | 1114 | DMA_TO_DEVICE); |
1115 | dev_kfree_skb_any(tx_req->skb); | 1115 | dev_kfree_skb_any(tx_req->skb); |
1116 | ++p->tx_tail; | 1116 | ++p->tx_tail; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c index 52bc2bd5799a..9d3e778dc56d 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c | |||
@@ -239,6 +239,54 @@ repost: | |||
239 | "for buf %d\n", wr_id); | 239 | "for buf %d\n", wr_id); |
240 | } | 240 | } |
241 | 241 | ||
242 | static int ipoib_dma_map_tx(struct ib_device *ca, | ||
243 | struct ipoib_tx_buf *tx_req) | ||
244 | { | ||
245 | struct sk_buff *skb = tx_req->skb; | ||
246 | u64 *mapping = tx_req->mapping; | ||
247 | int i; | ||
248 | |||
249 | mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb), | ||
250 | DMA_TO_DEVICE); | ||
251 | if (unlikely(ib_dma_mapping_error(ca, mapping[0]))) | ||
252 | return -EIO; | ||
253 | |||
254 | for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { | ||
255 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
256 | mapping[i + 1] = ib_dma_map_page(ca, frag->page, | ||
257 | frag->page_offset, frag->size, | ||
258 | DMA_TO_DEVICE); | ||
259 | if (unlikely(ib_dma_mapping_error(ca, mapping[i + 1]))) | ||
260 | goto partial_error; | ||
261 | } | ||
262 | return 0; | ||
263 | |||
264 | partial_error: | ||
265 | ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); | ||
266 | |||
267 | for (; i > 0; --i) { | ||
268 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; | ||
269 | ib_dma_unmap_page(ca, mapping[i], frag->size, DMA_TO_DEVICE); | ||
270 | } | ||
271 | return -EIO; | ||
272 | } | ||
273 | |||
274 | static void ipoib_dma_unmap_tx(struct ib_device *ca, | ||
275 | struct ipoib_tx_buf *tx_req) | ||
276 | { | ||
277 | struct sk_buff *skb = tx_req->skb; | ||
278 | u64 *mapping = tx_req->mapping; | ||
279 | int i; | ||
280 | |||
281 | ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); | ||
282 | |||
283 | for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { | ||
284 | skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; | ||
285 | ib_dma_unmap_page(ca, mapping[i + 1], frag->size, | ||
286 | DMA_TO_DEVICE); | ||
287 | } | ||
288 | } | ||
289 | |||
242 | static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) | 290 | static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) |
243 | { | 291 | { |
244 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 292 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
@@ -257,8 +305,7 @@ static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) | |||
257 | 305 | ||
258 | tx_req = &priv->tx_ring[wr_id]; | 306 | tx_req = &priv->tx_ring[wr_id]; |
259 | 307 | ||
260 | ib_dma_unmap_single(priv->ca, tx_req->mapping, | 308 | ipoib_dma_unmap_tx(priv->ca, tx_req); |
261 | tx_req->skb->len, DMA_TO_DEVICE); | ||
262 | 309 | ||
263 | ++dev->stats.tx_packets; | 310 | ++dev->stats.tx_packets; |
264 | dev->stats.tx_bytes += tx_req->skb->len; | 311 | dev->stats.tx_bytes += tx_req->skb->len; |
@@ -341,16 +388,23 @@ void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) | |||
341 | static inline int post_send(struct ipoib_dev_priv *priv, | 388 | static inline int post_send(struct ipoib_dev_priv *priv, |
342 | unsigned int wr_id, | 389 | unsigned int wr_id, |
343 | struct ib_ah *address, u32 qpn, | 390 | struct ib_ah *address, u32 qpn, |
344 | u64 addr, int len) | 391 | u64 *mapping, int headlen, |
392 | skb_frag_t *frags, | ||
393 | int nr_frags) | ||
345 | { | 394 | { |
346 | struct ib_send_wr *bad_wr; | 395 | struct ib_send_wr *bad_wr; |
396 | int i; | ||
347 | 397 | ||
348 | priv->tx_sge.addr = addr; | 398 | priv->tx_sge[0].addr = mapping[0]; |
349 | priv->tx_sge.length = len; | 399 | priv->tx_sge[0].length = headlen; |
350 | 400 | for (i = 0; i < nr_frags; ++i) { | |
351 | priv->tx_wr.wr_id = wr_id; | 401 | priv->tx_sge[i + 1].addr = mapping[i + 1]; |
352 | priv->tx_wr.wr.ud.remote_qpn = qpn; | 402 | priv->tx_sge[i + 1].length = frags[i].size; |
353 | priv->tx_wr.wr.ud.ah = address; | 403 | } |
404 | priv->tx_wr.num_sge = nr_frags + 1; | ||
405 | priv->tx_wr.wr_id = wr_id; | ||
406 | priv->tx_wr.wr.ud.remote_qpn = qpn; | ||
407 | priv->tx_wr.wr.ud.ah = address; | ||
354 | 408 | ||
355 | return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr); | 409 | return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr); |
356 | } | 410 | } |
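
The reworked post_send() above turns one skb into a gather list: element 0 covers the linear header (skb_headlen() bytes) and elements 1..nr_frags cover the page fragments, so num_sge is nr_frags + 1, which is also why tx_sge[] and the QP's max_send_sge are sized MAX_SKB_FRAGS + 1. A tiny stand-alone model of that construction, with invented lengths and fake DMA addresses:

    #include <stdio.h>

    struct toy_sge { unsigned long addr; unsigned len; };

    int main(void)
    {
        /* invented skb layout: 54-byte linear header plus two 1448-byte frags */
        unsigned headlen = 54, frag_len[] = { 1448, 1448 }, nr_frags = 2;
        unsigned long mapping[] = { 0x1000, 0x2000, 0x3000 };  /* fake DMA addresses */
        struct toy_sge sge[3];
        unsigned i;

        sge[0].addr = mapping[0];
        sge[0].len  = headlen;
        for (i = 0; i < nr_frags; i++) {
            sge[i + 1].addr = mapping[i + 1];
            sge[i + 1].len  = frag_len[i];
        }
        for (i = 0; i < nr_frags + 1; i++)          /* num_sge entries */
            printf("sge[%u]: addr 0x%lx len %u\n", i, sge[i].addr, sge[i].len);
        return 0;
    }
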
@@ -360,7 +414,6 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, | |||
360 | { | 414 | { |
361 | struct ipoib_dev_priv *priv = netdev_priv(dev); | 415 | struct ipoib_dev_priv *priv = netdev_priv(dev); |
362 | struct ipoib_tx_buf *tx_req; | 416 | struct ipoib_tx_buf *tx_req; |
363 | u64 addr; | ||
364 | 417 | ||
365 | if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) { | 418 | if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) { |
366 | ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", | 419 | ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", |
@@ -383,20 +436,19 @@ void ipoib_send(struct net_device *dev, struct sk_buff *skb, | |||
383 | */ | 436 | */ |
384 | tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; | 437 | tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; |
385 | tx_req->skb = skb; | 438 | tx_req->skb = skb; |
386 | addr = ib_dma_map_single(priv->ca, skb->data, skb->len, | 439 | if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) { |
387 | DMA_TO_DEVICE); | ||
388 | if (unlikely(ib_dma_mapping_error(priv->ca, addr))) { | ||
389 | ++dev->stats.tx_errors; | 440 | ++dev->stats.tx_errors; |
390 | dev_kfree_skb_any(skb); | 441 | dev_kfree_skb_any(skb); |
391 | return; | 442 | return; |
392 | } | 443 | } |
393 | tx_req->mapping = addr; | ||
394 | 444 | ||
395 | if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), | 445 | if (unlikely(post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), |
396 | address->ah, qpn, addr, skb->len))) { | 446 | address->ah, qpn, |
447 | tx_req->mapping, skb_headlen(skb), | ||
448 | skb_shinfo(skb)->frags, skb_shinfo(skb)->nr_frags))) { | ||
397 | ipoib_warn(priv, "post_send failed\n"); | 449 | ipoib_warn(priv, "post_send failed\n"); |
398 | ++dev->stats.tx_errors; | 450 | ++dev->stats.tx_errors; |
399 | ib_dma_unmap_single(priv->ca, addr, skb->len, DMA_TO_DEVICE); | 451 | ipoib_dma_unmap_tx(priv->ca, tx_req); |
400 | dev_kfree_skb_any(skb); | 452 | dev_kfree_skb_any(skb); |
401 | } else { | 453 | } else { |
402 | dev->trans_start = jiffies; | 454 | dev->trans_start = jiffies; |
@@ -615,10 +667,7 @@ int ipoib_ib_dev_stop(struct net_device *dev, int flush) | |||
615 | while ((int) priv->tx_tail - (int) priv->tx_head < 0) { | 667 | while ((int) priv->tx_tail - (int) priv->tx_head < 0) { |
616 | tx_req = &priv->tx_ring[priv->tx_tail & | 668 | tx_req = &priv->tx_ring[priv->tx_tail & |
617 | (ipoib_sendq_size - 1)]; | 669 | (ipoib_sendq_size - 1)]; |
618 | ib_dma_unmap_single(priv->ca, | 670 | ipoib_dma_unmap_tx(priv->ca, tx_req); |
619 | tx_req->mapping, | ||
620 | tx_req->skb->len, | ||
621 | DMA_TO_DEVICE); | ||
622 | dev_kfree_skb_any(tx_req->skb); | 671 | dev_kfree_skb_any(tx_req->skb); |
623 | ++priv->tx_tail; | 672 | ++priv->tx_tail; |
624 | --priv->tx_outstanding; | 673 | --priv->tx_outstanding; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c index 09f5371137a1..f96477a8ca5a 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_main.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c | |||
@@ -965,7 +965,9 @@ static void ipoib_setup(struct net_device *dev) | |||
965 | dev->addr_len = INFINIBAND_ALEN; | 965 | dev->addr_len = INFINIBAND_ALEN; |
966 | dev->type = ARPHRD_INFINIBAND; | 966 | dev->type = ARPHRD_INFINIBAND; |
967 | dev->tx_queue_len = ipoib_sendq_size * 2; | 967 | dev->tx_queue_len = ipoib_sendq_size * 2; |
968 | dev->features = NETIF_F_VLAN_CHALLENGED | NETIF_F_LLTX; | 968 | dev->features = (NETIF_F_VLAN_CHALLENGED | |
969 | NETIF_F_LLTX | | ||
970 | NETIF_F_HIGHDMA); | ||
969 | 971 | ||
970 | /* MTU will be reset when mcast join happens */ | 972 | /* MTU will be reset when mcast join happens */ |
971 | dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN; | 973 | dev->mtu = IPOIB_PACKET_SIZE - IPOIB_ENCAP_LEN; |
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c index 433e99ac227b..a3aeb911f024 100644 --- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c +++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c | |||
@@ -157,6 +157,7 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) | |||
157 | }; | 157 | }; |
158 | 158 | ||
159 | int ret, size; | 159 | int ret, size; |
160 | int i; | ||
160 | 161 | ||
161 | priv->pd = ib_alloc_pd(priv->ca); | 162 | priv->pd = ib_alloc_pd(priv->ca); |
162 | if (IS_ERR(priv->pd)) { | 163 | if (IS_ERR(priv->pd)) { |
@@ -191,6 +192,9 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) | |||
191 | init_attr.send_cq = priv->cq; | 192 | init_attr.send_cq = priv->cq; |
192 | init_attr.recv_cq = priv->cq; | 193 | init_attr.recv_cq = priv->cq; |
193 | 194 | ||
195 | if (dev->features & NETIF_F_SG) | ||
196 | init_attr.cap.max_send_sge = MAX_SKB_FRAGS + 1; | ||
197 | |||
194 | priv->qp = ib_create_qp(priv->pd, &init_attr); | 198 | priv->qp = ib_create_qp(priv->pd, &init_attr); |
195 | if (IS_ERR(priv->qp)) { | 199 | if (IS_ERR(priv->qp)) { |
196 | printk(KERN_WARNING "%s: failed to create QP\n", ca->name); | 200 | printk(KERN_WARNING "%s: failed to create QP\n", ca->name); |
@@ -201,11 +205,11 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca) | |||
201 | priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff; | 205 | priv->dev->dev_addr[2] = (priv->qp->qp_num >> 8) & 0xff; |
202 | priv->dev->dev_addr[3] = (priv->qp->qp_num ) & 0xff; | 206 | priv->dev->dev_addr[3] = (priv->qp->qp_num ) & 0xff; |
203 | 207 | ||
204 | priv->tx_sge.lkey = priv->mr->lkey; | 208 | for (i = 0; i < MAX_SKB_FRAGS + 1; ++i) |
209 | priv->tx_sge[i].lkey = priv->mr->lkey; | ||
205 | 210 | ||
206 | priv->tx_wr.opcode = IB_WR_SEND; | 211 | priv->tx_wr.opcode = IB_WR_SEND; |
207 | priv->tx_wr.sg_list = &priv->tx_sge; | 212 | priv->tx_wr.sg_list = priv->tx_sge; |
208 | priv->tx_wr.num_sge = 1; | ||
209 | priv->tx_wr.send_flags = IB_SEND_SIGNALED; | 213 | priv->tx_wr.send_flags = IB_SEND_SIGNALED; |
210 | 214 | ||
211 | return 0; | 215 | return 0; |
diff --git a/drivers/input/joystick/analog.c b/drivers/input/joystick/analog.c index f32e031dcb27..708c5ae13b24 100644 --- a/drivers/input/joystick/analog.c +++ b/drivers/input/joystick/analog.c | |||
@@ -1,6 +1,4 @@ | |||
1 | /* | 1 | /* |
2 | * $Id: analog.c,v 1.68 2002/01/22 20:18:32 vojtech Exp $ | ||
3 | * | ||
4 | * Copyright (c) 1996-2001 Vojtech Pavlik | 2 | * Copyright (c) 1996-2001 Vojtech Pavlik |
5 | */ | 3 | */ |
6 | 4 | ||
@@ -164,6 +162,10 @@ static unsigned int get_time_pit(void) | |||
164 | #define GET_TIME(x) do { x = get_cycles(); } while (0) | 162 | #define GET_TIME(x) do { x = get_cycles(); } while (0) |
165 | #define DELTA(x,y) ((y)-(x)) | 163 | #define DELTA(x,y) ((y)-(x)) |
166 | #define TIME_NAME "PCC" | 164 | #define TIME_NAME "PCC" |
165 | #elif defined(CONFIG_MN10300) | ||
166 | #define GET_TIME(x) do { x = get_cycles(); } while (0) | ||
167 | #define DELTA(x, y) ((x) - (y)) | ||
168 | #define TIME_NAME "TSC" | ||
167 | #else | 169 | #else |
168 | #define FAKE_TIME | 170 | #define FAKE_TIME |
169 | static unsigned long analog_faketime = 0; | 171 | static unsigned long analog_faketime = 0; |
diff --git a/drivers/isdn/capi/capifs.c b/drivers/isdn/capi/capifs.c index 2dd1b57b0ba4..6d7c47ec0367 100644 --- a/drivers/isdn/capi/capifs.c +++ b/drivers/isdn/capi/capifs.c | |||
@@ -52,6 +52,7 @@ static int capifs_remount(struct super_block *s, int *flags, char *data) | |||
52 | gid_t gid = 0; | 52 | gid_t gid = 0; |
53 | umode_t mode = 0600; | 53 | umode_t mode = 0600; |
54 | char *this_char; | 54 | char *this_char; |
55 | char *new_opt = kstrdup(data, GFP_KERNEL); | ||
55 | 56 | ||
56 | this_char = NULL; | 57 | this_char = NULL; |
57 | while ((this_char = strsep(&data, ",")) != NULL) { | 58 | while ((this_char = strsep(&data, ",")) != NULL) { |
@@ -72,11 +73,16 @@ static int capifs_remount(struct super_block *s, int *flags, char *data) | |||
72 | return -EINVAL; | 73 | return -EINVAL; |
73 | } | 74 | } |
74 | } | 75 | } |
76 | |||
77 | kfree(s->s_options); | ||
78 | s->s_options = new_opt; | ||
79 | |||
75 | config.setuid = setuid; | 80 | config.setuid = setuid; |
76 | config.setgid = setgid; | 81 | config.setgid = setgid; |
77 | config.uid = uid; | 82 | config.uid = uid; |
78 | config.gid = gid; | 83 | config.gid = gid; |
79 | config.mode = mode; | 84 | config.mode = mode; |
85 | |||
80 | return 0; | 86 | return 0; |
81 | } | 87 | } |
82 | 88 | ||
@@ -84,6 +90,7 @@ static struct super_operations capifs_sops = | |||
84 | { | 90 | { |
85 | .statfs = simple_statfs, | 91 | .statfs = simple_statfs, |
86 | .remount_fs = capifs_remount, | 92 | .remount_fs = capifs_remount, |
93 | .show_options = generic_show_options, | ||
87 | }; | 94 | }; |
88 | 95 | ||
89 | 96 | ||
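
The capifs change follows the usual recipe for generic_show_options(): duplicate the raw option string before strsep() consumes it, and install the copy on the superblock only once parsing has succeeded. A generic sketch of that shape is below; "example_remount" is hypothetical, and real code would also handle kstrdup() failure and free the copy on a parse error.

    #include <linux/fs.h>
    #include <linux/slab.h>
    #include <linux/string.h>

    /* Sketch of the save-options pattern used above (hypothetical filesystem). */
    static int example_remount(struct super_block *sb, int *flags, char *data)
    {
        char *new_opt = kstrdup(data, GFP_KERNEL);  /* NULL is fine: "no options" */
        char *opt;

        while ((opt = strsep(&data, ",")) != NULL) {
            /* ... parse each option; on bad input, kfree(new_opt) and
             * return -EINVAL so the previously saved string survives ... */
        }

        kfree(sb->s_options);       /* drop whatever was saved before */
        sb->s_options = new_opt;    /* generic_show_options() prints this */
        return 0;
    }
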
diff --git a/drivers/isdn/hisax/amd7930_fn.c b/drivers/isdn/hisax/amd7930_fn.c index c0d7036404a5..341faf58a65c 100644 --- a/drivers/isdn/hisax/amd7930_fn.c +++ b/drivers/isdn/hisax/amd7930_fn.c | |||
@@ -744,8 +744,7 @@ dbusy_timer_handler(struct IsdnCardState *cs) | |||
744 | 744 | ||
745 | 745 | ||
746 | 746 | ||
747 | void __devinit | 747 | void Amd7930_init(struct IsdnCardState *cs) |
748 | Amd7930_init(struct IsdnCardState *cs) | ||
749 | { | 748 | { |
750 | WORD *ptr; | 749 | WORD *ptr; |
751 | BYTE cmd, cnt; | 750 | BYTE cmd, cnt; |
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig index 851a3b01781e..859814f62cb0 100644 --- a/drivers/leds/Kconfig +++ b/drivers/leds/Kconfig | |||
@@ -18,6 +18,13 @@ config LEDS_CLASS | |||
18 | 18 | ||
19 | comment "LED drivers" | 19 | comment "LED drivers" |
20 | 20 | ||
21 | config LEDS_ATMEL_PWM | ||
22 | tristate "LED Support using Atmel PWM outputs" | ||
23 | depends on LEDS_CLASS && ATMEL_PWM | ||
24 | help | ||
25 | This option enables support for LEDs driven using outputs | ||
26 | of the dedicated PWM controller found on newer Atmel SOCs. | ||
27 | |||
21 | config LEDS_CORGI | 28 | config LEDS_CORGI |
22 | tristate "LED Support for the Sharp SL-C7x0 series" | 29 | tristate "LED Support for the Sharp SL-C7x0 series" |
23 | depends on LEDS_CLASS && PXA_SHARP_C7xx | 30 | depends on LEDS_CLASS && PXA_SHARP_C7xx |
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile index bc6afc8dcb27..84ced3b1a13d 100644 --- a/drivers/leds/Makefile +++ b/drivers/leds/Makefile | |||
@@ -5,6 +5,7 @@ obj-$(CONFIG_LEDS_CLASS) += led-class.o | |||
5 | obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o | 5 | obj-$(CONFIG_LEDS_TRIGGERS) += led-triggers.o |
6 | 6 | ||
7 | # LED Platform Drivers | 7 | # LED Platform Drivers |
8 | obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o | ||
8 | obj-$(CONFIG_LEDS_CORGI) += leds-corgi.o | 9 | obj-$(CONFIG_LEDS_CORGI) += leds-corgi.o |
9 | obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o | 10 | obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o |
10 | obj-$(CONFIG_LEDS_SPITZ) += leds-spitz.o | 11 | obj-$(CONFIG_LEDS_SPITZ) += leds-spitz.o |
diff --git a/drivers/leds/leds-atmel-pwm.c b/drivers/leds/leds-atmel-pwm.c new file mode 100644 index 000000000000..af61f55571fe --- /dev/null +++ b/drivers/leds/leds-atmel-pwm.c | |||
@@ -0,0 +1,157 @@ | |||
1 | #include <linux/kernel.h> | ||
2 | #include <linux/platform_device.h> | ||
3 | #include <linux/leds.h> | ||
4 | #include <linux/io.h> | ||
5 | #include <linux/atmel_pwm.h> | ||
6 | |||
7 | |||
8 | struct pwmled { | ||
9 | struct led_classdev cdev; | ||
10 | struct pwm_channel pwmc; | ||
11 | struct gpio_led *desc; | ||
12 | u32 mult; | ||
13 | u8 active_low; | ||
14 | }; | ||
15 | |||
16 | |||
17 | /* | ||
18 | * For simplicity, we use "brightness" as if it were a linear function | ||
19 | * of PWM duty cycle. However, a logarithmic function of duty cycle is | ||
20 | * probably a better match for perceived brightness: two is half as bright | ||
21 | * as four, four is half as bright as eight, etc | ||
22 | */ | ||
23 | static void pwmled_brightness(struct led_classdev *cdev, enum led_brightness b) | ||
24 | { | ||
25 | struct pwmled *led; | ||
26 | |||
27 | /* update the duty cycle for the *next* period */ | ||
28 | led = container_of(cdev, struct pwmled, cdev); | ||
29 | pwm_channel_writel(&led->pwmc, PWM_CUPD, led->mult * (unsigned) b); | ||
30 | } | ||
31 | |||
32 | /* | ||
33 | * NOTE: we reuse the platform_data structure of GPIO leds, | ||
34 | * but repurpose its "gpio" number as a PWM channel number. | ||
35 | */ | ||
36 | static int __init pwmled_probe(struct platform_device *pdev) | ||
37 | { | ||
38 | const struct gpio_led_platform_data *pdata; | ||
39 | struct pwmled *leds; | ||
40 | unsigned i; | ||
41 | int status; | ||
42 | |||
43 | pdata = pdev->dev.platform_data; | ||
44 | if (!pdata || pdata->num_leds < 1) | ||
45 | return -ENODEV; | ||
46 | |||
47 | leds = kcalloc(pdata->num_leds, sizeof(*leds), GFP_KERNEL); | ||
48 | if (!leds) | ||
49 | return -ENOMEM; | ||
50 | |||
51 | for (i = 0; i < pdata->num_leds; i++) { | ||
52 | struct pwmled *led = leds + i; | ||
53 | const struct gpio_led *dat = pdata->leds + i; | ||
54 | u32 tmp; | ||
55 | |||
56 | led->cdev.name = dat->name; | ||
57 | led->cdev.brightness = LED_OFF; | ||
58 | led->cdev.brightness_set = pwmled_brightness; | ||
59 | led->cdev.default_trigger = dat->default_trigger; | ||
60 | |||
61 | led->active_low = dat->active_low; | ||
62 | |||
63 | status = pwm_channel_alloc(dat->gpio, &led->pwmc); | ||
64 | if (status < 0) | ||
65 | goto err; | ||
66 | |||
67 | /* | ||
68 | * Prescale clock by 2^x, so PWM counts in low MHz. | ||
69 | * Start each cycle with the LED active, so increasing | ||
70 | * the duty cycle gives us more time on (== brighter). | ||
71 | */ | ||
72 | tmp = 5; | ||
73 | if (!led->active_low) | ||
74 | tmp |= PWM_CPR_CPOL; | ||
75 | pwm_channel_writel(&led->pwmc, PWM_CMR, tmp); | ||
76 | |||
77 | /* | ||
78 | * Pick a period so PWM cycles at 100+ Hz; and a multiplier | ||
79 | * for scaling duty cycle: brightness * mult. | ||
80 | */ | ||
81 | tmp = (led->pwmc.mck / (1 << 5)) / 100; | ||
82 | tmp /= 255; | ||
83 | led->mult = tmp; | ||
84 | pwm_channel_writel(&led->pwmc, PWM_CDTY, | ||
85 | led->cdev.brightness * 255); | ||
86 | pwm_channel_writel(&led->pwmc, PWM_CPRD, | ||
87 | LED_FULL * tmp); | ||
88 | |||
89 | pwm_channel_enable(&led->pwmc); | ||
90 | |||
91 | /* Hand it over to the LED framework */ | ||
92 | status = led_classdev_register(&pdev->dev, &led->cdev); | ||
93 | if (status < 0) { | ||
94 | pwm_channel_free(&led->pwmc); | ||
95 | goto err; | ||
96 | } | ||
97 | } | ||
98 | |||
99 | platform_set_drvdata(pdev, leds); | ||
100 | return 0; | ||
101 | |||
102 | err: | ||
103 | if (i > 0) { | ||
104 | while (i-- > 0) { | ||
105 | led_classdev_unregister(&leds[i].cdev); | ||
106 | pwm_channel_free(&leds[i].pwmc); | ||
107 | } | ||
108 | } | ||
109 | kfree(leds); | ||
110 | |||
111 | return status; | ||
112 | } | ||
113 | |||
114 | static int __exit pwmled_remove(struct platform_device *pdev) | ||
115 | { | ||
116 | const struct gpio_led_platform_data *pdata; | ||
117 | struct pwmled *leds; | ||
118 | unsigned i; | ||
119 | |||
120 | pdata = pdev->dev.platform_data; | ||
121 | leds = platform_get_drvdata(pdev); | ||
122 | |||
123 | for (i = 0; i < pdata->num_leds; i++) { | ||
124 | struct pwmled *led = leds + i; | ||
125 | |||
126 | led_classdev_unregister(&led->cdev); | ||
127 | pwm_channel_free(&led->pwmc); | ||
128 | } | ||
129 | |||
130 | kfree(leds); | ||
131 | platform_set_drvdata(pdev, NULL); | ||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | static struct platform_driver pwmled_driver = { | ||
136 | .driver = { | ||
137 | .name = "leds-atmel-pwm", | ||
138 | .owner = THIS_MODULE, | ||
139 | }, | ||
140 | /* REVISIT add suspend() and resume() methods */ | ||
141 | .remove = __exit_p(pwmled_remove), | ||
142 | }; | ||
143 | |||
144 | static int __init modinit(void) | ||
145 | { | ||
146 | return platform_driver_probe(&pwmled_driver, pwmled_probe); | ||
147 | } | ||
148 | module_init(modinit); | ||
149 | |||
150 | static void __exit modexit(void) | ||
151 | { | ||
152 | platform_driver_unregister(&pwmled_driver); | ||
153 | } | ||
154 | module_exit(modexit); | ||
155 | |||
156 | MODULE_DESCRIPTION("Driver for LEDs with PWM-controlled brightness"); | ||
157 | MODULE_LICENSE("GPL"); | ||
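
To see what the prescale/period/multiplier arithmetic in pwmled_probe() works out to, the same calculation is repeated below as a stand-alone program. The 100 MHz master clock is an invented figure; on real boards led->pwmc.mck comes from clk_get_rate() in pwm_channel_alloc().

    #include <stdio.h>

    int main(void)
    {
        unsigned long mck = 100 * 1000 * 1000;      /* assumed MCK, in Hz */
        unsigned long pwm_clk = mck / (1 << 5);     /* CMR prescale of 2^5 */
        unsigned long mult = (pwm_clk / 100) / 255; /* duty counts per brightness step */
        unsigned long cprd = 255 * mult;            /* LED_FULL * mult, the period */

        printf("PWM counts at %lu Hz; CPRD=%lu (~%lu Hz); brightness 128 -> CDTY=%lu\n",
               pwm_clk, cprd, pwm_clk / cprd, 128 * mult);
        return 0;
    }

With these numbers the counter runs at 3.125 MHz, the period is 31110 counts (just over 100 Hz), and each brightness step adds 122 counts of on-time, matching the linear duty-cycle note in pwmled_brightness().
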
diff --git a/drivers/lguest/lguest_device.c b/drivers/lguest/lguest_device.c index 84f85e23cca7..1b2ec0bf5eb1 100644 --- a/drivers/lguest/lguest_device.c +++ b/drivers/lguest/lguest_device.c | |||
@@ -47,7 +47,7 @@ struct lguest_device { | |||
47 | /* Since the virtio infrastructure hands us a pointer to the virtio_device all | 47 | /* Since the virtio infrastructure hands us a pointer to the virtio_device all |
48 | * the time, it helps to have a curt macro to get a pointer to the struct | 48 | * the time, it helps to have a curt macro to get a pointer to the struct |
49 | * lguest_device it's enclosed in. */ | 49 | * lguest_device it's enclosed in. */ |
50 | #define to_lgdev(vdev) container_of(vdev, struct lguest_device, vdev) | 50 | #define to_lgdev(vd) container_of(vd, struct lguest_device, vdev) |
51 | 51 | ||
52 | /*D:130 | 52 | /*D:130 |
53 | * Device configurations | 53 | * Device configurations |
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index 8ba49385c3ff..77ad192962c5 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c | |||
@@ -85,6 +85,7 @@ struct smu_device { | |||
85 | u32 cmd_buf_abs; /* command buffer absolute */ | 85 | u32 cmd_buf_abs; /* command buffer absolute */ |
86 | struct list_head cmd_list; | 86 | struct list_head cmd_list; |
87 | struct smu_cmd *cmd_cur; /* pending command */ | 87 | struct smu_cmd *cmd_cur; /* pending command */ |
88 | int broken_nap; | ||
88 | struct list_head cmd_i2c_list; | 89 | struct list_head cmd_i2c_list; |
89 | struct smu_i2c_cmd *cmd_i2c_cur; /* pending i2c command */ | 90 | struct smu_i2c_cmd *cmd_i2c_cur; /* pending i2c command */ |
90 | struct timer_list i2c_timer; | 91 | struct timer_list i2c_timer; |
@@ -135,6 +136,19 @@ static void smu_start_cmd(void) | |||
135 | fend = faddr + smu->cmd_buf->length + 2; | 136 | fend = faddr + smu->cmd_buf->length + 2; |
136 | flush_inval_dcache_range(faddr, fend); | 137 | flush_inval_dcache_range(faddr, fend); |
137 | 138 | ||
139 | |||
140 | /* We also disable NAP mode for the duration of the command | ||
141 | * on U3 based machines. | ||
142 | * This is slightly racy as it can be written back to 1 by a sysctl | ||
143 | * but that never happens in practice. There seems to be an issue with | ||
144 | * U3 based machines such as the iMac G5 where napping for the | ||
145 | * whole duration of the command prevents the SMU from fetching it | ||
146 | * from memory. This might be related to the strange i2c based | ||
147 | * mechanism the SMU uses to access memory. | ||
148 | */ | ||
149 | if (smu->broken_nap) | ||
150 | powersave_nap = 0; | ||
151 | |||
138 | /* This isn't exactly a DMA mapping here, I suspect | 152 | /* This isn't exactly a DMA mapping here, I suspect |
139 | * the SMU is actually communicating with us via i2c to the | 153 | * the SMU is actually communicating with us via i2c to the |
140 | * northbridge or the CPU to access RAM. | 154 | * northbridge or the CPU to access RAM. |
@@ -211,6 +225,10 @@ static irqreturn_t smu_db_intr(int irq, void *arg) | |||
211 | misc = cmd->misc; | 225 | misc = cmd->misc; |
212 | mb(); | 226 | mb(); |
213 | cmd->status = rc; | 227 | cmd->status = rc; |
228 | |||
229 | /* Re-enable NAP mode */ | ||
230 | if (smu->broken_nap) | ||
231 | powersave_nap = 1; | ||
214 | bail: | 232 | bail: |
215 | /* Start next command if any */ | 233 | /* Start next command if any */ |
216 | smu_start_cmd(); | 234 | smu_start_cmd(); |
@@ -461,7 +479,7 @@ int __init smu_init (void) | |||
461 | if (np == NULL) | 479 | if (np == NULL) |
462 | return -ENODEV; | 480 | return -ENODEV; |
463 | 481 | ||
464 | printk(KERN_INFO "SMU driver %s %s\n", VERSION, AUTHOR); | 482 | printk(KERN_INFO "SMU: Driver %s %s\n", VERSION, AUTHOR); |
465 | 483 | ||
466 | if (smu_cmdbuf_abs == 0) { | 484 | if (smu_cmdbuf_abs == 0) { |
467 | printk(KERN_ERR "SMU: Command buffer not allocated !\n"); | 485 | printk(KERN_ERR "SMU: Command buffer not allocated !\n"); |
@@ -533,6 +551,11 @@ int __init smu_init (void) | |||
533 | goto fail; | 551 | goto fail; |
534 | } | 552 | } |
535 | 553 | ||
554 | /* U3 has an issue with NAP mode when issuing SMU commands */ | ||
555 | smu->broken_nap = pmac_get_uninorth_variant() < 4; | ||
556 | if (smu->broken_nap) | ||
557 | printk(KERN_INFO "SMU: using NAP mode workaround\n"); | ||
558 | |||
536 | sys_ctrler = SYS_CTRLER_SMU; | 559 | sys_ctrler = SYS_CTRLER_SMU; |
537 | return 0; | 560 | return 0; |
538 | 561 | ||
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig index 7b5220ca7d7f..c143a86c2ea6 100644 --- a/drivers/misc/Kconfig +++ b/drivers/misc/Kconfig | |||
@@ -13,6 +13,15 @@ menuconfig MISC_DEVICES | |||
13 | 13 | ||
14 | if MISC_DEVICES | 14 | if MISC_DEVICES |
15 | 15 | ||
16 | config ATMEL_PWM | ||
17 | tristate "Atmel AT32/AT91 PWM support" | ||
18 | depends on AVR32 || ARCH_AT91 | ||
19 | help | ||
20 | This option enables device driver support for the PWM channels | ||
21 | on certain Atmel processors. Pulse Width Modulation is used for | ||
22 | purposes including software-controlled power-efficient backlights | ||
23 | on LCD displays, motor control, and waveform generation. | ||
24 | |||
16 | config IBM_ASM | 25 | config IBM_ASM |
17 | tristate "Device driver for IBM RSA service processor" | 26 | tristate "Device driver for IBM RSA service processor" |
18 | depends on X86 && PCI && INPUT && EXPERIMENTAL | 27 | depends on X86 && PCI && INPUT && EXPERIMENTAL |
@@ -97,9 +106,9 @@ config ACER_WMI | |||
97 | depends on X86 | 106 | depends on X86 |
98 | depends on EXPERIMENTAL | 107 | depends on EXPERIMENTAL |
99 | depends on ACPI | 108 | depends on ACPI |
100 | depends on ACPI_WMI | ||
101 | depends on LEDS_CLASS | 109 | depends on LEDS_CLASS |
102 | depends on BACKLIGHT_CLASS_DEVICE | 110 | depends on BACKLIGHT_CLASS_DEVICE |
111 | select ACPI_WMI | ||
103 | ---help--- | 112 | ---help--- |
104 | This is a driver for newer Acer (and Wistron) laptops. It adds | 113 | This is a driver for newer Acer (and Wistron) laptops. It adds |
105 | wireless radio and bluetooth control, and on some laptops, | 114 | wireless radio and bluetooth control, and on some laptops, |
@@ -146,7 +155,7 @@ config TC1100_WMI | |||
146 | tristate "HP Compaq TC1100 Tablet WMI Extras" | 155 | tristate "HP Compaq TC1100 Tablet WMI Extras" |
147 | depends on X86 && !X86_64 | 156 | depends on X86 && !X86_64 |
148 | depends on ACPI | 157 | depends on ACPI |
149 | depends on ACPI_WMI | 158 | select ACPI_WMI |
150 | ---help--- | 159 | ---help--- |
151 | This is a driver for the WMI extensions (wireless and bluetooth power | 160 | This is a driver for the WMI extensions (wireless and bluetooth power |
152 | control) of the HP Compaq TC1100 tablet. | 161 | control) of the HP Compaq TC1100 tablet. |
@@ -279,6 +288,7 @@ config ATMEL_SSC | |||
279 | config INTEL_MENLOW | 288 | config INTEL_MENLOW |
280 | tristate "Thermal Management driver for Intel menlow platform" | 289 | tristate "Thermal Management driver for Intel menlow platform" |
281 | depends on ACPI_THERMAL | 290 | depends on ACPI_THERMAL |
291 | depends on X86 | ||
282 | ---help--- | 292 | ---help--- |
283 | ACPI thermal management enhancement driver on | 293 | ACPI thermal management enhancement driver on |
284 | Intel Menlow platform. | 294 | Intel Menlow platform. |
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile index 7f13549cc87e..3b12f5da8562 100644 --- a/drivers/misc/Makefile +++ b/drivers/misc/Makefile | |||
@@ -8,6 +8,7 @@ obj-$(CONFIG_HDPU_FEATURES) += hdpuftrs/ | |||
8 | obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o | 8 | obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o |
9 | obj-$(CONFIG_ACER_WMI) += acer-wmi.o | 9 | obj-$(CONFIG_ACER_WMI) += acer-wmi.o |
10 | obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o | 10 | obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o |
11 | obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o | ||
11 | obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o | 12 | obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o |
12 | obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o | 13 | obj-$(CONFIG_TC1100_WMI) += tc1100-wmi.o |
13 | obj-$(CONFIG_LKDTM) += lkdtm.o | 14 | obj-$(CONFIG_LKDTM) += lkdtm.o |
diff --git a/drivers/misc/atmel_pwm.c b/drivers/misc/atmel_pwm.c new file mode 100644 index 000000000000..f8d3b9a76cbd --- /dev/null +++ b/drivers/misc/atmel_pwm.c | |||
@@ -0,0 +1,409 @@ | |||
1 | #include <linux/module.h> | ||
2 | #include <linux/clk.h> | ||
3 | #include <linux/err.h> | ||
4 | #include <linux/io.h> | ||
5 | #include <linux/interrupt.h> | ||
6 | #include <linux/platform_device.h> | ||
7 | #include <linux/atmel_pwm.h> | ||
8 | |||
9 | |||
10 | /* | ||
11 | * This is a simple driver for the PWM controller found in various newer | ||
12 | * Atmel SOCs, including the AVR32 series and the AT91sam9263. | ||
13 | * | ||
14 | * Chips with current Linux ports have only 4 PWM channels, out of max 32. | ||
15 | * AT32UC3A and AT32UC3B chips have 7 channels (but currently no Linux). | ||
16 | * Docs are inconsistent about the width of the channel counter registers; | ||
17 | * it's at least 16 bits, but several places say 20 bits. | ||
18 | */ | ||
19 | #define PWM_NCHAN 4 /* max 32 */ | ||
20 | |||
21 | struct pwm { | ||
22 | spinlock_t lock; | ||
23 | struct platform_device *pdev; | ||
24 | u32 mask; | ||
25 | int irq; | ||
26 | void __iomem *base; | ||
27 | struct clk *clk; | ||
28 | struct pwm_channel *channel[PWM_NCHAN]; | ||
29 | void (*handler[PWM_NCHAN])(struct pwm_channel *); | ||
30 | }; | ||
31 | |||
32 | |||
33 | /* global PWM controller registers */ | ||
34 | #define PWM_MR 0x00 | ||
35 | #define PWM_ENA 0x04 | ||
36 | #define PWM_DIS 0x08 | ||
37 | #define PWM_SR 0x0c | ||
38 | #define PWM_IER 0x10 | ||
39 | #define PWM_IDR 0x14 | ||
40 | #define PWM_IMR 0x18 | ||
41 | #define PWM_ISR 0x1c | ||
42 | |||
43 | static inline void pwm_writel(const struct pwm *p, unsigned offset, u32 val) | ||
44 | { | ||
45 | __raw_writel(val, p->base + offset); | ||
46 | } | ||
47 | |||
48 | static inline u32 pwm_readl(const struct pwm *p, unsigned offset) | ||
49 | { | ||
50 | return __raw_readl(p->base + offset); | ||
51 | } | ||
52 | |||
53 | static inline void __iomem *pwmc_regs(const struct pwm *p, int index) | ||
54 | { | ||
55 | return p->base + 0x200 + index * 0x20; | ||
56 | } | ||
57 | |||
58 | static struct pwm *pwm; | ||
59 | |||
60 | static void pwm_dumpregs(struct pwm_channel *ch, char *tag) | ||
61 | { | ||
62 | struct device *dev = &pwm->pdev->dev; | ||
63 | |||
64 | dev_dbg(dev, "%s: mr %08x, sr %08x, imr %08x\n", | ||
65 | tag, | ||
66 | pwm_readl(pwm, PWM_MR), | ||
67 | pwm_readl(pwm, PWM_SR), | ||
68 | pwm_readl(pwm, PWM_IMR)); | ||
69 | dev_dbg(dev, | ||
70 | "pwm ch%d - mr %08x, dty %u, prd %u, cnt %u\n", | ||
71 | ch->index, | ||
72 | pwm_channel_readl(ch, PWM_CMR), | ||
73 | pwm_channel_readl(ch, PWM_CDTY), | ||
74 | pwm_channel_readl(ch, PWM_CPRD), | ||
75 | pwm_channel_readl(ch, PWM_CCNT)); | ||
76 | } | ||
77 | |||
78 | |||
79 | /** | ||
80 | * pwm_channel_alloc - allocate an unused PWM channel | ||
81 | * @index: identifies the channel | ||
82 | * @ch: structure to be initialized | ||
83 | * | ||
84 | * Drivers allocate PWM channels according to the board's wiring, and | ||
85 | * matching board-specific setup code. Returns zero or negative errno. | ||
86 | */ | ||
87 | int pwm_channel_alloc(int index, struct pwm_channel *ch) | ||
88 | { | ||
89 | unsigned long flags; | ||
90 | int status = 0; | ||
91 | |||
92 | /* insist on PWM init, with this signal pinned out */ | ||
93 | if (!pwm || !(pwm->mask & 1 << index)) | ||
94 | return -ENODEV; | ||
95 | |||
96 | if (index < 0 || index >= PWM_NCHAN || !ch) | ||
97 | return -EINVAL; | ||
98 | memset(ch, 0, sizeof *ch); | ||
99 | |||
100 | spin_lock_irqsave(&pwm->lock, flags); | ||
101 | if (pwm->channel[index]) | ||
102 | status = -EBUSY; | ||
103 | else { | ||
104 | clk_enable(pwm->clk); | ||
105 | |||
106 | ch->regs = pwmc_regs(pwm, index); | ||
107 | ch->index = index; | ||
108 | |||
109 | /* REVISIT: ap7000 seems to go 2x as fast as we expect!! */ | ||
110 | ch->mck = clk_get_rate(pwm->clk); | ||
111 | |||
112 | pwm->channel[index] = ch; | ||
113 | pwm->handler[index] = NULL; | ||
114 | |||
115 | /* channel and irq are always disabled when we return */ | ||
116 | pwm_writel(pwm, PWM_DIS, 1 << index); | ||
117 | pwm_writel(pwm, PWM_IDR, 1 << index); | ||
118 | } | ||
119 | spin_unlock_irqrestore(&pwm->lock, flags); | ||
120 | return status; | ||
121 | } | ||
122 | EXPORT_SYMBOL(pwm_channel_alloc); | ||
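For orientation, a minimal sketch of a client driver claiming a channel and programming a fixed period and duty cycle. It assumes the companion <linux/atmel_pwm.h> header exports pwm_channel_writel() plus the PWM_CMR/PWM_CPRD/PWM_CDTY offsets already used by pwm_dumpregs() above; the channel number and rates are made up.

	/* hypothetical client: ~1 kHz output at 50% duty, counting at MCK */
	static struct pwm_channel led_ch;

	static int led_pwm_setup(void)
	{
		int status;

		status = pwm_channel_alloc(2, &led_ch);	/* channel 2 per board wiring */
		if (status < 0)
			return status;

		pwm_channel_writel(&led_ch, PWM_CMR, 0);	/* CPRE = 0: count MCK */
		pwm_channel_writel(&led_ch, PWM_CPRD, led_ch.mck / 1000);
		pwm_channel_writel(&led_ch, PWM_CDTY, led_ch.mck / 2000);

		return __pwm_channel_onoff(&led_ch, 1);		/* start the counter */
	}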
123 | |||
124 | static int pwmcheck(struct pwm_channel *ch) | ||
125 | { | ||
126 | int index; | ||
127 | |||
128 | if (!pwm) | ||
129 | return -ENODEV; | ||
130 | if (!ch) | ||
131 | return -EINVAL; | ||
132 | index = ch->index; | ||
133 | if (index < 0 || index >= PWM_NCHAN || pwm->channel[index] != ch) | ||
134 | return -EINVAL; | ||
135 | |||
136 | return index; | ||
137 | } | ||
138 | |||
139 | /** | ||
140 | * pwm_channel_free - release a previously allocated channel | ||
141 | * @ch: the channel being released | ||
142 | * | ||
143 | * The channel is completely shut down (counter and IRQ disabled), | ||
144 | * and made available for re-use. Returns zero, or negative errno. | ||
145 | */ | ||
146 | int pwm_channel_free(struct pwm_channel *ch) | ||
147 | { | ||
148 | unsigned long flags; | ||
149 | int t; | ||
150 | |||
151 | spin_lock_irqsave(&pwm->lock, flags); | ||
152 | t = pwmcheck(ch); | ||
153 | if (t >= 0) { | ||
154 | pwm->channel[t] = NULL; | ||
155 | pwm->handler[t] = NULL; | ||
156 | |||
157 | /* channel and irq are always disabled when we return */ | ||
158 | pwm_writel(pwm, PWM_DIS, 1 << t); | ||
159 | pwm_writel(pwm, PWM_IDR, 1 << t); | ||
160 | |||
161 | clk_disable(pwm->clk); | ||
162 | t = 0; | ||
163 | } | ||
164 | spin_unlock_irqrestore(&pwm->lock, flags); | ||
165 | return t; | ||
166 | } | ||
167 | EXPORT_SYMBOL(pwm_channel_free); | ||
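The matching teardown for the sketch above would simply be:

	static void led_pwm_teardown(void)
	{
		__pwm_channel_onoff(&led_ch, 0);	/* stop the counter */
		pwm_channel_free(&led_ch);		/* also masks the channel IRQ */
	}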
168 | |||
169 | int __pwm_channel_onoff(struct pwm_channel *ch, int enabled) | ||
170 | { | ||
171 | unsigned long flags; | ||
172 | int t; | ||
173 | |||
174 | /* OMITTED FUNCTIONALITY: starting several channels in synch */ | ||
175 | |||
176 | spin_lock_irqsave(&pwm->lock, flags); | ||
177 | t = pwmcheck(ch); | ||
178 | if (t >= 0) { | ||
179 | pwm_writel(pwm, enabled ? PWM_ENA : PWM_DIS, 1 << t); | ||
180 | t = 0; | ||
181 | pwm_dumpregs(ch, enabled ? "enable" : "disable"); | ||
182 | } | ||
183 | spin_unlock_irqrestore(&pwm->lock, flags); | ||
184 | |||
185 | return t; | ||
186 | } | ||
187 | EXPORT_SYMBOL(__pwm_channel_onoff); | ||
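The leading underscores suggest callers normally reach this through thin wrappers in the header; a plausible shape (an assumption, since the header is not part of this diff) would be:

	#define pwm_channel_enable(ch)	__pwm_channel_onoff((ch), 1)
	#define pwm_channel_disable(ch)	__pwm_channel_onoff((ch), 0)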
188 | |||
189 | /** | ||
190 | * pwm_clk_alloc - allocate and configure CLKA or CLKB | ||
191 | * @prescale: from 0..10, the power of two used to divide MCK | ||
192 | * @div: from 1..255, the linear divisor to use | ||
193 | * | ||
194 | * Returns PWM_CPR_CLKA, PWM_CPR_CLKB, or negative errno. The allocated | ||
195 | * clock will run with a period of (2^prescale * div) / MCK, or twice as | ||
196 | * long if center aligned PWM output is used. The clock must later be | ||
197 | * deconfigured using pwm_clk_free(). | ||
198 | */ | ||
199 | int pwm_clk_alloc(unsigned prescale, unsigned div) | ||
200 | { | ||
201 | unsigned long flags; | ||
202 | u32 mr; | ||
203 | u32 val = (prescale << 8) | div; | ||
204 | int ret = -EBUSY; | ||
205 | |||
206 | if (prescale > 10 || div == 0 || div > 255) | ||
207 | return -EINVAL; | ||
208 | |||
209 | spin_lock_irqsave(&pwm->lock, flags); | ||
210 | mr = pwm_readl(pwm, PWM_MR); | ||
211 | if ((mr & 0xffff) == 0) { | ||
212 | mr |= val; | ||
213 | ret = PWM_CPR_CLKA; | ||
214 | } | ||
215 | else if ((mr & (0xffff << 16)) == 0) { | ||
216 | mr |= val << 16; | ||
217 | ret = PWM_CPR_CLKB; | ||
218 | } | ||
219 | if (ret > 0) | ||
220 | pwm_writel(pwm, PWM_MR, mr); | ||
221 | spin_unlock_irqrestore(&pwm->lock, flags); | ||
222 | return ret; | ||
223 | } | ||
224 | EXPORT_SYMBOL(pwm_clk_alloc); | ||
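To make the period formula above concrete: with MCK at 133 MHz, a divided clock of 1 MHz needs 2^prescale * div = 133, e.g. prescale 0 and div 133; 1000 counts of that clock then give a 1 kHz period. This snippet assumes PWM_CPR_CLKA/CLKB are the CPRE encodings that can be written straight into a channel's CMR, and that ch is an already-allocated struct pwm_channel pointer.

	/* hypothetical: MCK = 133 MHz, want a 1 MHz channel clock */
	int cpre = pwm_clk_alloc(0, 133);	/* 133 MHz / (2^0 * 133) = 1 MHz */

	if (cpre < 0)
		return cpre;			/* CLKA and CLKB both in use */

	pwm_channel_writel(ch, PWM_CMR, cpre);	/* select CLKA or CLKB */
	pwm_channel_writel(ch, PWM_CPRD, 1000);	/* 1000 ticks -> 1 kHz period */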
225 | |||
226 | /** | ||
227 | * pwm_clk_free - deconfigure and release CLKA or CLKB | ||
228 | * @clk: PWM_CPR_CLKA or PWM_CPR_CLKB, as returned by pwm_clk_alloc() | ||
229 | * Reverses the effect of pwm_clk_alloc(). | ||
230 | */ | ||
231 | void pwm_clk_free(unsigned clk) | ||
232 | { | ||
233 | unsigned long flags; | ||
234 | u32 mr; | ||
235 | |||
236 | spin_lock_irqsave(&pwm->lock, flags); | ||
237 | mr = pwm_readl(pwm, PWM_MR); | ||
238 | if (clk == PWM_CPR_CLKA) | ||
239 | pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 0)); | ||
240 | if (clk == PWM_CPR_CLKB) | ||
241 | pwm_writel(pwm, PWM_MR, mr & ~(0xffff << 16)); | ||
242 | spin_unlock_irqrestore(&pwm->lock, flags); | ||
243 | } | ||
244 | EXPORT_SYMBOL(pwm_clk_free); | ||
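And the matching release for the clock claimed in the previous sketch:

	pwm_clk_free(cpre);	/* cpre holds PWM_CPR_CLKA or PWM_CPR_CLKB */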
245 | |||
246 | /** | ||
247 | * pwm_channel_handler - manage channel's IRQ handler | ||
248 | * @ch: the channel | ||
249 | * @handler: the handler to use, possibly NULL | ||
250 | * | ||
251 | * A non-NULL handler is called once per period of this PWM channel; | ||
252 | * passing a NULL handler stops this channel from generating IRQs. | ||
253 | * Returns zero or negative errno. | ||
254 | */ | ||
255 | int pwm_channel_handler(struct pwm_channel *ch, | ||
256 | void (*handler)(struct pwm_channel *ch)) | ||
257 | { | ||
258 | unsigned long flags; | ||
259 | int t; | ||
260 | |||
261 | spin_lock_irqsave(&pwm->lock, flags); | ||
262 | t = pwmcheck(ch); | ||
263 | if (t >= 0) { | ||
264 | pwm->handler[t] = handler; | ||
265 | pwm_writel(pwm, handler ? PWM_IER : PWM_IDR, 1 << t); | ||
266 | t = 0; | ||
267 | } | ||
268 | spin_unlock_irqrestore(&pwm->lock, flags); | ||
269 | |||
270 | return t; | ||
271 | } | ||
272 | EXPORT_SYMBOL(pwm_channel_handler); | ||
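A sketch of the callback contract: the handler runs from pwm_irq() below, outside pwm->lock, once per completed period. PWM_CUPD and the use of the update register are assumptions about the companion header and the hardware, not something this diff shows.

	/* hypothetical: nudge the duty cycle a step every period */
	static void fade_handler(struct pwm_channel *ch)
	{
		u32 duty = pwm_channel_readl(ch, PWM_CDTY) + 1;

		if (duty >= pwm_channel_readl(ch, PWM_CPRD))
			duty = 0;
		pwm_channel_writel(ch, PWM_CUPD, duty);	/* assumed duty update register */
	}

	/* after the channel has been set up as in the earlier sketch */
	status = pwm_channel_handler(&led_ch, fade_handler);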
273 | |||
274 | static irqreturn_t pwm_irq(int id, void *_pwm) | ||
275 | { | ||
276 | struct pwm *p = _pwm; | ||
277 | irqreturn_t handled = IRQ_NONE; | ||
278 | u32 irqstat; | ||
279 | int index; | ||
280 | |||
281 | spin_lock(&p->lock); | ||
282 | |||
283 | /* ack irqs, then handle them */ | ||
284 | irqstat = pwm_readl(pwm, PWM_ISR); | ||
285 | |||
286 | while (irqstat) { | ||
287 | struct pwm_channel *ch; | ||
288 | void (*handler)(struct pwm_channel *ch); | ||
289 | |||
290 | index = ffs(irqstat) - 1; | ||
291 | irqstat &= ~(1 << index); | ||
292 | ch = pwm->channel[index]; | ||
293 | handler = pwm->handler[index]; | ||
294 | if (handler && ch) { | ||
295 | spin_unlock(&p->lock); | ||
296 | handler(ch); | ||
297 | spin_lock(&p->lock); | ||
298 | handled = IRQ_HANDLED; | ||
299 | } | ||
300 | } | ||
301 | |||
302 | spin_unlock(&p->lock); | ||
303 | return handled; | ||
304 | } | ||
305 | |||
306 | static int __init pwm_probe(struct platform_device *pdev) | ||
307 | { | ||
308 | struct resource *r = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
309 | int irq = platform_get_irq(pdev, 0); | ||
310 | u32 *mp = pdev->dev.platform_data; | ||
311 | struct pwm *p; | ||
312 | int status = -EIO; | ||
313 | |||
314 | if (pwm) | ||
315 | return -EBUSY; | ||
316 | if (!r || irq < 0 || !mp || !*mp) | ||
317 | return -ENODEV; | ||
318 | if (*mp & ~((1<<PWM_NCHAN)-1)) { | ||
319 | dev_warn(&pdev->dev, "mask 0x%x ... more than %d channels\n", | ||
320 | *mp, PWM_NCHAN); | ||
321 | return -EINVAL; | ||
322 | } | ||
323 | |||
324 | p = kzalloc(sizeof(*p), GFP_KERNEL); | ||
325 | if (!p) | ||
326 | return -ENOMEM; | ||
327 | |||
328 | spin_lock_init(&p->lock); | ||
329 | p->pdev = pdev; | ||
330 | p->mask = *mp; | ||
331 | p->irq = irq; | ||
332 | p->base = ioremap(r->start, r->end - r->start + 1); | ||
333 | if (!p->base) | ||
334 | goto fail; | ||
335 | p->clk = clk_get(&pdev->dev, "mck"); | ||
336 | if (IS_ERR(p->clk)) { | ||
337 | status = PTR_ERR(p->clk); | ||
338 | p->clk = NULL; | ||
339 | goto fail; | ||
340 | } | ||
341 | |||
342 | status = request_irq(irq, pwm_irq, 0, pdev->name, p); | ||
343 | if (status < 0) | ||
344 | goto fail; | ||
345 | |||
346 | pwm = p; | ||
347 | platform_set_drvdata(pdev, p); | ||
348 | |||
349 | return 0; | ||
350 | |||
351 | fail: | ||
352 | if (p->clk) | ||
353 | clk_put(p->clk); | ||
354 | if (p->base) | ||
355 | iounmap(p->base); | ||
356 | |||
357 | kfree(p); | ||
358 | return status; | ||
359 | } | ||
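pwm_probe() expects platform resources plus a u32 channel mask as platform_data. A board file might describe the controller roughly like this; the register base, IRQ number and mask are purely illustrative:

	static u32 pwm_mask = (1 << 0) | (1 << 2);	/* channels 0 and 2 pinned out */

	static struct resource pwm_resources[] = {
		{ .start = 0xfff01000, .end = 0xfff013ff, .flags = IORESOURCE_MEM },
		{ .start = 24, .end = 24, .flags = IORESOURCE_IRQ },
	};

	static struct platform_device pwm_device = {
		.name		= "atmel_pwm",
		.id		= -1,
		.dev		= { .platform_data = &pwm_mask },
		.resource	= pwm_resources,
		.num_resources	= ARRAY_SIZE(pwm_resources),
	};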
360 | |||
361 | static int __exit pwm_remove(struct platform_device *pdev) | ||
362 | { | ||
363 | struct pwm *p = platform_get_drvdata(pdev); | ||
364 | |||
365 | if (p != pwm) | ||
366 | return -EINVAL; | ||
367 | |||
368 | clk_enable(pwm->clk); | ||
369 | pwm_writel(pwm, PWM_DIS, (1 << PWM_NCHAN) - 1); | ||
370 | pwm_writel(pwm, PWM_IDR, (1 << PWM_NCHAN) - 1); | ||
371 | clk_disable(pwm->clk); | ||
372 | |||
373 | pwm = NULL; | ||
374 | |||
375 | free_irq(p->irq, p); | ||
376 | clk_put(p->clk); | ||
377 | iounmap(p->base); | ||
378 | kfree(p); | ||
379 | |||
380 | return 0; | ||
381 | } | ||
382 | |||
383 | static struct platform_driver atmel_pwm_driver = { | ||
384 | .driver = { | ||
385 | .name = "atmel_pwm", | ||
386 | .owner = THIS_MODULE, | ||
387 | }, | ||
388 | .remove = __exit_p(pwm_remove), | ||
389 | |||
390 | /* NOTE: the PWM can keep running in the AVR32 "idle" and "frozen" | ||
391 | * states, and in all AT91sam9263 states, albeit at a reduced clock | ||
392 | * rate if MCK becomes the slow clock (i.e. what Linux labels STR). | ||
393 | */ | ||
394 | }; | ||
395 | |||
396 | static int __init pwm_init(void) | ||
397 | { | ||
398 | return platform_driver_probe(&atmel_pwm_driver, pwm_probe); | ||
399 | } | ||
400 | module_init(pwm_init); | ||
401 | |||
402 | static void __exit pwm_exit(void) | ||
403 | { | ||
404 | platform_driver_unregister(&atmel_pwm_driver); | ||
405 | } | ||
406 | module_exit(pwm_exit); | ||
407 | |||
408 | MODULE_DESCRIPTION("Driver for AT32/AT91 PWM module"); | ||
409 | MODULE_LICENSE("GPL"); | ||
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index 9cc25fd80b60..50c2b60e1fee 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig | |||
@@ -879,7 +879,8 @@ config SMC91X | |||
879 | tristate "SMC 91C9x/91C1xxx support" | 879 | tristate "SMC 91C9x/91C1xxx support" |
880 | select CRC32 | 880 | select CRC32 |
881 | select MII | 881 | select MII |
882 | depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || SOC_AU1X00 || BLACKFIN | 882 | depends on ARM || REDWOOD_5 || REDWOOD_6 || M32R || SUPERH || \ |
883 | SOC_AU1X00 || BLACKFIN || MN10300 | ||
883 | help | 884 | help |
884 | This is a driver for SMC's 91x series of Ethernet chipsets, | 885 | This is a driver for SMC's 91x series of Ethernet chipsets, |
885 | including the SMC91C94 and the SMC91C111. Say Y if you want it | 886 | including the SMC91C94 and the SMC91C111. Say Y if you want it |
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index 917b7b46f1a7..65d0a9103297 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c | |||
@@ -1,221 +1,10 @@ | |||
1 | /* $Id: ethernet.c,v 1.31 2004/10/18 14:49:03 starvik Exp $ | 1 | /* |
2 | * | ||
3 | * e100net.c: A network driver for the ETRAX 100LX network controller. | 2 | * e100net.c: A network driver for the ETRAX 100LX network controller. |
4 | * | 3 | * |
5 | * Copyright (c) 1998-2002 Axis Communications AB. | 4 | * Copyright (c) 1998-2002 Axis Communications AB. |
6 | * | 5 | * |
7 | * The outline of this driver comes from skeleton.c. | 6 | * The outline of this driver comes from skeleton.c. |
8 | * | 7 | * |
9 | * $Log: ethernet.c,v $ | ||
10 | * Revision 1.31 2004/10/18 14:49:03 starvik | ||
11 | * Use RX interrupt as random source | ||
12 | * | ||
13 | * Revision 1.30 2004/09/29 10:44:04 starvik | ||
14 | * Enabed MAC-address output again | ||
15 | * | ||
16 | * Revision 1.29 2004/08/24 07:14:05 starvik | ||
17 | * Make use of generic MDIO interface and constants. | ||
18 | * | ||
19 | * Revision 1.28 2004/08/20 09:37:11 starvik | ||
20 | * Added support for Intel LXT972A. Creds to Randy Scarborough. | ||
21 | * | ||
22 | * Revision 1.27 2004/08/16 12:37:22 starvik | ||
23 | * Merge of Linux 2.6.8 | ||
24 | * | ||
25 | * Revision 1.25 2004/06/21 10:29:57 starvik | ||
26 | * Merge of Linux 2.6.7 | ||
27 | * | ||
28 | * Revision 1.23 2004/06/09 05:29:22 starvik | ||
29 | * Avoid any race where R_DMA_CH1_FIRST is NULL (may trigger cache bug). | ||
30 | * | ||
31 | * Revision 1.22 2004/05/14 07:58:03 starvik | ||
32 | * Merge of changes from 2.4 | ||
33 | * | ||
34 | * Revision 1.20 2004/03/11 11:38:40 starvik | ||
35 | * Merge of Linux 2.6.4 | ||
36 | * | ||
37 | * Revision 1.18 2003/12/03 13:45:46 starvik | ||
38 | * Use hardware pad for short packets to prevent information leakage. | ||
39 | * | ||
40 | * Revision 1.17 2003/07/04 08:27:37 starvik | ||
41 | * Merge of Linux 2.5.74 | ||
42 | * | ||
43 | * Revision 1.16 2003/04/24 08:28:22 starvik | ||
44 | * New LED behaviour: LED off when no link | ||
45 | * | ||
46 | * Revision 1.15 2003/04/09 05:20:47 starvik | ||
47 | * Merge of Linux 2.5.67 | ||
48 | * | ||
49 | * Revision 1.13 2003/03/06 16:11:01 henriken | ||
50 | * Off by one error in group address register setting. | ||
51 | * | ||
52 | * Revision 1.12 2003/02/27 17:24:19 starvik | ||
53 | * Corrected Rev to Revision | ||
54 | * | ||
55 | * Revision 1.11 2003/01/24 09:53:21 starvik | ||
56 | * Oops. Initialize GA to 0, not to 1 | ||
57 | * | ||
58 | * Revision 1.10 2003/01/24 09:50:55 starvik | ||
59 | * Initialize GA_0 and GA_1 to 0 to avoid matching of unwanted packets | ||
60 | * | ||
61 | * Revision 1.9 2002/12/13 07:40:58 starvik | ||
62 | * Added basic ethtool interface | ||
63 | * Handled out of memory when allocating new buffers | ||
64 | * | ||
65 | * Revision 1.8 2002/12/11 13:13:57 starvik | ||
66 | * Added arch/ to v10 specific includes | ||
67 | * Added fix from Linux 2.4 in serial.c (flush_to_flip_buffer) | ||
68 | * | ||
69 | * Revision 1.7 2002/11/26 09:41:42 starvik | ||
70 | * Added e100_set_config (standard interface to set media type) | ||
71 | * Added protection against preemptive scheduling | ||
72 | * Added standard MII ioctls | ||
73 | * | ||
74 | * Revision 1.6 2002/11/21 07:18:18 starvik | ||
75 | * Timers must be initialized in 2.5.48 | ||
76 | * | ||
77 | * Revision 1.5 2002/11/20 11:56:11 starvik | ||
78 | * Merge of Linux 2.5.48 | ||
79 | * | ||
80 | * Revision 1.4 2002/11/18 07:26:46 starvik | ||
81 | * Linux 2.5 port of latest Linux 2.4 ethernet driver | ||
82 | * | ||
83 | * Revision 1.33 2002/10/02 20:16:17 hp | ||
84 | * SETF, SETS: Use underscored IO_x_ macros rather than incorrect token concatenation | ||
85 | * | ||
86 | * Revision 1.32 2002/09/16 06:05:58 starvik | ||
87 | * Align memory returned by dev_alloc_skb | ||
88 | * Moved handling of sent packets to interrupt to avoid reference counting problem | ||
89 | * | ||
90 | * Revision 1.31 2002/09/10 13:28:23 larsv | ||
91 | * Return -EINVAL for unknown ioctls to avoid confusing tools that tests | ||
92 | * for supported functionality by issuing special ioctls, i.e. wireless | ||
93 | * extensions. | ||
94 | * | ||
95 | * Revision 1.30 2002/05/07 18:50:08 johana | ||
96 | * Correct spelling in comments. | ||
97 | * | ||
98 | * Revision 1.29 2002/05/06 05:38:49 starvik | ||
99 | * Performance improvements: | ||
100 | * Large packets are not copied (breakpoint set to 256 bytes) | ||
101 | * The cache bug workaround is delayed until half of the receive list | ||
102 | * has been used | ||
103 | * Added transmit list | ||
104 | * Transmit interrupts are only enabled when transmit queue is full | ||
105 | * | ||
106 | * Revision 1.28.2.1 2002/04/30 08:15:51 starvik | ||
107 | * Performance improvements: | ||
108 | * Large packets are not copied (breakpoint set to 256 bytes) | ||
109 | * The cache bug workaround is delayed until half of the receive list | ||
110 | * has been used. | ||
111 | * Added transmit list | ||
112 | * Transmit interrupts are only enabled when transmit queue is full | ||
113 | * | ||
114 | * Revision 1.28 2002/04/22 11:47:21 johana | ||
115 | * Fix according to 2.4.19-pre7. time_after/time_before and | ||
116 | * missing end of comment. | ||
117 | * The patch has a typo for ethernet.c in e100_clear_network_leds(), | ||
118 | * that is fixed here. | ||
119 | * | ||
120 | * Revision 1.27 2002/04/12 11:55:11 bjornw | ||
121 | * Added TODO | ||
122 | * | ||
123 | * Revision 1.26 2002/03/15 17:11:02 bjornw | ||
124 | * Use prepare_rx_descriptor after the CPU has touched the receiving descs | ||
125 | * | ||
126 | * Revision 1.25 2002/03/08 13:07:53 bjornw | ||
127 | * Unnecessary spinlock removed | ||
128 | * | ||
129 | * Revision 1.24 2002/02/20 12:57:43 fredriks | ||
130 | * Replaced MIN() with min(). | ||
131 | * | ||
132 | * Revision 1.23 2002/02/20 10:58:14 fredriks | ||
133 | * Strip the Ethernet checksum (4 bytes) before forwarding a frame to upper layers. | ||
134 | * | ||
135 | * Revision 1.22 2002/01/30 07:48:22 matsfg | ||
136 | * Initiate R_NETWORK_TR_CTRL | ||
137 | * | ||
138 | * Revision 1.21 2001/11/23 11:54:49 starvik | ||
139 | * Added IFF_PROMISC and IFF_ALLMULTI handling in set_multicast_list | ||
140 | * Removed compiler warnings | ||
141 | * | ||
142 | * Revision 1.20 2001/11/12 19:26:00 pkj | ||
143 | * * Corrected e100_negotiate() to not assign half to current_duplex when | ||
144 | * it was supposed to compare them... | ||
145 | * * Cleaned up failure handling in e100_open(). | ||
146 | * * Fixed compiler warnings. | ||
147 | * | ||
148 | * Revision 1.19 2001/11/09 07:43:09 starvik | ||
149 | * Added full duplex support | ||
150 | * Added ioctl to set speed and duplex | ||
151 | * Clear LED timer only runs when LED is lit | ||
152 | * | ||
153 | * Revision 1.18 2001/10/03 14:40:43 jonashg | ||
154 | * Update rx_bytes counter. | ||
155 | * | ||
156 | * Revision 1.17 2001/06/11 12:43:46 olof | ||
157 | * Modified defines for network LED behavior | ||
158 | * | ||
159 | * Revision 1.16 2001/05/30 06:12:46 markusl | ||
160 | * TxDesc.next should not be set to NULL | ||
161 | * | ||
162 | * Revision 1.15 2001/05/29 10:27:04 markusl | ||
163 | * Updated after review remarks: | ||
164 | * +Use IO_EXTRACT | ||
165 | * +Handle underrun | ||
166 | * | ||
167 | * Revision 1.14 2001/05/29 09:20:14 jonashg | ||
168 | * Use driver name on printk output so one can tell which driver that complains. | ||
169 | * | ||
170 | * Revision 1.13 2001/05/09 12:35:59 johana | ||
171 | * Use DMA_NBR and IRQ_NBR defines from dma.h and irq.h | ||
172 | * | ||
173 | * Revision 1.12 2001/04/05 11:43:11 tobiasa | ||
174 | * Check dev before panic. | ||
175 | * | ||
176 | * Revision 1.11 2001/04/04 11:21:05 markusl | ||
177 | * Updated according to review remarks | ||
178 | * | ||
179 | * Revision 1.10 2001/03/26 16:03:06 bjornw | ||
180 | * Needs linux/config.h | ||
181 | * | ||
182 | * Revision 1.9 2001/03/19 14:47:48 pkj | ||
183 | * * Make sure there is always a pause after the network LEDs are | ||
184 | * changed so they will not look constantly lit during heavy traffic. | ||
185 | * * Always use HZ when setting times relative to jiffies. | ||
186 | * * Use LED_NETWORK_SET() when setting the network LEDs. | ||
187 | * | ||
188 | * Revision 1.8 2001/02/27 13:52:48 bjornw | ||
189 | * malloc.h -> slab.h | ||
190 | * | ||
191 | * Revision 1.7 2001/02/23 13:46:38 bjornw | ||
192 | * Spellling check | ||
193 | * | ||
194 | * Revision 1.6 2001/01/26 15:21:04 starvik | ||
195 | * Don't disable interrupts while reading MDIO registers (MDIO is slow) | ||
196 | * Corrected promiscuous mode | ||
197 | * Improved deallocation of IRQs ("ifconfig eth0 down" now works) | ||
198 | * | ||
199 | * Revision 1.5 2000/11/29 17:22:22 bjornw | ||
200 | * Get rid of the udword types legacy stuff | ||
201 | * | ||
202 | * Revision 1.4 2000/11/22 16:36:09 bjornw | ||
203 | * Please marketing by using the correct case when spelling Etrax. | ||
204 | * | ||
205 | * Revision 1.3 2000/11/21 16:43:04 bjornw | ||
206 | * Minor short->int change | ||
207 | * | ||
208 | * Revision 1.2 2000/11/08 14:27:57 bjornw | ||
209 | * 2.4 port | ||
210 | * | ||
211 | * Revision 1.1 2000/11/06 13:56:00 bjornw | ||
212 | * Verbatim copy of the 1.24 version of e100net.c from elinux | ||
213 | * | ||
214 | * Revision 1.24 2000/10/04 15:55:23 bjornw | ||
215 | * * Use virt_to_phys etc. for DMA addresses | ||
216 | * * Removed bogus CHECKSUM_UNNECESSARY | ||
217 | * | ||
218 | * | ||
219 | */ | 8 | */ |
220 | 9 | ||
221 | 10 | ||
@@ -244,7 +33,7 @@ | |||
244 | #include <linux/ethtool.h> | 33 | #include <linux/ethtool.h> |
245 | 34 | ||
246 | #include <asm/arch/svinto.h>/* DMA and register descriptions */ | 35 | #include <asm/arch/svinto.h>/* DMA and register descriptions */ |
247 | #include <asm/io.h> /* LED_* I/O functions */ | 36 | #include <asm/io.h> /* CRIS_LED_* I/O functions */ |
248 | #include <asm/irq.h> | 37 | #include <asm/irq.h> |
249 | #include <asm/dma.h> | 38 | #include <asm/dma.h> |
250 | #include <asm/system.h> | 39 | #include <asm/system.h> |
@@ -1899,18 +1688,18 @@ e100_set_network_leds(int active) | |||
1899 | if (!current_speed) { | 1688 | if (!current_speed) { |
1900 | /* Make LED red, link is down */ | 1689 | /* Make LED red, link is down */ |
1901 | #if defined(CONFIG_ETRAX_NETWORK_RED_ON_NO_CONNECTION) | 1690 | #if defined(CONFIG_ETRAX_NETWORK_RED_ON_NO_CONNECTION) |
1902 | LED_NETWORK_SET(LED_RED); | 1691 | CRIS_LED_NETWORK_SET(CRIS_LED_RED); |
1903 | #else | 1692 | #else |
1904 | LED_NETWORK_SET(LED_OFF); | 1693 | CRIS_LED_NETWORK_SET(CRIS_LED_OFF); |
1905 | #endif | 1694 | #endif |
1906 | } else if (light_leds) { | 1695 | } else if (light_leds) { |
1907 | if (current_speed == 10) { | 1696 | if (current_speed == 10) { |
1908 | LED_NETWORK_SET(LED_ORANGE); | 1697 | CRIS_LED_NETWORK_SET(CRIS_LED_ORANGE); |
1909 | } else { | 1698 | } else { |
1910 | LED_NETWORK_SET(LED_GREEN); | 1699 | CRIS_LED_NETWORK_SET(CRIS_LED_GREEN); |
1911 | } | 1700 | } |
1912 | } else { | 1701 | } else { |
1913 | LED_NETWORK_SET(LED_OFF); | 1702 | CRIS_LED_NETWORK_SET(CRIS_LED_OFF); |
1914 | } | 1703 | } |
1915 | } | 1704 | } |
1916 | 1705 | ||
diff --git a/drivers/net/mlx4/alloc.c b/drivers/net/mlx4/alloc.c index b226e019bc8b..521dc0322ee4 100644 --- a/drivers/net/mlx4/alloc.c +++ b/drivers/net/mlx4/alloc.c | |||
@@ -116,40 +116,53 @@ int mlx4_buf_alloc(struct mlx4_dev *dev, int size, int max_direct, | |||
116 | buf->nbufs = 1; | 116 | buf->nbufs = 1; |
117 | buf->npages = 1; | 117 | buf->npages = 1; |
118 | buf->page_shift = get_order(size) + PAGE_SHIFT; | 118 | buf->page_shift = get_order(size) + PAGE_SHIFT; |
119 | buf->u.direct.buf = dma_alloc_coherent(&dev->pdev->dev, | 119 | buf->direct.buf = dma_alloc_coherent(&dev->pdev->dev, |
120 | size, &t, GFP_KERNEL); | 120 | size, &t, GFP_KERNEL); |
121 | if (!buf->u.direct.buf) | 121 | if (!buf->direct.buf) |
122 | return -ENOMEM; | 122 | return -ENOMEM; |
123 | 123 | ||
124 | buf->u.direct.map = t; | 124 | buf->direct.map = t; |
125 | 125 | ||
126 | while (t & ((1 << buf->page_shift) - 1)) { | 126 | while (t & ((1 << buf->page_shift) - 1)) { |
127 | --buf->page_shift; | 127 | --buf->page_shift; |
128 | buf->npages *= 2; | 128 | buf->npages *= 2; |
129 | } | 129 | } |
130 | 130 | ||
131 | memset(buf->u.direct.buf, 0, size); | 131 | memset(buf->direct.buf, 0, size); |
132 | } else { | 132 | } else { |
133 | int i; | 133 | int i; |
134 | 134 | ||
135 | buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; | 135 | buf->nbufs = (size + PAGE_SIZE - 1) / PAGE_SIZE; |
136 | buf->npages = buf->nbufs; | 136 | buf->npages = buf->nbufs; |
137 | buf->page_shift = PAGE_SHIFT; | 137 | buf->page_shift = PAGE_SHIFT; |
138 | buf->u.page_list = kzalloc(buf->nbufs * sizeof *buf->u.page_list, | 138 | buf->page_list = kzalloc(buf->nbufs * sizeof *buf->page_list, |
139 | GFP_KERNEL); | 139 | GFP_KERNEL); |
140 | if (!buf->u.page_list) | 140 | if (!buf->page_list) |
141 | return -ENOMEM; | 141 | return -ENOMEM; |
142 | 142 | ||
143 | for (i = 0; i < buf->nbufs; ++i) { | 143 | for (i = 0; i < buf->nbufs; ++i) { |
144 | buf->u.page_list[i].buf = | 144 | buf->page_list[i].buf = |
145 | dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, | 145 | dma_alloc_coherent(&dev->pdev->dev, PAGE_SIZE, |
146 | &t, GFP_KERNEL); | 146 | &t, GFP_KERNEL); |
147 | if (!buf->u.page_list[i].buf) | 147 | if (!buf->page_list[i].buf) |
148 | goto err_free; | 148 | goto err_free; |
149 | 149 | ||
150 | buf->u.page_list[i].map = t; | 150 | buf->page_list[i].map = t; |
151 | 151 | ||
152 | memset(buf->u.page_list[i].buf, 0, PAGE_SIZE); | 152 | memset(buf->page_list[i].buf, 0, PAGE_SIZE); |
153 | } | ||
154 | |||
155 | if (BITS_PER_LONG == 64) { | ||
156 | struct page **pages; | ||
157 | pages = kmalloc(sizeof *pages * buf->nbufs, GFP_KERNEL); | ||
158 | if (!pages) | ||
159 | goto err_free; | ||
160 | for (i = 0; i < buf->nbufs; ++i) | ||
161 | pages[i] = virt_to_page(buf->page_list[i].buf); | ||
162 | buf->direct.buf = vmap(pages, buf->nbufs, VM_MAP, PAGE_KERNEL); | ||
163 | kfree(pages); | ||
164 | if (!buf->direct.buf) | ||
165 | goto err_free; | ||
153 | } | 166 | } |
154 | } | 167 | } |
155 | 168 | ||
@@ -167,15 +180,18 @@ void mlx4_buf_free(struct mlx4_dev *dev, int size, struct mlx4_buf *buf) | |||
167 | int i; | 180 | int i; |
168 | 181 | ||
169 | if (buf->nbufs == 1) | 182 | if (buf->nbufs == 1) |
170 | dma_free_coherent(&dev->pdev->dev, size, buf->u.direct.buf, | 183 | dma_free_coherent(&dev->pdev->dev, size, buf->direct.buf, |
171 | buf->u.direct.map); | 184 | buf->direct.map); |
172 | else { | 185 | else { |
186 | if (BITS_PER_LONG == 64) | ||
187 | vunmap(buf->direct.buf); | ||
188 | |||
173 | for (i = 0; i < buf->nbufs; ++i) | 189 | for (i = 0; i < buf->nbufs; ++i) |
174 | if (buf->u.page_list[i].buf) | 190 | if (buf->page_list[i].buf) |
175 | dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, | 191 | dma_free_coherent(&dev->pdev->dev, PAGE_SIZE, |
176 | buf->u.page_list[i].buf, | 192 | buf->page_list[i].buf, |
177 | buf->u.page_list[i].map); | 193 | buf->page_list[i].map); |
178 | kfree(buf->u.page_list); | 194 | kfree(buf->page_list); |
179 | } | 195 | } |
180 | } | 196 | } |
181 | EXPORT_SYMBOL_GPL(mlx4_buf_free); | 197 | EXPORT_SYMBOL_GPL(mlx4_buf_free); |
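The 64-bit branch added above gives the CPU one virtually contiguous view of the per-page coherent buffers; in isolation the vmap()/vunmap() pairing looks like this minimal sketch (variable names are illustrative):

	/* map nbufs separately allocated pages into one contiguous VA range */
	struct page **pages = kmalloc(sizeof(*pages) * nbufs, GFP_KERNEL);
	void *virt;
	int i;

	if (!pages)
		return -ENOMEM;
	for (i = 0; i < nbufs; ++i)
		pages[i] = virt_to_page(page_list[i].buf);
	virt = vmap(pages, nbufs, VM_MAP, PAGE_KERNEL);
	kfree(pages);			/* array only needed while mapping */
	if (!virt)
		return -ENOMEM;
	/* ... later: vunmap(virt) before freeing the underlying pages */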
diff --git a/drivers/net/mlx4/mr.c b/drivers/net/mlx4/mr.c index 9c9e308d0917..679dfdb6807f 100644 --- a/drivers/net/mlx4/mr.c +++ b/drivers/net/mlx4/mr.c | |||
@@ -419,9 +419,9 @@ int mlx4_buf_write_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt, | |||
419 | 419 | ||
420 | for (i = 0; i < buf->npages; ++i) | 420 | for (i = 0; i < buf->npages; ++i) |
421 | if (buf->nbufs == 1) | 421 | if (buf->nbufs == 1) |
422 | page_list[i] = buf->u.direct.map + (i << buf->page_shift); | 422 | page_list[i] = buf->direct.map + (i << buf->page_shift); |
423 | else | 423 | else |
424 | page_list[i] = buf->u.page_list[i].map; | 424 | page_list[i] = buf->page_list[i].map; |
425 | 425 | ||
426 | err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list); | 426 | err = mlx4_write_mtt(dev, mtt, 0, buf->npages, page_list); |
427 | 427 | ||
diff --git a/drivers/net/smc91x.h b/drivers/net/smc91x.h index 271c28dc9baa..51d4134b37b1 100644 --- a/drivers/net/smc91x.h +++ b/drivers/net/smc91x.h | |||
@@ -450,8 +450,20 @@ static inline void LPD7_SMC_outsw (unsigned char* a, int r, | |||
450 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) | 450 | #define SMC_outsl(a, r, p, l) writesl((a) + (r), p, l) |
451 | #define SMC_IRQ_FLAGS (-1) /* from resource */ | 451 | #define SMC_IRQ_FLAGS (-1) /* from resource */ |
452 | 452 | ||
453 | #elif defined(CONFIG_MN10300) | ||
454 | |||
455 | /* | ||
456 | * MN10300/AM33 configuration | ||
457 | */ | ||
458 | |||
459 | #include <asm/unit/smc91111.h> | ||
460 | |||
453 | #else | 461 | #else |
454 | 462 | ||
463 | /* | ||
464 | * Default configuration | ||
465 | */ | ||
466 | |||
455 | #define SMC_CAN_USE_8BIT 1 | 467 | #define SMC_CAN_USE_8BIT 1 |
456 | #define SMC_CAN_USE_16BIT 1 | 468 | #define SMC_CAN_USE_16BIT 1 |
457 | #define SMC_CAN_USE_32BIT 1 | 469 | #define SMC_CAN_USE_32BIT 1 |
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c index 1e89d4de1bb7..5e2d763c6b5f 100644 --- a/drivers/net/wan/x25_asy.c +++ b/drivers/net/wan/x25_asy.c | |||
@@ -554,6 +554,7 @@ static void x25_asy_receive_buf(struct tty_struct *tty, const unsigned char *cp, | |||
554 | static int x25_asy_open_tty(struct tty_struct *tty) | 554 | static int x25_asy_open_tty(struct tty_struct *tty) |
555 | { | 555 | { |
556 | struct x25_asy *sl = (struct x25_asy *) tty->disc_data; | 556 | struct x25_asy *sl = (struct x25_asy *) tty->disc_data; |
557 | struct tty_ldisc *ld; | ||
557 | int err; | 558 | int err; |
558 | 559 | ||
559 | /* First make sure we're not already connected. */ | 560 | /* First make sure we're not already connected. */ |
@@ -572,9 +573,7 @@ static int x25_asy_open_tty(struct tty_struct *tty) | |||
572 | if (tty->driver->flush_buffer) { | 573 | if (tty->driver->flush_buffer) { |
573 | tty->driver->flush_buffer(tty); | 574 | tty->driver->flush_buffer(tty); |
574 | } | 575 | } |
575 | if (tty->ldisc.flush_buffer) { | 576 | tty_ldisc_flush(tty); |
576 | tty->ldisc.flush_buffer(tty); | ||
577 | } | ||
578 | 577 | ||
579 | /* Restore default settings */ | 578 | /* Restore default settings */ |
580 | sl->dev->type = ARPHRD_X25; | 579 | sl->dev->type = ARPHRD_X25; |
diff --git a/drivers/parport/Kconfig b/drivers/parport/Kconfig index d449b150930e..b7bcdcc5c724 100644 --- a/drivers/parport/Kconfig +++ b/drivers/parport/Kconfig | |||
@@ -35,7 +35,8 @@ if PARPORT | |||
35 | 35 | ||
36 | config PARPORT_PC | 36 | config PARPORT_PC |
37 | tristate "PC-style hardware" | 37 | tristate "PC-style hardware" |
38 | depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && (!M68K || ISA) | 38 | depends on (!SPARC64 || PCI) && !SPARC32 && !M32R && !FRV && \ |
39 | (!M68K || ISA) && !MN10300 | ||
39 | ---help--- | 40 | ---help--- |
40 | You should say Y here if you have a PC-style parallel port. All | 41 | You should say Y here if you have a PC-style parallel port. All |
41 | IBM PC compatible computers and some Alphas have PC-style | 42 | IBM PC compatible computers and some Alphas have PC-style |
diff --git a/drivers/parport/ieee1284_ops.c b/drivers/parport/ieee1284_ops.c index 525312f2fe9c..2e21af43d91e 100644 --- a/drivers/parport/ieee1284_ops.c +++ b/drivers/parport/ieee1284_ops.c | |||
@@ -888,7 +888,7 @@ size_t parport_ieee1284_epp_read_addr (struct parport *port, | |||
888 | 888 | ||
889 | /* Event 59: set nSelectIn (nAStrb) high */ | 889 | /* Event 59: set nSelectIn (nAStrb) high */ |
890 | parport_frob_control (port, PARPORT_CONTROL_SELECT, | 890 | parport_frob_control (port, PARPORT_CONTROL_SELECT, |
891 | PARPORT_CONTROL_SELECT); | 891 | 0); |
892 | 892 | ||
893 | /* Event 60: wait for Busy to go low */ | 893 | /* Event 60: wait for Busy to go low */ |
894 | if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, | 894 | if (parport_poll_peripheral (port, PARPORT_STATUS_BUSY, |
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 9f04d17576d6..4d1ce2e7361e 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile | |||
@@ -38,6 +38,7 @@ obj-$(CONFIG_PPC32) += setup-irq.o | |||
38 | obj-$(CONFIG_PPC) += setup-bus.o | 38 | obj-$(CONFIG_PPC) += setup-bus.o |
39 | obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o | 39 | obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o |
40 | obj-$(CONFIG_X86_VISWS) += setup-irq.o | 40 | obj-$(CONFIG_X86_VISWS) += setup-irq.o |
41 | obj-$(CONFIG_MN10300) += setup-bus.o | ||
41 | 42 | ||
42 | # | 43 | # |
43 | # ACPI Related PCI FW Functions | 44 | # ACPI Related PCI FW Functions |
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index 31fa6c92aa5e..a4c3089f892a 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c | |||
@@ -692,6 +692,23 @@ static int iommu_flush_iotlb_psi(struct intel_iommu *iommu, u16 did, | |||
692 | DMA_TLB_PSI_FLUSH, non_present_entry_flush); | 692 | DMA_TLB_PSI_FLUSH, non_present_entry_flush); |
693 | } | 693 | } |
694 | 694 | ||
695 | static void iommu_disable_protect_mem_regions(struct intel_iommu *iommu) | ||
696 | { | ||
697 | u32 pmen; | ||
698 | unsigned long flags; | ||
699 | |||
700 | spin_lock_irqsave(&iommu->register_lock, flags); | ||
701 | pmen = readl(iommu->reg + DMAR_PMEN_REG); | ||
702 | pmen &= ~DMA_PMEN_EPM; | ||
703 | writel(pmen, iommu->reg + DMAR_PMEN_REG); | ||
704 | |||
705 | /* wait for the protected region status bit to clear */ | ||
706 | IOMMU_WAIT_OP(iommu, DMAR_PMEN_REG, | ||
707 | readl, !(pmen & DMA_PMEN_PRS), pmen); | ||
708 | |||
709 | spin_unlock_irqrestore(&iommu->register_lock, flags); | ||
710 | } | ||
711 | |||
695 | static int iommu_enable_translation(struct intel_iommu *iommu) | 712 | static int iommu_enable_translation(struct intel_iommu *iommu) |
696 | { | 713 | { |
697 | u32 sts; | 714 | u32 sts; |
@@ -728,7 +745,7 @@ static int iommu_disable_translation(struct intel_iommu *iommu) | |||
728 | 745 | ||
729 | /* iommu interrupt handling. Most stuff are MSI-like. */ | 746 | /* iommu interrupt handling. Most stuff are MSI-like. */ |
730 | 747 | ||
731 | static char *fault_reason_strings[] = | 748 | static const char *fault_reason_strings[] = |
732 | { | 749 | { |
733 | "Software", | 750 | "Software", |
734 | "Present bit in root entry is clear", | 751 | "Present bit in root entry is clear", |
@@ -743,14 +760,13 @@ static char *fault_reason_strings[] = | |||
743 | "non-zero reserved fields in RTP", | 760 | "non-zero reserved fields in RTP", |
744 | "non-zero reserved fields in CTP", | 761 | "non-zero reserved fields in CTP", |
745 | "non-zero reserved fields in PTE", | 762 | "non-zero reserved fields in PTE", |
746 | "Unknown" | ||
747 | }; | 763 | }; |
748 | #define MAX_FAULT_REASON_IDX ARRAY_SIZE(fault_reason_strings) - 1 | 764 | #define MAX_FAULT_REASON_IDX (ARRAY_SIZE(fault_reason_strings) - 1) |
749 | 765 | ||
750 | char *dmar_get_fault_reason(u8 fault_reason) | 766 | const char *dmar_get_fault_reason(u8 fault_reason) |
751 | { | 767 | { |
752 | if (fault_reason >= MAX_FAULT_REASON_IDX) | 768 | if (fault_reason > MAX_FAULT_REASON_IDX) |
753 | return fault_reason_strings[MAX_FAULT_REASON_IDX - 1]; | 769 | return "Unknown"; |
754 | else | 770 | else |
755 | return fault_reason_strings[fault_reason]; | 771 | return fault_reason_strings[fault_reason]; |
756 | } | 772 | } |
@@ -808,7 +824,7 @@ void dmar_msi_read(int irq, struct msi_msg *msg) | |||
808 | static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, | 824 | static int iommu_page_fault_do_one(struct intel_iommu *iommu, int type, |
809 | u8 fault_reason, u16 source_id, u64 addr) | 825 | u8 fault_reason, u16 source_id, u64 addr) |
810 | { | 826 | { |
811 | char *reason; | 827 | const char *reason; |
812 | 828 | ||
813 | reason = dmar_get_fault_reason(fault_reason); | 829 | reason = dmar_get_fault_reason(fault_reason); |
814 | 830 | ||
@@ -1730,6 +1746,8 @@ int __init init_dmars(void) | |||
1730 | iommu_flush_context_global(iommu, 0); | 1746 | iommu_flush_context_global(iommu, 0); |
1731 | iommu_flush_iotlb_global(iommu, 0); | 1747 | iommu_flush_iotlb_global(iommu, 0); |
1732 | 1748 | ||
1749 | iommu_disable_protect_mem_regions(iommu); | ||
1750 | |||
1733 | ret = iommu_enable_translation(iommu); | 1751 | ret = iommu_enable_translation(iommu); |
1734 | if (ret) | 1752 | if (ret) |
1735 | goto error; | 1753 | goto error; |
diff --git a/drivers/pci/intel-iommu.h b/drivers/pci/intel-iommu.h index 0e4862675ad2..07f5f6353bda 100644 --- a/drivers/pci/intel-iommu.h +++ b/drivers/pci/intel-iommu.h | |||
@@ -140,6 +140,10 @@ static inline void dmar_writeq(void __iomem *addr, u64 val) | |||
140 | #define DMA_TLB_IH_NONLEAF (((u64)1) << 6) | 140 | #define DMA_TLB_IH_NONLEAF (((u64)1) << 6) |
141 | #define DMA_TLB_MAX_SIZE (0x3f) | 141 | #define DMA_TLB_MAX_SIZE (0x3f) |
142 | 142 | ||
143 | /* PMEN_REG */ | ||
144 | #define DMA_PMEN_EPM (((u32)1)<<31) | ||
145 | #define DMA_PMEN_PRS (((u32)1)<<0) | ||
146 | |||
143 | /* GCMD_REG */ | 147 | /* GCMD_REG */ |
144 | #define DMA_GCMD_TE (((u32)1) << 31) | 148 | #define DMA_GCMD_TE (((u32)1) << 31) |
145 | #define DMA_GCMD_SRTP (((u32)1) << 30) | 149 | #define DMA_GCMD_SRTP (((u32)1) << 30) |
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c index b8a4bd94f51d..77f7a7f0646e 100644 --- a/drivers/serial/8250.c +++ b/drivers/serial/8250.c | |||
@@ -305,7 +305,7 @@ static inline int map_8250_out_reg(struct uart_8250_port *up, int offset) | |||
305 | return au_io_out_map[offset]; | 305 | return au_io_out_map[offset]; |
306 | } | 306 | } |
307 | 307 | ||
308 | #elif defined (CONFIG_SERIAL_8250_RM9K) | 308 | #elif defined(CONFIG_SERIAL_8250_RM9K) |
309 | 309 | ||
310 | static const u8 | 310 | static const u8 |
311 | regmap_in[8] = { | 311 | regmap_in[8] = { |
@@ -475,7 +475,7 @@ static inline void _serial_dl_write(struct uart_8250_port *up, int value) | |||
475 | serial_outp(up, UART_DLM, value >> 8 & 0xff); | 475 | serial_outp(up, UART_DLM, value >> 8 & 0xff); |
476 | } | 476 | } |
477 | 477 | ||
478 | #if defined (CONFIG_SERIAL_8250_AU1X00) | 478 | #if defined(CONFIG_SERIAL_8250_AU1X00) |
479 | /* Au1x00 haven't got a standard divisor latch */ | 479 | /* Au1x00 haven't got a standard divisor latch */ |
480 | static int serial_dl_read(struct uart_8250_port *up) | 480 | static int serial_dl_read(struct uart_8250_port *up) |
481 | { | 481 | { |
@@ -492,7 +492,7 @@ static void serial_dl_write(struct uart_8250_port *up, int value) | |||
492 | else | 492 | else |
493 | _serial_dl_write(up, value); | 493 | _serial_dl_write(up, value); |
494 | } | 494 | } |
495 | #elif defined (CONFIG_SERIAL_8250_RM9K) | 495 | #elif defined(CONFIG_SERIAL_8250_RM9K) |
496 | static int serial_dl_read(struct uart_8250_port *up) | 496 | static int serial_dl_read(struct uart_8250_port *up) |
497 | { | 497 | { |
498 | return (up->port.iotype == UPIO_RM9000) ? | 498 | return (up->port.iotype == UPIO_RM9000) ? |
@@ -1185,8 +1185,8 @@ static void autoconfig_irq(struct uart_8250_port *up) | |||
1185 | 1185 | ||
1186 | irqs = probe_irq_on(); | 1186 | irqs = probe_irq_on(); |
1187 | serial_outp(up, UART_MCR, 0); | 1187 | serial_outp(up, UART_MCR, 0); |
1188 | udelay (10); | 1188 | udelay(10); |
1189 | if (up->port.flags & UPF_FOURPORT) { | 1189 | if (up->port.flags & UPF_FOURPORT) { |
1190 | serial_outp(up, UART_MCR, | 1190 | serial_outp(up, UART_MCR, |
1191 | UART_MCR_DTR | UART_MCR_RTS); | 1191 | UART_MCR_DTR | UART_MCR_RTS); |
1192 | } else { | 1192 | } else { |
@@ -1199,7 +1199,7 @@ static void autoconfig_irq(struct uart_8250_port *up) | |||
1199 | (void)serial_inp(up, UART_IIR); | 1199 | (void)serial_inp(up, UART_IIR); |
1200 | (void)serial_inp(up, UART_MSR); | 1200 | (void)serial_inp(up, UART_MSR); |
1201 | serial_outp(up, UART_TX, 0xFF); | 1201 | serial_outp(up, UART_TX, 0xFF); |
1202 | udelay (20); | 1202 | udelay(20); |
1203 | irq = probe_irq_off(irqs); | 1203 | irq = probe_irq_off(irqs); |
1204 | 1204 | ||
1205 | serial_outp(up, UART_MCR, save_mcr); | 1205 | serial_outp(up, UART_MCR, save_mcr); |
@@ -1343,7 +1343,7 @@ receive_chars(struct uart_8250_port *up, unsigned int *status) | |||
1343 | 1343 | ||
1344 | uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag); | 1344 | uart_insert_char(&up->port, lsr, UART_LSR_OE, ch, flag); |
1345 | 1345 | ||
1346 | ignore_char: | 1346 | ignore_char: |
1347 | lsr = serial_inp(up, UART_LSR); | 1347 | lsr = serial_inp(up, UART_LSR); |
1348 | } while ((lsr & UART_LSR_DR) && (max_count-- > 0)); | 1348 | } while ((lsr & UART_LSR_DR) && (max_count-- > 0)); |
1349 | spin_unlock(&up->port.lock); | 1349 | spin_unlock(&up->port.lock); |
@@ -1633,7 +1633,8 @@ static void serial8250_backup_timeout(unsigned long data) | |||
1633 | serial_out(up, UART_IER, ier); | 1633 | serial_out(up, UART_IER, ier); |
1634 | 1634 | ||
1635 | /* Standard timer interval plus 0.2s to keep the port running */ | 1635 | /* Standard timer interval plus 0.2s to keep the port running */ |
1636 | mod_timer(&up->timer, jiffies + poll_timeout(up->port.timeout) + HZ/5); | 1636 | mod_timer(&up->timer, |
1637 | jiffies + poll_timeout(up->port.timeout) + HZ / 5); | ||
1637 | } | 1638 | } |
1638 | 1639 | ||
1639 | static unsigned int serial8250_tx_empty(struct uart_port *port) | 1640 | static unsigned int serial8250_tx_empty(struct uart_port *port) |
@@ -1844,7 +1845,7 @@ static int serial8250_startup(struct uart_port *port) | |||
1844 | up->timer.function = serial8250_backup_timeout; | 1845 | up->timer.function = serial8250_backup_timeout; |
1845 | up->timer.data = (unsigned long)up; | 1846 | up->timer.data = (unsigned long)up; |
1846 | mod_timer(&up->timer, jiffies + | 1847 | mod_timer(&up->timer, jiffies + |
1847 | poll_timeout(up->port.timeout) + HZ/5); | 1848 | poll_timeout(up->port.timeout) + HZ / 5); |
1848 | } | 1849 | } |
1849 | } | 1850 | } |
1850 | 1851 | ||
@@ -2173,6 +2174,7 @@ serial8250_set_termios(struct uart_port *port, struct ktermios *termios, | |||
2173 | } | 2174 | } |
2174 | serial8250_set_mctrl(&up->port, up->port.mctrl); | 2175 | serial8250_set_mctrl(&up->port, up->port.mctrl); |
2175 | spin_unlock_irqrestore(&up->port.lock, flags); | 2176 | spin_unlock_irqrestore(&up->port.lock, flags); |
2177 | tty_termios_encode_baud_rate(termios, baud, baud); | ||
2176 | } | 2178 | } |
2177 | 2179 | ||
2178 | static void | 2180 | static void |
diff --git a/drivers/serial/8250_early.c b/drivers/serial/8250_early.c index 1f16de719962..38776e8b064b 100644 --- a/drivers/serial/8250_early.c +++ b/drivers/serial/8250_early.c | |||
@@ -82,7 +82,8 @@ static void __init serial_putc(struct uart_port *port, int c) | |||
82 | serial_out(port, UART_TX, c); | 82 | serial_out(port, UART_TX, c); |
83 | } | 83 | } |
84 | 84 | ||
85 | static void __init early_serial8250_write(struct console *console, const char *s, unsigned int count) | 85 | static void __init early_serial8250_write(struct console *console, |
86 | const char *s, unsigned int count) | ||
86 | { | 87 | { |
87 | struct uart_port *port = &early_device.port; | 88 | struct uart_port *port = &early_device.port; |
88 | unsigned int ier; | 89 | unsigned int ier; |
@@ -132,7 +133,8 @@ static void __init init_port(struct early_serial8250_device *device) | |||
132 | serial_out(port, UART_LCR, c & ~UART_LCR_DLAB); | 133 | serial_out(port, UART_LCR, c & ~UART_LCR_DLAB); |
133 | } | 134 | } |
134 | 135 | ||
135 | static int __init parse_options(struct early_serial8250_device *device, char *options) | 136 | static int __init parse_options(struct early_serial8250_device *device, |
137 | char *options) | ||
136 | { | 138 | { |
137 | struct uart_port *port = &device->port; | 139 | struct uart_port *port = &device->port; |
138 | int mmio, length; | 140 | int mmio, length; |
@@ -145,8 +147,10 @@ static int __init parse_options(struct early_serial8250_device *device, char *op | |||
145 | port->iotype = UPIO_MEM; | 147 | port->iotype = UPIO_MEM; |
146 | port->mapbase = simple_strtoul(options + 5, &options, 0); | 148 | port->mapbase = simple_strtoul(options + 5, &options, 0); |
147 | #ifdef CONFIG_FIX_EARLYCON_MEM | 149 | #ifdef CONFIG_FIX_EARLYCON_MEM |
148 | set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, port->mapbase & PAGE_MASK); | 150 | set_fixmap_nocache(FIX_EARLYCON_MEM_BASE, |
149 | port->membase = (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE); | 151 | port->mapbase & PAGE_MASK); |
152 | port->membase = | ||
153 | (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE); | ||
150 | port->membase += port->mapbase & ~PAGE_MASK; | 154 | port->membase += port->mapbase & ~PAGE_MASK; |
151 | #else | 155 | #else |
152 | port->membase = ioremap(port->mapbase, 64); | 156 | port->membase = ioremap(port->mapbase, 64); |
@@ -165,7 +169,8 @@ static int __init parse_options(struct early_serial8250_device *device, char *op | |||
165 | } else | 169 | } else |
166 | return -EINVAL; | 170 | return -EINVAL; |
167 | 171 | ||
168 | if ((options = strchr(options, ','))) { | 172 | options = strchr(options, ','); |
173 | if (options) { | ||
169 | options++; | 174 | options++; |
170 | device->baud = simple_strtoul(options, NULL, 0); | 175 | device->baud = simple_strtoul(options, NULL, 0); |
171 | length = min(strcspn(options, " "), sizeof(device->options)); | 176 | length = min(strcspn(options, " "), sizeof(device->options)); |
@@ -179,7 +184,7 @@ static int __init parse_options(struct early_serial8250_device *device, char *op | |||
179 | printk(KERN_INFO "Early serial console at %s 0x%llx (options '%s')\n", | 184 | printk(KERN_INFO "Early serial console at %s 0x%llx (options '%s')\n", |
180 | mmio ? "MMIO" : "I/O port", | 185 | mmio ? "MMIO" : "I/O port", |
181 | mmio ? (unsigned long long) port->mapbase | 186 | mmio ? (unsigned long long) port->mapbase |
182 | : (unsigned long long) port->iobase, | 187 | : (unsigned long long) port->iobase, |
183 | device->options); | 188 | device->options); |
184 | return 0; | 189 | return 0; |
185 | } | 190 | } |
@@ -199,7 +204,8 @@ static int __init early_serial8250_setup(char *options) | |||
199 | if (device->port.membase || device->port.iobase) | 204 | if (device->port.membase || device->port.iobase) |
200 | return 0; | 205 | return 0; |
201 | 206 | ||
202 | if ((err = parse_options(device, options)) < 0) | 207 | err = parse_options(device, options); |
208 | if (err < 0) | ||
203 | return err; | 209 | return err; |
204 | 210 | ||
205 | init_port(device); | 211 | init_port(device); |
@@ -219,7 +225,8 @@ int __init setup_early_serial8250_console(char *cmdline) | |||
219 | } | 225 | } |
220 | 226 | ||
221 | options = strchr(cmdline, ',') + 1; | 227 | options = strchr(cmdline, ',') + 1; |
222 | if ((err = early_serial8250_setup(options)) < 0) | 228 | err = early_serial8250_setup(options); |
229 | if (err < 0) | ||
223 | return err; | 230 | return err; |
224 | 231 | ||
225 | register_console(&early_serial8250_console); | 232 | register_console(&early_serial8250_console); |
diff --git a/drivers/serial/8250_gsc.c b/drivers/serial/8250_gsc.c index c5d0addfda4f..4eb7437a404a 100644 --- a/drivers/serial/8250_gsc.c +++ b/drivers/serial/8250_gsc.c | |||
@@ -25,8 +25,7 @@ | |||
25 | 25 | ||
26 | #include "8250.h" | 26 | #include "8250.h" |
27 | 27 | ||
28 | static int __init | 28 | static int __init serial_init_chip(struct parisc_device *dev) |
29 | serial_init_chip(struct parisc_device *dev) | ||
30 | { | 29 | { |
31 | struct uart_port port; | 30 | struct uart_port port; |
32 | unsigned long address; | 31 | unsigned long address; |
@@ -38,18 +37,17 @@ serial_init_chip(struct parisc_device *dev) | |||
38 | * what we have here is a missing parent device, so tell | 37 | * what we have here is a missing parent device, so tell |
39 | * the user what they're missing. | 38 | * the user what they're missing. |
40 | */ | 39 | */ |
41 | if (parisc_parent(dev)->id.hw_type != HPHW_IOA) { | 40 | if (parisc_parent(dev)->id.hw_type != HPHW_IOA) |
42 | printk(KERN_INFO "Serial: device 0x%lx not configured.\n" | 41 | printk(KERN_INFO |
42 | "Serial: device 0x%lx not configured.\n" | ||
43 | "Enable support for Wax, Lasi, Asp or Dino.\n", | 43 | "Enable support for Wax, Lasi, Asp or Dino.\n", |
44 | dev->hpa.start); | 44 | dev->hpa.start); |
45 | } | ||
46 | return -ENODEV; | 45 | return -ENODEV; |
47 | } | 46 | } |
48 | 47 | ||
49 | address = dev->hpa.start; | 48 | address = dev->hpa.start; |
50 | if (dev->id.sversion != 0x8d) { | 49 | if (dev->id.sversion != 0x8d) |
51 | address += 0x800; | 50 | address += 0x800; |
52 | } | ||
53 | 51 | ||
54 | memset(&port, 0, sizeof(port)); | 52 | memset(&port, 0, sizeof(port)); |
55 | port.iotype = UPIO_MEM; | 53 | port.iotype = UPIO_MEM; |
@@ -63,11 +61,12 @@ serial_init_chip(struct parisc_device *dev) | |||
63 | 61 | ||
64 | err = serial8250_register_port(&port); | 62 | err = serial8250_register_port(&port); |
65 | if (err < 0) { | 63 | if (err < 0) { |
66 | printk(KERN_WARNING "serial8250_register_port returned error %d\n", err); | 64 | printk(KERN_WARNING |
65 | "serial8250_register_port returned error %d\n", err); | ||
67 | iounmap(port.membase); | 66 | iounmap(port.membase); |
68 | return err; | 67 | return err; |
69 | } | 68 | } |
70 | 69 | ||
71 | return 0; | 70 | return 0; |
72 | } | 71 | } |
73 | 72 | ||
diff --git a/drivers/serial/8250_hp300.c b/drivers/serial/8250_hp300.c index 2cf0953fe0ec..0e1410f2c033 100644 --- a/drivers/serial/8250_hp300.c +++ b/drivers/serial/8250_hp300.c | |||
@@ -36,7 +36,7 @@ static struct hp300_port *hp300_ports; | |||
36 | #ifdef CONFIG_HPDCA | 36 | #ifdef CONFIG_HPDCA |
37 | 37 | ||
38 | static int __devinit hpdca_init_one(struct dio_dev *d, | 38 | static int __devinit hpdca_init_one(struct dio_dev *d, |
39 | const struct dio_device_id *ent); | 39 | const struct dio_device_id *ent); |
40 | static void __devexit hpdca_remove_one(struct dio_dev *d); | 40 | static void __devexit hpdca_remove_one(struct dio_dev *d); |
41 | 41 | ||
42 | static struct dio_device_id hpdca_dio_tbl[] = { | 42 | static struct dio_device_id hpdca_dio_tbl[] = { |
@@ -85,7 +85,7 @@ extern int hp300_uart_scode; | |||
85 | 85 | ||
86 | #ifdef CONFIG_SERIAL_8250_CONSOLE | 86 | #ifdef CONFIG_SERIAL_8250_CONSOLE |
87 | /* | 87 | /* |
88 | * Parse the bootinfo to find descriptions for headless console and | 88 | * Parse the bootinfo to find descriptions for headless console and |
89 | * debug serial ports and register them with the 8250 driver. | 89 | * debug serial ports and register them with the 8250 driver. |
90 | * This function should be called before serial_console_init() is called | 90 | * This function should be called before serial_console_init() is called |
91 | * to make sure the serial console will be available for use. IA-64 kernel | 91 | * to make sure the serial console will be available for use. IA-64 kernel |
@@ -126,13 +126,11 @@ int __init hp300_setup_serial_console(void) | |||
126 | printk(KERN_WARNING "Serial console is APCI but support is disabled (CONFIG_HPAPCI)!\n"); | 126 | printk(KERN_WARNING "Serial console is APCI but support is disabled (CONFIG_HPAPCI)!\n"); |
127 | return 0; | 127 | return 0; |
128 | #endif | 128 | #endif |
129 | } | 129 | } else { |
130 | else { | ||
131 | #ifdef CONFIG_HPDCA | 130 | #ifdef CONFIG_HPDCA |
132 | unsigned long pa = dio_scodetophysaddr(scode); | 131 | unsigned long pa = dio_scodetophysaddr(scode); |
133 | if (!pa) { | 132 | if (!pa) |
134 | return 0; | 133 | return 0; |
135 | } | ||
136 | 134 | ||
137 | printk(KERN_INFO "Serial console is HP DCA at select code %d\n", scode); | 135 | printk(KERN_INFO "Serial console is HP DCA at select code %d\n", scode); |
138 | 136 | ||
@@ -145,26 +143,23 @@ int __init hp300_setup_serial_console(void) | |||
145 | /* Enable board-interrupts */ | 143 | /* Enable board-interrupts */ |
146 | out_8(pa + DIO_VIRADDRBASE + DCA_IC, DCA_IC_IE); | 144 | out_8(pa + DIO_VIRADDRBASE + DCA_IC, DCA_IC_IE); |
147 | 145 | ||
148 | if (DIO_ID(pa + DIO_VIRADDRBASE) & 0x80) { | 146 | if (DIO_ID(pa + DIO_VIRADDRBASE) & 0x80) |
149 | add_preferred_console("ttyS", port.line, "9600n8"); | 147 | add_preferred_console("ttyS", port.line, "9600n8"); |
150 | } | ||
151 | #else | 148 | #else |
152 | printk(KERN_WARNING "Serial console is DCA but support is disabled (CONFIG_HPDCA)!\n"); | 149 | printk(KERN_WARNING "Serial console is DCA but support is disabled (CONFIG_HPDCA)!\n"); |
153 | return 0; | 150 | return 0; |
154 | #endif | 151 | #endif |
155 | } | 152 | } |
156 | 153 | ||
157 | if (early_serial_setup(&port) < 0) { | 154 | if (early_serial_setup(&port) < 0) |
158 | printk(KERN_WARNING "hp300_setup_serial_console(): early_serial_setup() failed.\n"); | 155 | printk(KERN_WARNING "hp300_setup_serial_console(): early_serial_setup() failed.\n"); |
159 | } | ||
160 | |||
161 | return 0; | 156 | return 0; |
162 | } | 157 | } |
163 | #endif /* CONFIG_SERIAL_8250_CONSOLE */ | 158 | #endif /* CONFIG_SERIAL_8250_CONSOLE */ |
164 | 159 | ||
165 | #ifdef CONFIG_HPDCA | 160 | #ifdef CONFIG_HPDCA |
166 | static int __devinit hpdca_init_one(struct dio_dev *d, | 161 | static int __devinit hpdca_init_one(struct dio_dev *d, |
167 | const struct dio_device_id *ent) | 162 | const struct dio_device_id *ent) |
168 | { | 163 | { |
169 | struct uart_port port; | 164 | struct uart_port port; |
170 | int line; | 165 | int line; |
@@ -210,7 +205,7 @@ static int __devinit hpdca_init_one(struct dio_dev *d, | |||
210 | 205 | ||
211 | static int __init hp300_8250_init(void) | 206 | static int __init hp300_8250_init(void) |
212 | { | 207 | { |
213 | static int called = 0; | 208 | static int called; |
214 | #ifdef CONFIG_HPAPCI | 209 | #ifdef CONFIG_HPAPCI |
215 | int line; | 210 | int line; |
216 | unsigned long base; | 211 | unsigned long base; |
@@ -239,13 +234,12 @@ static int __init hp300_8250_init(void) | |||
239 | * Port 1 is either the console or the DCA. | 234 | * Port 1 is either the console or the DCA. |
240 | */ | 235 | */ |
241 | for (i = 1; i < 4; i++) { | 236 | for (i = 1; i < 4; i++) { |
242 | /* Port 1 is the console on a 425e, on other machines it's mapped to | 237 | /* Port 1 is the console on a 425e, on other machines it's |
243 | * DCA. | 238 | * mapped to DCA. |
244 | */ | 239 | */ |
245 | #ifdef CONFIG_SERIAL_8250_CONSOLE | 240 | #ifdef CONFIG_SERIAL_8250_CONSOLE |
246 | if (i == 1) { | 241 | if (i == 1) |
247 | continue; | 242 | continue; |
248 | } | ||
249 | #endif | 243 | #endif |
250 | 244 | ||
251 | /* Create new serial device */ | 245 | /* Create new serial device */ |
@@ -259,7 +253,8 @@ static int __init hp300_8250_init(void) | |||
259 | 253 | ||
260 | /* Memory mapped I/O */ | 254 | /* Memory mapped I/O */ |
261 | uport.iotype = UPIO_MEM; | 255 | uport.iotype = UPIO_MEM; |
262 | uport.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ | UPF_BOOT_AUTOCONF; | 256 | uport.flags = UPF_SKIP_TEST | UPF_SHARE_IRQ \ |
257 | | UPF_BOOT_AUTOCONF; | ||
263 | /* XXX - no interrupt support yet */ | 258 | /* XXX - no interrupt support yet */ |
264 | uport.irq = 0; | 259 | uport.irq = 0; |
265 | uport.uartclk = HPAPCI_BAUD_BASE * 16; | 260 | uport.uartclk = HPAPCI_BAUD_BASE * 16; |
@@ -270,8 +265,8 @@ static int __init hp300_8250_init(void) | |||
270 | line = serial8250_register_port(&uport); | 265 | line = serial8250_register_port(&uport); |
271 | 266 | ||
272 | if (line < 0) { | 267 | if (line < 0) { |
273 | printk(KERN_NOTICE "8250_hp300: register_serial() APCI %d" | 268 | printk(KERN_NOTICE "8250_hp300: register_serial() APCI" |
274 | " irq %d failed\n", i, uport.irq); | 269 | " %d irq %d failed\n", i, uport.irq); |
275 | kfree(port); | 270 | kfree(port); |
276 | continue; | 271 | continue; |
277 | } | 272 | } |
diff --git a/drivers/serial/8250_hub6.c b/drivers/serial/8250_hub6.c index daf569cd3c8f..7609150e7d5e 100644 --- a/drivers/serial/8250_hub6.c +++ b/drivers/serial/8250_hub6.c | |||
@@ -23,18 +23,18 @@ | |||
23 | } | 23 | } |
24 | 24 | ||
25 | static struct plat_serial8250_port hub6_data[] = { | 25 | static struct plat_serial8250_port hub6_data[] = { |
26 | HUB6(0,0), | 26 | HUB6(0, 0), |
27 | HUB6(0,1), | 27 | HUB6(0, 1), |
28 | HUB6(0,2), | 28 | HUB6(0, 2), |
29 | HUB6(0,3), | 29 | HUB6(0, 3), |
30 | HUB6(0,4), | 30 | HUB6(0, 4), |
31 | HUB6(0,5), | 31 | HUB6(0, 5), |
32 | HUB6(1,0), | 32 | HUB6(1, 0), |
33 | HUB6(1,1), | 33 | HUB6(1, 1), |
34 | HUB6(1,2), | 34 | HUB6(1, 2), |
35 | HUB6(1,3), | 35 | HUB6(1, 3), |
36 | HUB6(1,4), | 36 | HUB6(1, 4), |
37 | HUB6(1,5), | 37 | HUB6(1, 5), |
38 | { }, | 38 | { }, |
39 | }; | 39 | }; |
40 | 40 | ||
diff --git a/drivers/serial/8250_pci.c b/drivers/serial/8250_pci.c index 0a4ac2b6eb5a..a8bec498cad6 100644 --- a/drivers/serial/8250_pci.c +++ b/drivers/serial/8250_pci.c | |||
@@ -140,7 +140,7 @@ afavlab_setup(struct serial_private *priv, struct pciserial_board *board, | |||
140 | struct uart_port *port, int idx) | 140 | struct uart_port *port, int idx) |
141 | { | 141 | { |
142 | unsigned int bar, offset = board->first_offset; | 142 | unsigned int bar, offset = board->first_offset; |
143 | 143 | ||
144 | bar = FL_GET_BASE(board->flags); | 144 | bar = FL_GET_BASE(board->flags); |
145 | if (idx < 4) | 145 | if (idx < 4) |
146 | bar += idx; | 146 | bar += idx; |
@@ -227,8 +227,8 @@ static int pci_inteli960ni_init(struct pci_dev *dev) | |||
227 | return -ENODEV; | 227 | return -ENODEV; |
228 | 228 | ||
229 | /* is firmware started? */ | 229 | /* is firmware started? */ |
230 | pci_read_config_dword(dev, 0x44, (void*) &oldval); | 230 | pci_read_config_dword(dev, 0x44, (void *)&oldval); |
231 | if (oldval == 0x00001000L) { /* RESET value */ | 231 | if (oldval == 0x00001000L) { /* RESET value */ |
232 | printk(KERN_DEBUG "Local i960 firmware missing"); | 232 | printk(KERN_DEBUG "Local i960 firmware missing"); |
233 | return -ENODEV; | 233 | return -ENODEV; |
234 | } | 234 | } |
@@ -253,11 +253,11 @@ static int pci_plx9050_init(struct pci_dev *dev) | |||
253 | 253 | ||
254 | irq_config = 0x41; | 254 | irq_config = 0x41; |
255 | if (dev->vendor == PCI_VENDOR_ID_PANACOM || | 255 | if (dev->vendor == PCI_VENDOR_ID_PANACOM || |
256 | dev->subsystem_vendor == PCI_SUBVENDOR_ID_EXSYS) { | 256 | dev->subsystem_vendor == PCI_SUBVENDOR_ID_EXSYS) |
257 | irq_config = 0x43; | 257 | irq_config = 0x43; |
258 | } | 258 | |
259 | if ((dev->vendor == PCI_VENDOR_ID_PLX) && | 259 | if ((dev->vendor == PCI_VENDOR_ID_PLX) && |
260 | (dev->device == PCI_DEVICE_ID_PLX_ROMULUS)) { | 260 | (dev->device == PCI_DEVICE_ID_PLX_ROMULUS)) |
261 | /* | 261 | /* |
262 | * As the megawolf cards have the int pins active | 262 | * As the megawolf cards have the int pins active |
263 | * high, and have 2 UART chips, both ints must be | 263 | * high, and have 2 UART chips, both ints must be |
@@ -267,8 +267,6 @@ static int pci_plx9050_init(struct pci_dev *dev) | |||
267 | * deep FIFOs | 267 | * deep FIFOs |
268 | */ | 268 | */ |
269 | irq_config = 0x5b; | 269 | irq_config = 0x5b; |
270 | } | ||
271 | |||
272 | /* | 270 | /* |
273 | * enable/disable interrupts | 271 | * enable/disable interrupts |
274 | */ | 272 | */ |
@@ -343,14 +341,14 @@ static int sbs_init(struct pci_dev *dev) | |||
343 | { | 341 | { |
344 | u8 __iomem *p; | 342 | u8 __iomem *p; |
345 | 343 | ||
346 | p = ioremap(pci_resource_start(dev, 0),pci_resource_len(dev,0)); | 344 | p = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0)); |
347 | 345 | ||
348 | if (p == NULL) | 346 | if (p == NULL) |
349 | return -ENOMEM; | 347 | return -ENOMEM; |
350 | /* Set bit-4 Control Register (UART RESET) in to reset the uarts */ | 348 | /* Set bit-4 Control Register (UART RESET) in to reset the uarts */ |
351 | writeb(0x10,p + OCT_REG_CR_OFF); | 349 | writeb(0x10, p + OCT_REG_CR_OFF); |
352 | udelay(50); | 350 | udelay(50); |
353 | writeb(0x0,p + OCT_REG_CR_OFF); | 351 | writeb(0x0, p + OCT_REG_CR_OFF); |
354 | 352 | ||
355 | /* Set bit-2 (INTENABLE) of Control Register */ | 353 | /* Set bit-2 (INTENABLE) of Control Register */ |
356 | writeb(0x4, p + OCT_REG_CR_OFF); | 354 | writeb(0x4, p + OCT_REG_CR_OFF); |
@@ -367,10 +365,10 @@ static void __devexit sbs_exit(struct pci_dev *dev) | |||
367 | { | 365 | { |
368 | u8 __iomem *p; | 366 | u8 __iomem *p; |
369 | 367 | ||
370 | p = ioremap(pci_resource_start(dev, 0),pci_resource_len(dev,0)); | 368 | p = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0)); |
371 | if (p != NULL) { | 369 | /* FIXME: What if resource_len < OCT_REG_CR_OFF */ |
370 | if (p != NULL) | ||
372 | writeb(0, p + OCT_REG_CR_OFF); | 371 | writeb(0, p + OCT_REG_CR_OFF); |
373 | } | ||
374 | iounmap(p); | 372 | iounmap(p); |
375 | } | 373 | } |
376 | 374 | ||
@@ -386,7 +384,7 @@ static void __devexit sbs_exit(struct pci_dev *dev) | |||
386 | * with other OSes (like M$ DOS). | 384 | * with other OSes (like M$ DOS). |
387 | * | 385 | * |
388 | * SIIG support added by Andrey Panin <pazke@donpac.ru>, 10/1999 | 386 | * SIIG support added by Andrey Panin <pazke@donpac.ru>, 10/1999 |
389 | * | 387 | * |
390 | * There is two family of SIIG serial cards with different PCI | 388 | * There is two family of SIIG serial cards with different PCI |
391 | * interface chip and different configuration methods: | 389 | * interface chip and different configuration methods: |
392 | * - 10x cards have control registers in IO and/or memory space; | 390 | * - 10x cards have control registers in IO and/or memory space; |
@@ -489,21 +487,21 @@ static const unsigned short timedia_single_port[] = { | |||
489 | 487 | ||
490 | static const unsigned short timedia_dual_port[] = { | 488 | static const unsigned short timedia_dual_port[] = { |
491 | 0x0002, 0x4036, 0x4037, 0x4038, 0x4078, 0x4079, 0x4085, | 489 | 0x0002, 0x4036, 0x4037, 0x4038, 0x4078, 0x4079, 0x4085, |
492 | 0x4088, 0x4089, 0x5037, 0x5078, 0x5079, 0x5085, 0x6079, | 490 | 0x4088, 0x4089, 0x5037, 0x5078, 0x5079, 0x5085, 0x6079, |
493 | 0x7079, 0x8079, 0x8137, 0x8138, 0x8237, 0x8238, 0x9079, | 491 | 0x7079, 0x8079, 0x8137, 0x8138, 0x8237, 0x8238, 0x9079, |
494 | 0x9137, 0x9138, 0x9237, 0x9238, 0xA079, 0xB079, 0xC079, | 492 | 0x9137, 0x9138, 0x9237, 0x9238, 0xA079, 0xB079, 0xC079, |
495 | 0xD079, 0 | 493 | 0xD079, 0 |
496 | }; | 494 | }; |
497 | 495 | ||
498 | static const unsigned short timedia_quad_port[] = { | 496 | static const unsigned short timedia_quad_port[] = { |
499 | 0x4055, 0x4056, 0x4095, 0x4096, 0x5056, 0x8156, 0x8157, | 497 | 0x4055, 0x4056, 0x4095, 0x4096, 0x5056, 0x8156, 0x8157, |
500 | 0x8256, 0x8257, 0x9056, 0x9156, 0x9157, 0x9158, 0x9159, | 498 | 0x8256, 0x8257, 0x9056, 0x9156, 0x9157, 0x9158, 0x9159, |
501 | 0x9256, 0x9257, 0xA056, 0xA157, 0xA158, 0xA159, 0xB056, | 499 | 0x9256, 0x9257, 0xA056, 0xA157, 0xA158, 0xA159, 0xB056, |
502 | 0xB157, 0 | 500 | 0xB157, 0 |
503 | }; | 501 | }; |
504 | 502 | ||
505 | static const unsigned short timedia_eight_port[] = { | 503 | static const unsigned short timedia_eight_port[] = { |
506 | 0x4065, 0x4066, 0x5065, 0x5066, 0x8166, 0x9066, 0x9166, | 504 | 0x4065, 0x4066, 0x5065, 0x5066, 0x8166, 0x9066, 0x9166, |
507 | 0x9167, 0x9168, 0xA066, 0xA167, 0xA168, 0 | 505 | 0x9167, 0x9168, 0xA066, 0xA167, 0xA168, 0 |
508 | }; | 506 | }; |
509 | 507 | ||
@@ -656,7 +654,8 @@ static int pci_ite887x_init(struct pci_dev *dev) | |||
656 | ITE_887x_POSIO_ENABLE | ITE_887x_POSIO_SPEED | | 654 | ITE_887x_POSIO_ENABLE | ITE_887x_POSIO_SPEED | |
657 | ITE_887x_POSIO_IOSIZE_32 | inta_addr[i]); | 655 | ITE_887x_POSIO_IOSIZE_32 | inta_addr[i]); |
658 | /* write INTCBAR - ioport */ | 656 | /* write INTCBAR - ioport */ |
659 | pci_write_config_dword(dev, ITE_887x_INTCBAR, inta_addr[i]); | 657 | pci_write_config_dword(dev, ITE_887x_INTCBAR, |
658 | inta_addr[i]); | ||
660 | ret = inb(inta_addr[i]); | 659 | ret = inb(inta_addr[i]); |
661 | if (ret != 0xff) { | 660 | if (ret != 0xff) { |
662 | /* ioport connected */ | 661 | /* ioport connected */ |
@@ -755,7 +754,7 @@ pci_default_setup(struct serial_private *priv, struct pciserial_board *board, | |||
755 | 754 | ||
756 | if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr) | 755 | if (board->flags & FL_REGION_SZ_CAP && idx >= maxnr) |
757 | return 1; | 756 | return 1; |
758 | 757 | ||
759 | return setup_port(priv, port, bar, offset, board->reg_shift); | 758 | return setup_port(priv, port, bar, offset, board->reg_shift); |
760 | } | 759 | } |
761 | 760 | ||
@@ -843,7 +842,7 @@ static struct pci_serial_quirk pci_serial_quirks[] = { | |||
843 | .init = pci_plx9050_init, | 842 | .init = pci_plx9050_init, |
844 | .setup = pci_default_setup, | 843 | .setup = pci_default_setup, |
845 | .exit = __devexit_p(pci_plx9050_exit), | 844 | .exit = __devexit_p(pci_plx9050_exit), |
846 | }, | 845 | }, |
847 | { | 846 | { |
848 | .vendor = PCI_VENDOR_ID_PANACOM, | 847 | .vendor = PCI_VENDOR_ID_PANACOM, |
849 | .device = PCI_DEVICE_ID_PANACOM_DUALMODEM, | 848 | .device = PCI_DEVICE_ID_PANACOM_DUALMODEM, |
@@ -1032,7 +1031,7 @@ static struct pci_serial_quirk *find_quirk(struct pci_dev *dev) | |||
1032 | quirk_id_matches(quirk->device, dev->device) && | 1031 | quirk_id_matches(quirk->device, dev->device) && |
1033 | quirk_id_matches(quirk->subvendor, dev->subsystem_vendor) && | 1032 | quirk_id_matches(quirk->subvendor, dev->subsystem_vendor) && |
1034 | quirk_id_matches(quirk->subdevice, dev->subsystem_device)) | 1033 | quirk_id_matches(quirk->subdevice, dev->subsystem_device)) |
1035 | break; | 1034 | break; |
1036 | return quirk; | 1035 | return quirk; |
1037 | } | 1036 | } |
1038 | 1037 | ||
@@ -1711,7 +1710,7 @@ static struct pciserial_board pci_boards[] __devinitdata = { | |||
1711 | }; | 1710 | }; |
1712 | 1711 | ||
1713 | static const struct pci_device_id softmodem_blacklist[] = { | 1712 | static const struct pci_device_id softmodem_blacklist[] = { |
1714 | { PCI_VDEVICE ( AL, 0x5457 ), }, /* ALi Corporation M5457 AC'97 Modem */ | 1713 | { PCI_VDEVICE(AL, 0x5457), }, /* ALi Corporation M5457 AC'97 Modem */ |
1715 | }; | 1714 | }; |
1716 | 1715 | ||
1717 | /* | 1716 | /* |
@@ -1724,13 +1723,13 @@ serial_pci_guess_board(struct pci_dev *dev, struct pciserial_board *board) | |||
1724 | { | 1723 | { |
1725 | const struct pci_device_id *blacklist; | 1724 | const struct pci_device_id *blacklist; |
1726 | int num_iomem, num_port, first_port = -1, i; | 1725 | int num_iomem, num_port, first_port = -1, i; |
1727 | 1726 | ||
1728 | /* | 1727 | /* |
1729 | * If it is not a communications device or the programming | 1728 | * If it is not a communications device or the programming |
1730 | * interface is greater than 6, give up. | 1729 | * interface is greater than 6, give up. |
1731 | * | 1730 | * |
1732 | * (Should we try to make guesses for multiport serial devices | 1731 | * (Should we try to make guesses for multiport serial devices |
1733 | * later?) | 1732 | * later?) |
1734 | */ | 1733 | */ |
1735 | if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) && | 1734 | if ((((dev->class >> 8) != PCI_CLASS_COMMUNICATION_SERIAL) && |
1736 | ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) || | 1735 | ((dev->class >> 8) != PCI_CLASS_COMMUNICATION_MODEM)) || |
@@ -1863,25 +1862,23 @@ pciserial_init_ports(struct pci_dev *dev, struct pciserial_board *board) | |||
1863 | break; | 1862 | break; |
1864 | 1863 | ||
1865 | #ifdef SERIAL_DEBUG_PCI | 1864 | #ifdef SERIAL_DEBUG_PCI |
1866 | printk("Setup PCI port: port %x, irq %d, type %d\n", | 1865 | printk(KERN_DEBUG "Setup PCI port: port %x, irq %d, type %d\n", |
1867 | serial_port.iobase, serial_port.irq, serial_port.iotype); | 1866 | serial_port.iobase, serial_port.irq, serial_port.iotype); |
1868 | #endif | 1867 | #endif |
1869 | 1868 | ||
1870 | priv->line[i] = serial8250_register_port(&serial_port); | 1869 | priv->line[i] = serial8250_register_port(&serial_port); |
1871 | if (priv->line[i] < 0) { | 1870 | if (priv->line[i] < 0) { |
1872 | printk(KERN_WARNING "Couldn't register serial port %s: %d\n", pci_name(dev), priv->line[i]); | 1871 | printk(KERN_WARNING "Couldn't register serial port %s: %d\n", pci_name(dev), priv->line[i]); |
1873 | break; | 1872 | break; |
1874 | } | 1873 | } |
1875 | } | 1874 | } |
1876 | |||
1877 | priv->nr = i; | 1875 | priv->nr = i; |
1878 | |||
1879 | return priv; | 1876 | return priv; |
1880 | 1877 | ||
1881 | err_deinit: | 1878 | err_deinit: |
1882 | if (quirk->exit) | 1879 | if (quirk->exit) |
1883 | quirk->exit(dev); | 1880 | quirk->exit(dev); |
1884 | err_out: | 1881 | err_out: |
1885 | return priv; | 1882 | return priv; |
1886 | } | 1883 | } |
1887 | EXPORT_SYMBOL_GPL(pciserial_init_ports); | 1884 | EXPORT_SYMBOL_GPL(pciserial_init_ports); |
@@ -2171,22 +2168,22 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2171 | pbn_b0_8_1843200_200 }, | 2168 | pbn_b0_8_1843200_200 }, |
2172 | 2169 | ||
2173 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_U530, | 2170 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_U530, |
2174 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2171 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2175 | pbn_b2_bt_1_115200 }, | 2172 | pbn_b2_bt_1_115200 }, |
2176 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM2, | 2173 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM2, |
2177 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2174 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2178 | pbn_b2_bt_2_115200 }, | 2175 | pbn_b2_bt_2_115200 }, |
2179 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM422, | 2176 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM422, |
2180 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2177 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2181 | pbn_b2_bt_4_115200 }, | 2178 | pbn_b2_bt_4_115200 }, |
2182 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM232, | 2179 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM232, |
2183 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2180 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2184 | pbn_b2_bt_2_115200 }, | 2181 | pbn_b2_bt_2_115200 }, |
2185 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_COMM4, | 2182 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_COMM4, |
2186 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2183 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2187 | pbn_b2_bt_4_115200 }, | 2184 | pbn_b2_bt_4_115200 }, |
2188 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_COMM8, | 2185 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_COMM8, |
2189 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2186 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2190 | pbn_b2_8_115200 }, | 2187 | pbn_b2_8_115200 }, |
2191 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM8, | 2188 | { PCI_VENDOR_ID_SEALEVEL, PCI_DEVICE_ID_SEALEVEL_UCOMM8, |
2192 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2189 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
@@ -2201,11 +2198,11 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2201 | /* | 2198 | /* |
2202 | * VScom SPCOM800, from sl@s.pl | 2199 | * VScom SPCOM800, from sl@s.pl |
2203 | */ | 2200 | */ |
2204 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_SPCOM800, | 2201 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_SPCOM800, |
2205 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2202 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2206 | pbn_b2_8_921600 }, | 2203 | pbn_b2_8_921600 }, |
2207 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_1077, | 2204 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_1077, |
2208 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2205 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2209 | pbn_b2_4_921600 }, | 2206 | pbn_b2_4_921600 }, |
2210 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, | 2207 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, |
2211 | PCI_SUBVENDOR_ID_KEYSPAN, | 2208 | PCI_SUBVENDOR_ID_KEYSPAN, |
@@ -2223,27 +2220,27 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2223 | pbn_b2_4_115200 }, | 2220 | pbn_b2_4_115200 }, |
2224 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, | 2221 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, |
2225 | PCI_SUBVENDOR_ID_CHASE_PCIFAST, | 2222 | PCI_SUBVENDOR_ID_CHASE_PCIFAST, |
2226 | PCI_SUBDEVICE_ID_CHASE_PCIFAST4, 0, 0, | 2223 | PCI_SUBDEVICE_ID_CHASE_PCIFAST4, 0, 0, |
2227 | pbn_b2_4_460800 }, | 2224 | pbn_b2_4_460800 }, |
2228 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, | 2225 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, |
2229 | PCI_SUBVENDOR_ID_CHASE_PCIFAST, | 2226 | PCI_SUBVENDOR_ID_CHASE_PCIFAST, |
2230 | PCI_SUBDEVICE_ID_CHASE_PCIFAST8, 0, 0, | 2227 | PCI_SUBDEVICE_ID_CHASE_PCIFAST8, 0, 0, |
2231 | pbn_b2_8_460800 }, | 2228 | pbn_b2_8_460800 }, |
2232 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, | 2229 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, |
2233 | PCI_SUBVENDOR_ID_CHASE_PCIFAST, | 2230 | PCI_SUBVENDOR_ID_CHASE_PCIFAST, |
2234 | PCI_SUBDEVICE_ID_CHASE_PCIFAST16, 0, 0, | 2231 | PCI_SUBDEVICE_ID_CHASE_PCIFAST16, 0, 0, |
2235 | pbn_b2_16_460800 }, | 2232 | pbn_b2_16_460800 }, |
2236 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, | 2233 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, |
2237 | PCI_SUBVENDOR_ID_CHASE_PCIFAST, | 2234 | PCI_SUBVENDOR_ID_CHASE_PCIFAST, |
2238 | PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC, 0, 0, | 2235 | PCI_SUBDEVICE_ID_CHASE_PCIFAST16FMC, 0, 0, |
2239 | pbn_b2_16_460800 }, | 2236 | pbn_b2_16_460800 }, |
2240 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, | 2237 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, |
2241 | PCI_SUBVENDOR_ID_CHASE_PCIRAS, | 2238 | PCI_SUBVENDOR_ID_CHASE_PCIRAS, |
2242 | PCI_SUBDEVICE_ID_CHASE_PCIRAS4, 0, 0, | 2239 | PCI_SUBDEVICE_ID_CHASE_PCIRAS4, 0, 0, |
2243 | pbn_b2_4_460800 }, | 2240 | pbn_b2_4_460800 }, |
2244 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, | 2241 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, |
2245 | PCI_SUBVENDOR_ID_CHASE_PCIRAS, | 2242 | PCI_SUBVENDOR_ID_CHASE_PCIRAS, |
2246 | PCI_SUBDEVICE_ID_CHASE_PCIRAS8, 0, 0, | 2243 | PCI_SUBDEVICE_ID_CHASE_PCIRAS8, 0, 0, |
2247 | pbn_b2_8_460800 }, | 2244 | pbn_b2_8_460800 }, |
2248 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, | 2245 | { PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, |
2249 | PCI_SUBVENDOR_ID_EXSYS, | 2246 | PCI_SUBVENDOR_ID_EXSYS, |
@@ -2269,10 +2266,12 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2269 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2266 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2270 | pbn_b1_8_115200 }, | 2267 | pbn_b1_8_115200 }, |
2271 | { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_OXSEMI_16PCI954, | 2268 | { PCI_VENDOR_ID_SPECIALIX, PCI_DEVICE_ID_OXSEMI_16PCI954, |
2272 | PCI_VENDOR_ID_SPECIALIX, PCI_SUBDEVICE_ID_SPECIALIX_SPEED4, 0, 0, | 2269 | PCI_VENDOR_ID_SPECIALIX, PCI_SUBDEVICE_ID_SPECIALIX_SPEED4, |
2270 | 0, 0, | ||
2273 | pbn_b0_4_921600 }, | 2271 | pbn_b0_4_921600 }, |
2274 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, | 2272 | { PCI_VENDOR_ID_OXSEMI, PCI_DEVICE_ID_OXSEMI_16PCI954, |
2275 | PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL, 0, 0, | 2273 | PCI_SUBVENDOR_ID_SIIG, PCI_SUBDEVICE_ID_SIIG_QUARTET_SERIAL, |
2274 | 0, 0, | ||
2276 | pbn_b0_4_1152000 }, | 2275 | pbn_b0_4_1152000 }, |
2277 | 2276 | ||
2278 | /* | 2277 | /* |
@@ -2312,7 +2311,7 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2312 | * Digitan DS560-558, from jimd@esoft.com | 2311 | * Digitan DS560-558, from jimd@esoft.com |
2313 | */ | 2312 | */ |
2314 | { PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_ATT_VENUS_MODEM, | 2313 | { PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_ATT_VENUS_MODEM, |
2315 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2314 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2316 | pbn_b1_1_115200 }, | 2315 | pbn_b1_1_115200 }, |
2317 | 2316 | ||
2318 | /* | 2317 | /* |
@@ -2320,16 +2319,16 @@ static struct pci_device_id serial_pci_tbl[] = { | |||
2320 | * The 400L and 800L have a custom setup quirk. | 2319 | * The 400L and 800L have a custom setup quirk. |
2321 | */ | 2320 | */ |
2322 | { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100, | 2321 | { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100, |
2323 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2322 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2324 | pbn_b0_1_921600 }, | 2323 | pbn_b0_1_921600 }, |
2325 | { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200, | 2324 | { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_200, |
2326 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2325 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2327 | pbn_b0_2_921600 }, | 2326 | pbn_b0_2_921600 }, |
2328 | { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400, | 2327 | { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_400, |
2329 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2328 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2330 | pbn_b0_4_921600 }, | 2329 | pbn_b0_4_921600 }, |
2331 | { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800B, | 2330 | { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_800B, |
2332 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2331 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
2333 | pbn_b0_4_921600 }, | 2332 | pbn_b0_4_921600 }, |
2334 | { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100L, | 2333 | { PCI_VENDOR_ID_TITAN, PCI_DEVICE_ID_TITAN_100L, |
2335 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, | 2334 | PCI_ANY_ID, PCI_ANY_ID, 0, 0, |
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig index 84a054d7e986..b82595cf13e8 100644 --- a/drivers/serial/Kconfig +++ b/drivers/serial/Kconfig | |||
@@ -380,6 +380,21 @@ config SERIAL_ATMEL_CONSOLE | |||
380 | console is the device which receives all kernel messages and | 380 | console is the device which receives all kernel messages and |
381 | warnings and which allows logins in single user mode). | 381 | warnings and which allows logins in single user mode). |
382 | 382 | ||
383 | config SERIAL_ATMEL_PDC | ||
384 | bool "Support DMA transfers on AT91 / AT32 serial port" | ||
385 | depends on SERIAL_ATMEL | ||
386 | default y | ||
387 | help | ||
388 | Say Y here if you wish to use the PDC to do DMA transfers to | ||
389 | and from the Atmel AT91 / AT32 serial port. In order to | ||
390 | actually use DMA transfers, make sure that the use_dma_tx | ||
391 | and use_dma_rx members in the atmel_uart_data struct are set | ||
392 | appropriately for each port. | ||
393 | |||
394 | Note that break and error handling currently doesn't work | ||
395 | properly when DMA is enabled. Make sure that ports where | ||
396 | this matters don't use DMA. | ||
397 | |||
383 | config SERIAL_ATMEL_TTYAT | 398 | config SERIAL_ATMEL_TTYAT |
384 | bool "Install as device ttyATn instead of ttySn" | 399 | bool "Install as device ttyATn instead of ttySn" |
385 | depends on SERIAL_ATMEL=y | 400 | depends on SERIAL_ATMEL=y |
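
The SERIAL_ATMEL_PDC help above points at the use_dma_tx and use_dma_rx members of atmel_uart_data. A minimal board-code sketch of enabling PDC transfers for one port; only the two DMA flags come from the help text, while the header path and anything else about the structure are assumptions:

/*
 * Sketch only: per-port platform data with PDC transfers enabled.
 * The two flags are the ones named in the SERIAL_ATMEL_PDC help;
 * everything else about the structure is left to the platform code.
 */
#include <asm/arch/board.h>	/* assumed location of struct atmel_uart_data */

static struct atmel_uart_data uart0_data = {
	.use_dma_tx	= 1,	/* let the PDC drive the transmitter */
	.use_dma_rx	= 1,	/* let the PDC fill the receive buffers */
};

Ports where the break/error limitation mentioned in the help matters would simply leave both flags at 0.
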
diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c index 60f52904aad0..fad245b064d6 100644 --- a/drivers/serial/atmel_serial.c +++ b/drivers/serial/atmel_serial.c | |||
@@ -7,6 +7,8 @@ | |||
7 | * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd. | 7 | * Based on drivers/char/serial_sa1100.c, by Deep Blue Solutions Ltd. |
8 | * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. | 8 | * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. |
9 | * | 9 | * |
10 | * DMA support added by Chip Coldwell. | ||
11 | * | ||
10 | * This program is free software; you can redistribute it and/or modify | 12 | * This program is free software; you can redistribute it and/or modify |
11 | * it under the terms of the GNU General Public License as published by | 13 | * it under the terms of the GNU General Public License as published by |
12 | * the Free Software Foundation; either version 2 of the License, or | 14 | * the Free Software Foundation; either version 2 of the License, or |
@@ -33,6 +35,7 @@ | |||
33 | #include <linux/sysrq.h> | 35 | #include <linux/sysrq.h> |
34 | #include <linux/tty_flip.h> | 36 | #include <linux/tty_flip.h> |
35 | #include <linux/platform_device.h> | 37 | #include <linux/platform_device.h> |
38 | #include <linux/dma-mapping.h> | ||
36 | #include <linux/atmel_pdc.h> | 39 | #include <linux/atmel_pdc.h> |
37 | #include <linux/atmel_serial.h> | 40 | #include <linux/atmel_serial.h> |
38 | 41 | ||
@@ -46,6 +49,10 @@ | |||
46 | #include <asm/arch/gpio.h> | 49 | #include <asm/arch/gpio.h> |
47 | #endif | 50 | #endif |
48 | 51 | ||
52 | #define PDC_BUFFER_SIZE 512 | ||
53 | /* Revisit: We should calculate this based on the actual port settings */ | ||
54 | #define PDC_RX_TIMEOUT (3 * 10) /* 3 bytes */ | ||
55 | |||
49 | #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) | 56 | #if defined(CONFIG_SERIAL_ATMEL_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) |
50 | #define SUPPORT_SYSRQ | 57 | #define SUPPORT_SYSRQ |
51 | #endif | 58 | #endif |
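
The PDC_RX_TIMEOUT value above encodes three characters of ten bits each, and the "Revisit" comment asks for a value derived from the real port settings. A hedged sketch of that calculation, assuming the receiver time-out counts bit periods as the 3 * 10 constant implies (the helper name is illustrative, not part of the patch):

/*
 * Illustrative only: derive the receive time-out from the configured
 * frame instead of hard-coding 8N1 (1 start + 8 data + 1 stop = 10 bits).
 */
static unsigned int atmel_pdc_rx_timeout(unsigned int data_bits,
					 unsigned int parity_bits,
					 unsigned int stop_bits,
					 unsigned int nchars)
{
	unsigned int bits_per_char = 1 + data_bits + parity_bits + stop_bits;

	return nchars * bits_per_char;	/* candidate value for UART_PUT_RTOR() */
}

With 8N1 and nchars = 3 this reproduces the (3 * 10) constant used above.
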
@@ -73,6 +80,7 @@ | |||
73 | 80 | ||
74 | #define ATMEL_ISR_PASS_LIMIT 256 | 81 | #define ATMEL_ISR_PASS_LIMIT 256 |
75 | 82 | ||
83 | /* UART registers. CR is write-only, hence no GET macro */ | ||
76 | #define UART_PUT_CR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_CR) | 84 | #define UART_PUT_CR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_CR) |
77 | #define UART_GET_MR(port) __raw_readl((port)->membase + ATMEL_US_MR) | 85 | #define UART_GET_MR(port) __raw_readl((port)->membase + ATMEL_US_MR) |
78 | #define UART_PUT_MR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_MR) | 86 | #define UART_PUT_MR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_MR) |
@@ -86,8 +94,6 @@ | |||
86 | #define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR) | 94 | #define UART_PUT_BRGR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_BRGR) |
87 | #define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR) | 95 | #define UART_PUT_RTOR(port,v) __raw_writel(v, (port)->membase + ATMEL_US_RTOR) |
88 | 96 | ||
89 | // #define UART_GET_CR(port) __raw_readl((port)->membase + ATMEL_US_CR) // is write-only | ||
90 | |||
91 | /* PDC registers */ | 97 | /* PDC registers */ |
92 | #define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR) | 98 | #define UART_PUT_PTCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_PTCR) |
93 | #define UART_GET_PTSR(port) __raw_readl((port)->membase + ATMEL_PDC_PTSR) | 99 | #define UART_GET_PTSR(port) __raw_readl((port)->membase + ATMEL_PDC_PTSR) |
@@ -100,12 +106,24 @@ | |||
100 | 106 | ||
101 | #define UART_PUT_TPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TPR) | 107 | #define UART_PUT_TPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TPR) |
102 | #define UART_PUT_TCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TCR) | 108 | #define UART_PUT_TCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TCR) |
103 | //#define UART_PUT_TNPR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TNPR) | ||
104 | //#define UART_PUT_TNCR(port,v) __raw_writel(v, (port)->membase + ATMEL_PDC_TNCR) | ||
105 | 109 | ||
106 | static int (*atmel_open_hook)(struct uart_port *); | 110 | static int (*atmel_open_hook)(struct uart_port *); |
107 | static void (*atmel_close_hook)(struct uart_port *); | 111 | static void (*atmel_close_hook)(struct uart_port *); |
108 | 112 | ||
113 | struct atmel_dma_buffer { | ||
114 | unsigned char *buf; | ||
115 | dma_addr_t dma_addr; | ||
116 | unsigned int dma_size; | ||
117 | unsigned int ofs; | ||
118 | }; | ||
119 | |||
120 | struct atmel_uart_char { | ||
121 | u16 status; | ||
122 | u16 ch; | ||
123 | }; | ||
124 | |||
125 | #define ATMEL_SERIAL_RINGSIZE 1024 | ||
126 | |||
109 | /* | 127 | /* |
110 | * We wrap our port structure around the generic uart_port. | 128 | * We wrap our port structure around the generic uart_port. |
111 | */ | 129 | */ |
@@ -114,6 +132,19 @@ struct atmel_uart_port { | |||
114 | struct clk *clk; /* uart clock */ | 132 | struct clk *clk; /* uart clock */ |
115 | unsigned short suspended; /* is port suspended? */ | 133 | unsigned short suspended; /* is port suspended? */ |
116 | int break_active; /* break being received */ | 134 | int break_active; /* break being received */ |
135 | |||
136 | short use_dma_rx; /* enable PDC receiver */ | ||
137 | short pdc_rx_idx; /* current PDC RX buffer */ | ||
138 | struct atmel_dma_buffer pdc_rx[2]; /* PDC receiver */ | ||
139 | |||
140 | short use_dma_tx; /* enable PDC transmitter */ | ||
141 | struct atmel_dma_buffer pdc_tx; /* PDC transmitter */ | ||
142 | |||
143 | struct tasklet_struct tasklet; | ||
144 | unsigned int irq_status; | ||
145 | unsigned int irq_status_prev; | ||
146 | |||
147 | struct circ_buf rx_ring; | ||
117 | }; | 148 | }; |
118 | 149 | ||
119 | static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; | 150 | static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; |
@@ -122,6 +153,38 @@ static struct atmel_uart_port atmel_ports[ATMEL_MAX_UART]; | |||
122 | static struct console atmel_console; | 153 | static struct console atmel_console; |
123 | #endif | 154 | #endif |
124 | 155 | ||
156 | static inline struct atmel_uart_port * | ||
157 | to_atmel_uart_port(struct uart_port *uart) | ||
158 | { | ||
159 | return container_of(uart, struct atmel_uart_port, uart); | ||
160 | } | ||
161 | |||
162 | #ifdef CONFIG_SERIAL_ATMEL_PDC | ||
163 | static bool atmel_use_dma_rx(struct uart_port *port) | ||
164 | { | ||
165 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
166 | |||
167 | return atmel_port->use_dma_rx; | ||
168 | } | ||
169 | |||
170 | static bool atmel_use_dma_tx(struct uart_port *port) | ||
171 | { | ||
172 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
173 | |||
174 | return atmel_port->use_dma_tx; | ||
175 | } | ||
176 | #else | ||
177 | static bool atmel_use_dma_rx(struct uart_port *port) | ||
178 | { | ||
179 | return false; | ||
180 | } | ||
181 | |||
182 | static bool atmel_use_dma_tx(struct uart_port *port) | ||
183 | { | ||
184 | return false; | ||
185 | } | ||
186 | #endif | ||
187 | |||
125 | /* | 188 | /* |
126 | * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty. | 189 | * Return TIOCSER_TEMT when transmitter FIFO and Shift register is empty. |
127 | */ | 190 | */ |
@@ -141,8 +204,8 @@ static void atmel_set_mctrl(struct uart_port *port, u_int mctrl) | |||
141 | #ifdef CONFIG_ARCH_AT91RM9200 | 204 | #ifdef CONFIG_ARCH_AT91RM9200 |
142 | if (cpu_is_at91rm9200()) { | 205 | if (cpu_is_at91rm9200()) { |
143 | /* | 206 | /* |
144 | * AT91RM9200 Errata #39: RTS0 is not internally connected to PA21. | 207 | * AT91RM9200 Errata #39: RTS0 is not internally connected |
145 | * We need to drive the pin manually. | 208 | * to PA21. We need to drive the pin manually. |
146 | */ | 209 | */ |
147 | if (port->mapbase == AT91RM9200_BASE_US0) { | 210 | if (port->mapbase == AT91RM9200_BASE_US0) { |
148 | if (mctrl & TIOCM_RTS) | 211 | if (mctrl & TIOCM_RTS) |
@@ -203,7 +266,12 @@ static u_int atmel_get_mctrl(struct uart_port *port) | |||
203 | */ | 266 | */ |
204 | static void atmel_stop_tx(struct uart_port *port) | 267 | static void atmel_stop_tx(struct uart_port *port) |
205 | { | 268 | { |
206 | UART_PUT_IDR(port, ATMEL_US_TXRDY); | 269 | if (atmel_use_dma_tx(port)) { |
270 | /* disable PDC transmit */ | ||
271 | UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS); | ||
272 | UART_PUT_IDR(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE); | ||
273 | } else | ||
274 | UART_PUT_IDR(port, ATMEL_US_TXRDY); | ||
207 | } | 275 | } |
208 | 276 | ||
209 | /* | 277 | /* |
@@ -211,7 +279,17 @@ static void atmel_stop_tx(struct uart_port *port) | |||
211 | */ | 279 | */ |
212 | static void atmel_start_tx(struct uart_port *port) | 280 | static void atmel_start_tx(struct uart_port *port) |
213 | { | 281 | { |
214 | UART_PUT_IER(port, ATMEL_US_TXRDY); | 282 | if (atmel_use_dma_tx(port)) { |
283 | if (UART_GET_PTSR(port) & ATMEL_PDC_TXTEN) | ||
284 | /* The transmitter is already running. Yes, we | ||
285 | really need this.*/ | ||
286 | return; | ||
287 | |||
288 | UART_PUT_IER(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE); | ||
289 | /* re-enable PDC transmit */ | ||
290 | UART_PUT_PTCR(port, ATMEL_PDC_TXTEN); | ||
291 | } else | ||
292 | UART_PUT_IER(port, ATMEL_US_TXRDY); | ||
215 | } | 293 | } |
216 | 294 | ||
217 | /* | 295 | /* |
@@ -219,7 +297,12 @@ static void atmel_start_tx(struct uart_port *port) | |||
219 | */ | 297 | */ |
220 | static void atmel_stop_rx(struct uart_port *port) | 298 | static void atmel_stop_rx(struct uart_port *port) |
221 | { | 299 | { |
222 | UART_PUT_IDR(port, ATMEL_US_RXRDY); | 300 | if (atmel_use_dma_rx(port)) { |
301 | /* disable PDC receive */ | ||
302 | UART_PUT_PTCR(port, ATMEL_PDC_RXTDIS); | ||
303 | UART_PUT_IDR(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); | ||
304 | } else | ||
305 | UART_PUT_IDR(port, ATMEL_US_RXRDY); | ||
223 | } | 306 | } |
224 | 307 | ||
225 | /* | 308 | /* |
@@ -227,7 +310,8 @@ static void atmel_stop_rx(struct uart_port *port) | |||
227 | */ | 310 | */ |
228 | static void atmel_enable_ms(struct uart_port *port) | 311 | static void atmel_enable_ms(struct uart_port *port) |
229 | { | 312 | { |
230 | UART_PUT_IER(port, ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC | ATMEL_US_CTSIC); | 313 | UART_PUT_IER(port, ATMEL_US_RIIC | ATMEL_US_DSRIC |
314 | | ATMEL_US_DCDIC | ATMEL_US_CTSIC); | ||
231 | } | 315 | } |
232 | 316 | ||
233 | /* | 317 | /* |
@@ -242,22 +326,63 @@ static void atmel_break_ctl(struct uart_port *port, int break_state) | |||
242 | } | 326 | } |
243 | 327 | ||
244 | /* | 328 | /* |
329 | * Stores the incoming character in the ring buffer | ||
330 | */ | ||
331 | static void | ||
332 | atmel_buffer_rx_char(struct uart_port *port, unsigned int status, | ||
333 | unsigned int ch) | ||
334 | { | ||
335 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
336 | struct circ_buf *ring = &atmel_port->rx_ring; | ||
337 | struct atmel_uart_char *c; | ||
338 | |||
339 | if (!CIRC_SPACE(ring->head, ring->tail, ATMEL_SERIAL_RINGSIZE)) | ||
340 | /* Buffer overflow, ignore char */ | ||
341 | return; | ||
342 | |||
343 | c = &((struct atmel_uart_char *)ring->buf)[ring->head]; | ||
344 | c->status = status; | ||
345 | c->ch = ch; | ||
346 | |||
347 | /* Make sure the character is stored before we update head. */ | ||
348 | smp_wmb(); | ||
349 | |||
350 | ring->head = (ring->head + 1) & (ATMEL_SERIAL_RINGSIZE - 1); | ||
351 | } | ||
352 | |||
353 | /* | ||
354 | * Deal with parity, framing and overrun errors. | ||
355 | */ | ||
356 | static void atmel_pdc_rxerr(struct uart_port *port, unsigned int status) | ||
357 | { | ||
358 | /* clear error */ | ||
359 | UART_PUT_CR(port, ATMEL_US_RSTSTA); | ||
360 | |||
361 | if (status & ATMEL_US_RXBRK) { | ||
362 | /* ignore side-effect */ | ||
363 | status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); | ||
364 | port->icount.brk++; | ||
365 | } | ||
366 | if (status & ATMEL_US_PARE) | ||
367 | port->icount.parity++; | ||
368 | if (status & ATMEL_US_FRAME) | ||
369 | port->icount.frame++; | ||
370 | if (status & ATMEL_US_OVRE) | ||
371 | port->icount.overrun++; | ||
372 | } | ||
373 | |||
374 | /* | ||
245 | * Characters received (called from interrupt handler) | 375 | * Characters received (called from interrupt handler) |
246 | */ | 376 | */ |
247 | static void atmel_rx_chars(struct uart_port *port) | 377 | static void atmel_rx_chars(struct uart_port *port) |
248 | { | 378 | { |
249 | struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port; | 379 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); |
250 | struct tty_struct *tty = port->info->tty; | 380 | unsigned int status, ch; |
251 | unsigned int status, ch, flg; | ||
252 | 381 | ||
253 | status = UART_GET_CSR(port); | 382 | status = UART_GET_CSR(port); |
254 | while (status & ATMEL_US_RXRDY) { | 383 | while (status & ATMEL_US_RXRDY) { |
255 | ch = UART_GET_CHAR(port); | 384 | ch = UART_GET_CHAR(port); |
256 | 385 | ||
257 | port->icount.rx++; | ||
258 | |||
259 | flg = TTY_NORMAL; | ||
260 | |||
261 | /* | 386 | /* |
262 | * note that the error handling code is | 387 | * note that the error handling code is |
263 | * out of the main execution path | 388 | * out of the main execution path |
@@ -265,15 +390,14 @@ static void atmel_rx_chars(struct uart_port *port) | |||
265 | if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME | 390 | if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME |
266 | | ATMEL_US_OVRE | ATMEL_US_RXBRK) | 391 | | ATMEL_US_OVRE | ATMEL_US_RXBRK) |
267 | || atmel_port->break_active)) { | 392 | || atmel_port->break_active)) { |
268 | UART_PUT_CR(port, ATMEL_US_RSTSTA); /* clear error */ | 393 | |
394 | /* clear error */ | ||
395 | UART_PUT_CR(port, ATMEL_US_RSTSTA); | ||
396 | |||
269 | if (status & ATMEL_US_RXBRK | 397 | if (status & ATMEL_US_RXBRK |
270 | && !atmel_port->break_active) { | 398 | && !atmel_port->break_active) { |
271 | status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); /* ignore side-effect */ | ||
272 | port->icount.brk++; | ||
273 | atmel_port->break_active = 1; | 399 | atmel_port->break_active = 1; |
274 | UART_PUT_IER(port, ATMEL_US_RXBRK); | 400 | UART_PUT_IER(port, ATMEL_US_RXBRK); |
275 | if (uart_handle_break(port)) | ||
276 | goto ignore_char; | ||
277 | } else { | 401 | } else { |
278 | /* | 402 | /* |
279 | * This is either the end-of-break | 403 | * This is either the end-of-break |
@@ -286,52 +410,30 @@ static void atmel_rx_chars(struct uart_port *port) | |||
286 | status &= ~ATMEL_US_RXBRK; | 410 | status &= ~ATMEL_US_RXBRK; |
287 | atmel_port->break_active = 0; | 411 | atmel_port->break_active = 0; |
288 | } | 412 | } |
289 | if (status & ATMEL_US_PARE) | ||
290 | port->icount.parity++; | ||
291 | if (status & ATMEL_US_FRAME) | ||
292 | port->icount.frame++; | ||
293 | if (status & ATMEL_US_OVRE) | ||
294 | port->icount.overrun++; | ||
295 | |||
296 | status &= port->read_status_mask; | ||
297 | |||
298 | if (status & ATMEL_US_RXBRK) | ||
299 | flg = TTY_BREAK; | ||
300 | else if (status & ATMEL_US_PARE) | ||
301 | flg = TTY_PARITY; | ||
302 | else if (status & ATMEL_US_FRAME) | ||
303 | flg = TTY_FRAME; | ||
304 | } | 413 | } |
305 | 414 | ||
306 | if (uart_handle_sysrq_char(port, ch)) | 415 | atmel_buffer_rx_char(port, status, ch); |
307 | goto ignore_char; | ||
308 | |||
309 | uart_insert_char(port, status, ATMEL_US_OVRE, ch, flg); | ||
310 | |||
311 | ignore_char: | ||
312 | status = UART_GET_CSR(port); | 416 | status = UART_GET_CSR(port); |
313 | } | 417 | } |
314 | 418 | ||
315 | tty_flip_buffer_push(tty); | 419 | tasklet_schedule(&atmel_port->tasklet); |
316 | } | 420 | } |
317 | 421 | ||
318 | /* | 422 | /* |
319 | * Transmit characters (called from interrupt handler) | 423 | * Transmit characters (called from tasklet with TXRDY interrupt |
424 | * disabled) | ||
320 | */ | 425 | */ |
321 | static void atmel_tx_chars(struct uart_port *port) | 426 | static void atmel_tx_chars(struct uart_port *port) |
322 | { | 427 | { |
323 | struct circ_buf *xmit = &port->info->xmit; | 428 | struct circ_buf *xmit = &port->info->xmit; |
324 | 429 | ||
325 | if (port->x_char) { | 430 | if (port->x_char && UART_GET_CSR(port) & ATMEL_US_TXRDY) { |
326 | UART_PUT_CHAR(port, port->x_char); | 431 | UART_PUT_CHAR(port, port->x_char); |
327 | port->icount.tx++; | 432 | port->icount.tx++; |
328 | port->x_char = 0; | 433 | port->x_char = 0; |
329 | return; | ||
330 | } | 434 | } |
331 | if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { | 435 | if (uart_circ_empty(xmit) || uart_tx_stopped(port)) |
332 | atmel_stop_tx(port); | ||
333 | return; | 436 | return; |
334 | } | ||
335 | 437 | ||
336 | while (UART_GET_CSR(port) & ATMEL_US_TXRDY) { | 438 | while (UART_GET_CSR(port) & ATMEL_US_TXRDY) { |
337 | UART_PUT_CHAR(port, xmit->buf[xmit->tail]); | 439 | UART_PUT_CHAR(port, xmit->buf[xmit->tail]); |
@@ -344,8 +446,88 @@ static void atmel_tx_chars(struct uart_port *port) | |||
344 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | 446 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) |
345 | uart_write_wakeup(port); | 447 | uart_write_wakeup(port); |
346 | 448 | ||
347 | if (uart_circ_empty(xmit)) | 449 | if (!uart_circ_empty(xmit)) |
348 | atmel_stop_tx(port); | 450 | UART_PUT_IER(port, ATMEL_US_TXRDY); |
451 | } | ||
452 | |||
453 | /* | ||
454 | * receive interrupt handler. | ||
455 | */ | ||
456 | static void | ||
457 | atmel_handle_receive(struct uart_port *port, unsigned int pending) | ||
458 | { | ||
459 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
460 | |||
461 | if (atmel_use_dma_rx(port)) { | ||
462 | /* | ||
463 | * PDC receive. Just schedule the tasklet and let it | ||
464 | * figure out the details. | ||
465 | * | ||
466 | * TODO: We're not handling error flags correctly at | ||
467 | * the moment. | ||
468 | */ | ||
469 | if (pending & (ATMEL_US_ENDRX | ATMEL_US_TIMEOUT)) { | ||
470 | UART_PUT_IDR(port, (ATMEL_US_ENDRX | ||
471 | | ATMEL_US_TIMEOUT)); | ||
472 | tasklet_schedule(&atmel_port->tasklet); | ||
473 | } | ||
474 | |||
475 | if (pending & (ATMEL_US_RXBRK | ATMEL_US_OVRE | | ||
476 | ATMEL_US_FRAME | ATMEL_US_PARE)) | ||
477 | atmel_pdc_rxerr(port, pending); | ||
478 | } | ||
479 | |||
480 | /* Interrupt receive */ | ||
481 | if (pending & ATMEL_US_RXRDY) | ||
482 | atmel_rx_chars(port); | ||
483 | else if (pending & ATMEL_US_RXBRK) { | ||
484 | /* | ||
485 | * End of break detected. If it came along with a | ||
486 | * character, atmel_rx_chars will handle it. | ||
487 | */ | ||
488 | UART_PUT_CR(port, ATMEL_US_RSTSTA); | ||
489 | UART_PUT_IDR(port, ATMEL_US_RXBRK); | ||
490 | atmel_port->break_active = 0; | ||
491 | } | ||
492 | } | ||
493 | |||
494 | /* | ||
495 | * transmit interrupt handler. (Transmit is IRQF_NODELAY safe) | ||
496 | */ | ||
497 | static void | ||
498 | atmel_handle_transmit(struct uart_port *port, unsigned int pending) | ||
499 | { | ||
500 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
501 | |||
502 | if (atmel_use_dma_tx(port)) { | ||
503 | /* PDC transmit */ | ||
504 | if (pending & (ATMEL_US_ENDTX | ATMEL_US_TXBUFE)) { | ||
505 | UART_PUT_IDR(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE); | ||
506 | tasklet_schedule(&atmel_port->tasklet); | ||
507 | } | ||
508 | } else { | ||
509 | /* Interrupt transmit */ | ||
510 | if (pending & ATMEL_US_TXRDY) { | ||
511 | UART_PUT_IDR(port, ATMEL_US_TXRDY); | ||
512 | tasklet_schedule(&atmel_port->tasklet); | ||
513 | } | ||
514 | } | ||
515 | } | ||
516 | |||
517 | /* | ||
518 | * status flags interrupt handler. | ||
519 | */ | ||
520 | static void | ||
521 | atmel_handle_status(struct uart_port *port, unsigned int pending, | ||
522 | unsigned int status) | ||
523 | { | ||
524 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
525 | |||
526 | if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC | ||
527 | | ATMEL_US_CTSIC)) { | ||
528 | atmel_port->irq_status = status; | ||
529 | tasklet_schedule(&atmel_port->tasklet); | ||
530 | } | ||
349 | } | 531 | } |
350 | 532 | ||
351 | /* | 533 | /* |
@@ -354,47 +536,255 @@ static void atmel_tx_chars(struct uart_port *port) | |||
354 | static irqreturn_t atmel_interrupt(int irq, void *dev_id) | 536 | static irqreturn_t atmel_interrupt(int irq, void *dev_id) |
355 | { | 537 | { |
356 | struct uart_port *port = dev_id; | 538 | struct uart_port *port = dev_id; |
357 | struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port; | ||
358 | unsigned int status, pending, pass_counter = 0; | 539 | unsigned int status, pending, pass_counter = 0; |
359 | 540 | ||
360 | status = UART_GET_CSR(port); | 541 | do { |
361 | pending = status & UART_GET_IMR(port); | 542 | status = UART_GET_CSR(port); |
362 | while (pending) { | 543 | pending = status & UART_GET_IMR(port); |
363 | /* Interrupt receive */ | 544 | if (!pending) |
364 | if (pending & ATMEL_US_RXRDY) | 545 | break; |
365 | atmel_rx_chars(port); | 546 | |
366 | else if (pending & ATMEL_US_RXBRK) { | 547 | atmel_handle_receive(port, pending); |
548 | atmel_handle_status(port, pending, status); | ||
549 | atmel_handle_transmit(port, pending); | ||
550 | } while (pass_counter++ < ATMEL_ISR_PASS_LIMIT); | ||
551 | |||
552 | return IRQ_HANDLED; | ||
553 | } | ||
554 | |||
555 | /* | ||
556 | * Called from tasklet with ENDTX and TXBUFE interrupts disabled. | ||
557 | */ | ||
558 | static void atmel_tx_dma(struct uart_port *port) | ||
559 | { | ||
560 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
561 | struct circ_buf *xmit = &port->info->xmit; | ||
562 | struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; | ||
563 | int count; | ||
564 | |||
565 | xmit->tail += pdc->ofs; | ||
566 | xmit->tail &= UART_XMIT_SIZE - 1; | ||
567 | |||
568 | port->icount.tx += pdc->ofs; | ||
569 | pdc->ofs = 0; | ||
570 | |||
571 | if (!uart_circ_empty(xmit)) { | ||
572 | /* more to transmit - setup next transfer */ | ||
573 | |||
574 | /* disable PDC transmit */ | ||
575 | UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS); | ||
576 | dma_sync_single_for_device(port->dev, | ||
577 | pdc->dma_addr, | ||
578 | pdc->dma_size, | ||
579 | DMA_TO_DEVICE); | ||
580 | |||
581 | count = CIRC_CNT_TO_END(xmit->head, xmit->tail, UART_XMIT_SIZE); | ||
582 | pdc->ofs = count; | ||
583 | |||
584 | UART_PUT_TPR(port, pdc->dma_addr + xmit->tail); | ||
585 | UART_PUT_TCR(port, count); | ||
586 | /* re-enable PDC transmit and interrupts */ | ||
587 | UART_PUT_PTCR(port, ATMEL_PDC_TXTEN); | ||
588 | UART_PUT_IER(port, ATMEL_US_ENDTX | ATMEL_US_TXBUFE); | ||
589 | } else { | ||
590 | /* nothing left to transmit - disable the transmitter */ | ||
591 | |||
592 | /* disable PDC transmit */ | ||
593 | UART_PUT_PTCR(port, ATMEL_PDC_TXTDIS); | ||
594 | } | ||
595 | |||
596 | if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) | ||
597 | uart_write_wakeup(port); | ||
598 | } | ||
599 | |||
600 | static void atmel_rx_from_ring(struct uart_port *port) | ||
601 | { | ||
602 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
603 | struct circ_buf *ring = &atmel_port->rx_ring; | ||
604 | unsigned int flg; | ||
605 | unsigned int status; | ||
606 | |||
607 | while (ring->head != ring->tail) { | ||
608 | struct atmel_uart_char c; | ||
609 | |||
610 | /* Make sure c is loaded after head. */ | ||
611 | smp_rmb(); | ||
612 | |||
613 | c = ((struct atmel_uart_char *)ring->buf)[ring->tail]; | ||
614 | |||
615 | ring->tail = (ring->tail + 1) & (ATMEL_SERIAL_RINGSIZE - 1); | ||
616 | |||
617 | port->icount.rx++; | ||
618 | status = c.status; | ||
619 | flg = TTY_NORMAL; | ||
620 | |||
621 | /* | ||
622 | * note that the error handling code is | ||
623 | * out of the main execution path | ||
624 | */ | ||
625 | if (unlikely(status & (ATMEL_US_PARE | ATMEL_US_FRAME | ||
626 | | ATMEL_US_OVRE | ATMEL_US_RXBRK))) { | ||
627 | if (status & ATMEL_US_RXBRK) { | ||
628 | /* ignore side-effect */ | ||
629 | status &= ~(ATMEL_US_PARE | ATMEL_US_FRAME); | ||
630 | |||
631 | port->icount.brk++; | ||
632 | if (uart_handle_break(port)) | ||
633 | continue; | ||
634 | } | ||
635 | if (status & ATMEL_US_PARE) | ||
636 | port->icount.parity++; | ||
637 | if (status & ATMEL_US_FRAME) | ||
638 | port->icount.frame++; | ||
639 | if (status & ATMEL_US_OVRE) | ||
640 | port->icount.overrun++; | ||
641 | |||
642 | status &= port->read_status_mask; | ||
643 | |||
644 | if (status & ATMEL_US_RXBRK) | ||
645 | flg = TTY_BREAK; | ||
646 | else if (status & ATMEL_US_PARE) | ||
647 | flg = TTY_PARITY; | ||
648 | else if (status & ATMEL_US_FRAME) | ||
649 | flg = TTY_FRAME; | ||
650 | } | ||
651 | |||
652 | |||
653 | if (uart_handle_sysrq_char(port, c.ch)) | ||
654 | continue; | ||
655 | |||
656 | uart_insert_char(port, status, ATMEL_US_OVRE, c.ch, flg); | ||
657 | } | ||
658 | |||
659 | /* | ||
660 | * Drop the lock here since it might end up calling | ||
661 | * uart_start(), which takes the lock. | ||
662 | */ | ||
663 | spin_unlock(&port->lock); | ||
664 | tty_flip_buffer_push(port->info->tty); | ||
665 | spin_lock(&port->lock); | ||
666 | } | ||
667 | |||
668 | static void atmel_rx_from_dma(struct uart_port *port) | ||
669 | { | ||
670 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
671 | struct tty_struct *tty = port->info->tty; | ||
672 | struct atmel_dma_buffer *pdc; | ||
673 | int rx_idx = atmel_port->pdc_rx_idx; | ||
674 | unsigned int head; | ||
675 | unsigned int tail; | ||
676 | unsigned int count; | ||
677 | |||
678 | do { | ||
679 | /* Reset the UART timeout early so that we don't miss one */ | ||
680 | UART_PUT_CR(port, ATMEL_US_STTTO); | ||
681 | |||
682 | pdc = &atmel_port->pdc_rx[rx_idx]; | ||
683 | head = UART_GET_RPR(port) - pdc->dma_addr; | ||
684 | tail = pdc->ofs; | ||
685 | |||
686 | /* If the PDC has switched buffers, RPR won't contain | ||
687 | * any address within the current buffer. Since head | ||
688 | * is unsigned, we just need a one-way comparison to | ||
689 | * find out. | ||
690 | * | ||
691 | * In this case, we just need to consume the entire | ||
692 | * buffer and resubmit it for DMA. This will clear the | ||
693 | * ENDRX bit as well, so that we can safely re-enable | ||
694 | * all interrupts below. | ||
695 | */ | ||
696 | head = min(head, pdc->dma_size); | ||
697 | |||
698 | if (likely(head != tail)) { | ||
699 | dma_sync_single_for_cpu(port->dev, pdc->dma_addr, | ||
700 | pdc->dma_size, DMA_FROM_DEVICE); | ||
701 | |||
367 | /* | 702 | /* |
368 | * End of break detected. If it came along | 703 | * head will only wrap around when we recycle |
369 | * with a character, atmel_rx_chars will | 704 | * the DMA buffer, and when that happens, we |
370 | * handle it. | 705 | * explicitly set tail to 0. So head will |
706 | * always be greater than tail. | ||
371 | */ | 707 | */ |
372 | UART_PUT_CR(port, ATMEL_US_RSTSTA); | 708 | count = head - tail; |
373 | UART_PUT_IDR(port, ATMEL_US_RXBRK); | 709 | |
374 | atmel_port->break_active = 0; | 710 | tty_insert_flip_string(tty, pdc->buf + pdc->ofs, count); |
711 | |||
712 | dma_sync_single_for_device(port->dev, pdc->dma_addr, | ||
713 | pdc->dma_size, DMA_FROM_DEVICE); | ||
714 | |||
715 | port->icount.rx += count; | ||
716 | pdc->ofs = head; | ||
375 | } | 717 | } |
376 | 718 | ||
377 | // TODO: All reads to CSR will clear these interrupts! | 719 | /* |
378 | if (pending & ATMEL_US_RIIC) port->icount.rng++; | 720 | * If the current buffer is full, we need to check if |
379 | if (pending & ATMEL_US_DSRIC) port->icount.dsr++; | 721 | * the next one contains any additional data. |
380 | if (pending & ATMEL_US_DCDIC) | 722 | */ |
723 | if (head >= pdc->dma_size) { | ||
724 | pdc->ofs = 0; | ||
725 | UART_PUT_RNPR(port, pdc->dma_addr); | ||
726 | UART_PUT_RNCR(port, pdc->dma_size); | ||
727 | |||
728 | rx_idx = !rx_idx; | ||
729 | atmel_port->pdc_rx_idx = rx_idx; | ||
730 | } | ||
731 | } while (head >= pdc->dma_size); | ||
732 | |||
733 | /* | ||
734 | * Drop the lock here since it might end up calling | ||
735 | * uart_start(), which takes the lock. | ||
736 | */ | ||
737 | spin_unlock(&port->lock); | ||
738 | tty_flip_buffer_push(tty); | ||
739 | spin_lock(&port->lock); | ||
740 | |||
741 | UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); | ||
742 | } | ||
743 | |||
744 | /* | ||
745 | * tasklet handling tty stuff outside the interrupt handler. | ||
746 | */ | ||
747 | static void atmel_tasklet_func(unsigned long data) | ||
748 | { | ||
749 | struct uart_port *port = (struct uart_port *)data; | ||
750 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
751 | unsigned int status; | ||
752 | unsigned int status_change; | ||
753 | |||
754 | /* The interrupt handler does not take the lock */ | ||
755 | spin_lock(&port->lock); | ||
756 | |||
757 | if (atmel_use_dma_tx(port)) | ||
758 | atmel_tx_dma(port); | ||
759 | else | ||
760 | atmel_tx_chars(port); | ||
761 | |||
762 | status = atmel_port->irq_status; | ||
763 | status_change = status ^ atmel_port->irq_status_prev; | ||
764 | |||
765 | if (status_change & (ATMEL_US_RI | ATMEL_US_DSR | ||
766 | | ATMEL_US_DCD | ATMEL_US_CTS)) { | ||
767 | /* TODO: All reads to CSR will clear these interrupts! */ | ||
768 | if (status_change & ATMEL_US_RI) | ||
769 | port->icount.rng++; | ||
770 | if (status_change & ATMEL_US_DSR) | ||
771 | port->icount.dsr++; | ||
772 | if (status_change & ATMEL_US_DCD) | ||
381 | uart_handle_dcd_change(port, !(status & ATMEL_US_DCD)); | 773 | uart_handle_dcd_change(port, !(status & ATMEL_US_DCD)); |
382 | if (pending & ATMEL_US_CTSIC) | 774 | if (status_change & ATMEL_US_CTS) |
383 | uart_handle_cts_change(port, !(status & ATMEL_US_CTS)); | 775 | uart_handle_cts_change(port, !(status & ATMEL_US_CTS)); |
384 | if (pending & (ATMEL_US_RIIC | ATMEL_US_DSRIC | ATMEL_US_DCDIC | ATMEL_US_CTSIC)) | ||
385 | wake_up_interruptible(&port->info->delta_msr_wait); | ||
386 | 776 | ||
387 | /* Interrupt transmit */ | 777 | wake_up_interruptible(&port->info->delta_msr_wait); |
388 | if (pending & ATMEL_US_TXRDY) | ||
389 | atmel_tx_chars(port); | ||
390 | 778 | ||
391 | if (pass_counter++ > ATMEL_ISR_PASS_LIMIT) | 779 | atmel_port->irq_status_prev = status; |
392 | break; | ||
393 | |||
394 | status = UART_GET_CSR(port); | ||
395 | pending = status & UART_GET_IMR(port); | ||
396 | } | 780 | } |
397 | return IRQ_HANDLED; | 781 | |
782 | if (atmel_use_dma_rx(port)) | ||
783 | atmel_rx_from_dma(port); | ||
784 | else | ||
785 | atmel_rx_from_ring(port); | ||
786 | |||
787 | spin_unlock(&port->lock); | ||
398 | } | 788 | } |
399 | 789 | ||
400 | /* | 790 | /* |
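
The comments in atmel_rx_from_dma() above compress a fair amount of reasoning about the two PDC buffers, the clamped head offset and the recycling step. A small stand-alone model of the same bookkeeping, with plain integers instead of PDC registers and invented names, may help when reading that loop:

/*
 * Toy model of the double-buffer accounting in atmel_rx_from_dma().
 * "rpr_offset" stands in for UART_GET_RPR(port) - pdc->dma_addr and
 * consumed[] for pdc->ofs; no hardware is touched.
 */
#include <stdio.h>

#define BUF_SIZE 512u

static unsigned int consumed[2];

static int consume(int idx, unsigned int rpr_offset)
{
	/*
	 * Clamp like min(head, pdc->dma_size): once the PDC has moved on
	 * to the other buffer, treat this one as completely filled.
	 */
	unsigned int head = rpr_offset < BUF_SIZE ? rpr_offset : BUF_SIZE;
	unsigned int tail = consumed[idx];

	if (head != tail)
		printf("buffer %d: bytes %u..%u go to the tty\n",
		       idx, tail, head - 1);
	consumed[idx] = head;

	if (head >= BUF_SIZE) {
		/* Full: hand the buffer back (RNPR/RNCR) and switch. */
		consumed[idx] = 0;
		idx = !idx;
	}
	return idx;
}

int main(void)
{
	int idx = 0;

	idx = consume(idx, 100);	/* time-out after 100 bytes */
	idx = consume(idx, 300);	/* 200 more bytes, same buffer */
	idx = consume(idx, 4096);	/* PDC already switched buffers */
	return idx == 1 ? 0 : 1;
}

Unlike the driver, the model handles one buffer per call; the real loop keeps going while head reaches dma_size so that data already sitting in the next buffer is drained in the same pass.
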
@@ -402,6 +792,8 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id) | |||
402 | */ | 792 | */ |
403 | static int atmel_startup(struct uart_port *port) | 793 | static int atmel_startup(struct uart_port *port) |
404 | { | 794 | { |
795 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
796 | struct tty_struct *tty = port->info->tty; | ||
405 | int retval; | 797 | int retval; |
406 | 798 | ||
407 | /* | 799 | /* |
@@ -414,13 +806,64 @@ static int atmel_startup(struct uart_port *port) | |||
414 | /* | 806 | /* |
415 | * Allocate the IRQ | 807 | * Allocate the IRQ |
416 | */ | 808 | */ |
417 | retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED, "atmel_serial", port); | 809 | retval = request_irq(port->irq, atmel_interrupt, IRQF_SHARED, |
810 | tty ? tty->name : "atmel_serial", port); | ||
418 | if (retval) { | 811 | if (retval) { |
419 | printk("atmel_serial: atmel_startup - Can't get irq\n"); | 812 | printk("atmel_serial: atmel_startup - Can't get irq\n"); |
420 | return retval; | 813 | return retval; |
421 | } | 814 | } |
422 | 815 | ||
423 | /* | 816 | /* |
817 | * Initialize DMA (if necessary) | ||
818 | */ | ||
819 | if (atmel_use_dma_rx(port)) { | ||
820 | int i; | ||
821 | |||
822 | for (i = 0; i < 2; i++) { | ||
823 | struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; | ||
824 | |||
825 | pdc->buf = kmalloc(PDC_BUFFER_SIZE, GFP_KERNEL); | ||
826 | if (pdc->buf == NULL) { | ||
827 | if (i != 0) { | ||
828 | dma_unmap_single(port->dev, | ||
829 | atmel_port->pdc_rx[0].dma_addr, | ||
830 | PDC_BUFFER_SIZE, | ||
831 | DMA_FROM_DEVICE); | ||
832 | kfree(atmel_port->pdc_rx[0].buf); | ||
833 | } | ||
834 | free_irq(port->irq, port); | ||
835 | return -ENOMEM; | ||
836 | } | ||
837 | pdc->dma_addr = dma_map_single(port->dev, | ||
838 | pdc->buf, | ||
839 | PDC_BUFFER_SIZE, | ||
840 | DMA_FROM_DEVICE); | ||
841 | pdc->dma_size = PDC_BUFFER_SIZE; | ||
842 | pdc->ofs = 0; | ||
843 | } | ||
844 | |||
845 | atmel_port->pdc_rx_idx = 0; | ||
846 | |||
847 | UART_PUT_RPR(port, atmel_port->pdc_rx[0].dma_addr); | ||
848 | UART_PUT_RCR(port, PDC_BUFFER_SIZE); | ||
849 | |||
850 | UART_PUT_RNPR(port, atmel_port->pdc_rx[1].dma_addr); | ||
851 | UART_PUT_RNCR(port, PDC_BUFFER_SIZE); | ||
852 | } | ||
853 | if (atmel_use_dma_tx(port)) { | ||
854 | struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; | ||
855 | struct circ_buf *xmit = &port->info->xmit; | ||
856 | |||
857 | pdc->buf = xmit->buf; | ||
858 | pdc->dma_addr = dma_map_single(port->dev, | ||
859 | pdc->buf, | ||
860 | UART_XMIT_SIZE, | ||
861 | DMA_TO_DEVICE); | ||
862 | pdc->dma_size = UART_XMIT_SIZE; | ||
863 | pdc->ofs = 0; | ||
864 | } | ||
865 | |||
866 | /* | ||
424 | * If there is a specific "open" function (to register | 867 | * If there is a specific "open" function (to register |
425 | * control line interrupts) | 868 | * control line interrupts) |
426 | */ | 869 | */ |
@@ -436,9 +879,21 @@ static int atmel_startup(struct uart_port *port) | |||
436 | * Finally, enable the serial port | 879 | * Finally, enable the serial port |
437 | */ | 880 | */ |
438 | UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); | 881 | UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); |
439 | UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN); /* enable xmit & rcvr */ | 882 | /* enable xmit & rcvr */ |
883 | UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN); | ||
440 | 884 | ||
441 | UART_PUT_IER(port, ATMEL_US_RXRDY); /* enable receive only */ | 885 | if (atmel_use_dma_rx(port)) { |
886 | /* set UART timeout */ | ||
887 | UART_PUT_RTOR(port, PDC_RX_TIMEOUT); | ||
888 | UART_PUT_CR(port, ATMEL_US_STTTO); | ||
889 | |||
890 | UART_PUT_IER(port, ATMEL_US_ENDRX | ATMEL_US_TIMEOUT); | ||
891 | /* enable PDC controller */ | ||
892 | UART_PUT_PTCR(port, ATMEL_PDC_RXTEN); | ||
893 | } else { | ||
894 | /* enable receive only */ | ||
895 | UART_PUT_IER(port, ATMEL_US_RXRDY); | ||
896 | } | ||
442 | 897 | ||
443 | return 0; | 898 | return 0; |
444 | } | 899 | } |
@@ -448,6 +903,38 @@ static int atmel_startup(struct uart_port *port) | |||
448 | */ | 903 | */ |
449 | static void atmel_shutdown(struct uart_port *port) | 904 | static void atmel_shutdown(struct uart_port *port) |
450 | { | 905 | { |
906 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); | ||
907 | /* | ||
908 | * Ensure everything is stopped. | ||
909 | */ | ||
910 | atmel_stop_rx(port); | ||
911 | atmel_stop_tx(port); | ||
912 | |||
913 | /* | ||
914 | * Shut-down the DMA. | ||
915 | */ | ||
916 | if (atmel_use_dma_rx(port)) { | ||
917 | int i; | ||
918 | |||
919 | for (i = 0; i < 2; i++) { | ||
920 | struct atmel_dma_buffer *pdc = &atmel_port->pdc_rx[i]; | ||
921 | |||
922 | dma_unmap_single(port->dev, | ||
923 | pdc->dma_addr, | ||
924 | pdc->dma_size, | ||
925 | DMA_FROM_DEVICE); | ||
926 | kfree(pdc->buf); | ||
927 | } | ||
928 | } | ||
929 | if (atmel_use_dma_tx(port)) { | ||
930 | struct atmel_dma_buffer *pdc = &atmel_port->pdc_tx; | ||
931 | |||
932 | dma_unmap_single(port->dev, | ||
933 | pdc->dma_addr, | ||
934 | pdc->dma_size, | ||
935 | DMA_TO_DEVICE); | ||
936 | } | ||
937 | |||
451 | /* | 938 | /* |
452 | * Disable all interrupts, port and break condition. | 939 | * Disable all interrupts, port and break condition. |
453 | */ | 940 | */ |
@@ -470,45 +957,48 @@ static void atmel_shutdown(struct uart_port *port) | |||
470 | /* | 957 | /* |
471 | * Power / Clock management. | 958 | * Power / Clock management. |
472 | */ | 959 | */ |
473 | static void atmel_serial_pm(struct uart_port *port, unsigned int state, unsigned int oldstate) | 960 | static void atmel_serial_pm(struct uart_port *port, unsigned int state, |
961 | unsigned int oldstate) | ||
474 | { | 962 | { |
475 | struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port; | 963 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); |
476 | 964 | ||
477 | switch (state) { | 965 | switch (state) { |
478 | case 0: | 966 | case 0: |
479 | /* | 967 | /* |
480 | * Enable the peripheral clock for this serial port. | 968 | * Enable the peripheral clock for this serial port. |
481 | * This is called on uart_open() or a resume event. | 969 | * This is called on uart_open() or a resume event. |
482 | */ | 970 | */ |
483 | clk_enable(atmel_port->clk); | 971 | clk_enable(atmel_port->clk); |
484 | break; | 972 | break; |
485 | case 3: | 973 | case 3: |
486 | /* | 974 | /* |
487 | * Disable the peripheral clock for this serial port. | 975 | * Disable the peripheral clock for this serial port. |
488 | * This is called on uart_close() or a suspend event. | 976 | * This is called on uart_close() or a suspend event. |
489 | */ | 977 | */ |
490 | clk_disable(atmel_port->clk); | 978 | clk_disable(atmel_port->clk); |
491 | break; | 979 | break; |
492 | default: | 980 | default: |
493 | printk(KERN_ERR "atmel_serial: unknown pm %d\n", state); | 981 | printk(KERN_ERR "atmel_serial: unknown pm %d\n", state); |
494 | } | 982 | } |
495 | } | 983 | } |
496 | 984 | ||
497 | /* | 985 | /* |
498 | * Change the port parameters | 986 | * Change the port parameters |
499 | */ | 987 | */ |
500 | static void atmel_set_termios(struct uart_port *port, struct ktermios * termios, struct ktermios * old) | 988 | static void atmel_set_termios(struct uart_port *port, struct ktermios *termios, |
989 | struct ktermios *old) | ||
501 | { | 990 | { |
502 | unsigned long flags; | 991 | unsigned long flags; |
503 | unsigned int mode, imr, quot, baud; | 992 | unsigned int mode, imr, quot, baud; |
504 | 993 | ||
505 | /* Get current mode register */ | 994 | /* Get current mode register */ |
506 | mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL | ATMEL_US_NBSTOP | ATMEL_US_PAR); | 995 | mode = UART_GET_MR(port) & ~(ATMEL_US_USCLKS | ATMEL_US_CHRL |
996 | | ATMEL_US_NBSTOP | ATMEL_US_PAR); | ||
507 | 997 | ||
508 | baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); | 998 | baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); |
509 | quot = uart_get_divisor(port, baud); | 999 | quot = uart_get_divisor(port, baud); |
510 | 1000 | ||
511 | if (quot > 65535) { /* BRGR is 16-bit, so switch to slower clock */ | 1001 | if (quot > 65535) { /* BRGR is 16-bit, so switch to slower clock */ |
512 | quot /= 8; | 1002 | quot /= 8; |
513 | mode |= ATMEL_US_USCLKS_MCK_DIV8; | 1003 | mode |= ATMEL_US_USCLKS_MCK_DIV8; |
514 | } | 1004 | } |
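A worked example of the BRGR overflow fallback above, with assumed clock numbers:

        /*
         * Assume uartclk = 100 MHz and a requested 50 baud:
         *   quot = 100000000 / (16 * 50) = 125000  -> too big for 16-bit BRGR
         *   quot / 8 = 15625                       -> fits
         * Selecting ATMEL_US_USCLKS_MCK_DIV8 keeps the effective divisor
         * (8 * 15625 = 125000), so the resulting baud rate is unchanged.
         */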
@@ -535,18 +1025,17 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios * termios, | |||
535 | 1025 | ||
536 | /* parity */ | 1026 | /* parity */ |
537 | if (termios->c_cflag & PARENB) { | 1027 | if (termios->c_cflag & PARENB) { |
538 | if (termios->c_cflag & CMSPAR) { /* Mark or Space parity */ | 1028 | /* Mark or Space parity */ |
1029 | if (termios->c_cflag & CMSPAR) { | ||
539 | if (termios->c_cflag & PARODD) | 1030 | if (termios->c_cflag & PARODD) |
540 | mode |= ATMEL_US_PAR_MARK; | 1031 | mode |= ATMEL_US_PAR_MARK; |
541 | else | 1032 | else |
542 | mode |= ATMEL_US_PAR_SPACE; | 1033 | mode |= ATMEL_US_PAR_SPACE; |
543 | } | 1034 | } else if (termios->c_cflag & PARODD) |
544 | else if (termios->c_cflag & PARODD) | ||
545 | mode |= ATMEL_US_PAR_ODD; | 1035 | mode |= ATMEL_US_PAR_ODD; |
546 | else | 1036 | else |
547 | mode |= ATMEL_US_PAR_EVEN; | 1037 | mode |= ATMEL_US_PAR_EVEN; |
548 | } | 1038 | } else |
549 | else | ||
550 | mode |= ATMEL_US_PAR_NONE; | 1039 | mode |= ATMEL_US_PAR_NONE; |
551 | 1040 | ||
552 | spin_lock_irqsave(&port->lock, flags); | 1041 | spin_lock_irqsave(&port->lock, flags); |
@@ -557,6 +1046,10 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios * termios, | |||
557 | if (termios->c_iflag & (BRKINT | PARMRK)) | 1046 | if (termios->c_iflag & (BRKINT | PARMRK)) |
558 | port->read_status_mask |= ATMEL_US_RXBRK; | 1047 | port->read_status_mask |= ATMEL_US_RXBRK; |
559 | 1048 | ||
1049 | if (atmel_use_dma_rx(port)) | ||
1050 | /* need to enable error interrupts */ | ||
1051 | UART_PUT_IER(port, port->read_status_mask); | ||
1052 | |||
560 | /* | 1053 | /* |
561 | * Characters to ignore | 1054 | * Characters to ignore |
562 | */ | 1055 | */ |
@@ -572,16 +1065,16 @@ static void atmel_set_termios(struct uart_port *port, struct ktermios * termios, | |||
572 | if (termios->c_iflag & IGNPAR) | 1065 | if (termios->c_iflag & IGNPAR) |
573 | port->ignore_status_mask |= ATMEL_US_OVRE; | 1066 | port->ignore_status_mask |= ATMEL_US_OVRE; |
574 | } | 1067 | } |
575 | 1068 | /* TODO: Ignore all characters if CREAD is set.*/ | |
576 | // TODO: Ignore all characters if CREAD is set. | ||
577 | 1069 | ||
578 | /* update the per-port timeout */ | 1070 | /* update the per-port timeout */ |
579 | uart_update_timeout(port, termios->c_cflag, baud); | 1071 | uart_update_timeout(port, termios->c_cflag, baud); |
580 | 1072 | ||
581 | /* disable interrupts and drain transmitter */ | 1073 | /* save/disable interrupts and drain transmitter */ |
582 | imr = UART_GET_IMR(port); /* get interrupt mask */ | 1074 | imr = UART_GET_IMR(port); |
583 | UART_PUT_IDR(port, -1); /* disable all interrupts */ | 1075 | UART_PUT_IDR(port, -1); |
584 | while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY)) { barrier(); } | 1076 | while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY)) |
1077 | cpu_relax(); | ||
585 | 1078 | ||
586 | /* disable receiver and transmitter */ | 1079 | /* disable receiver and transmitter */ |
587 | UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS); | 1080 | UART_PUT_CR(port, ATMEL_US_TXDIS | ATMEL_US_RXDIS); |
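The drain loop above now spins on TXEMPTY with cpu_relax() instead of barrier(), but it is still unbounded. Purely as a sketch (the iteration cap is arbitrary and not something the patch adds), a defensive variant would be:

        unsigned int tries = 100000;    /* arbitrary cap, sketch only */

        while (!(UART_GET_CSR(port) & ATMEL_US_TXEMPTY) && --tries)
                cpu_relax();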
@@ -707,7 +1200,8 @@ static struct uart_ops atmel_pops = { | |||
707 | /* | 1200 | /* |
708 | * Configure the port from the platform device resource info. | 1201 | * Configure the port from the platform device resource info. |
709 | */ | 1202 | */ |
710 | static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port, struct platform_device *pdev) | 1203 | static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port, |
1204 | struct platform_device *pdev) | ||
711 | { | 1205 | { |
712 | struct uart_port *port = &atmel_port->uart; | 1206 | struct uart_port *port = &atmel_port->uart; |
713 | struct atmel_uart_data *data = pdev->dev.platform_data; | 1207 | struct atmel_uart_data *data = pdev->dev.platform_data; |
@@ -722,6 +1216,11 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port, struct | |||
722 | port->mapbase = pdev->resource[0].start; | 1216 | port->mapbase = pdev->resource[0].start; |
723 | port->irq = pdev->resource[1].start; | 1217 | port->irq = pdev->resource[1].start; |
724 | 1218 | ||
1219 | tasklet_init(&atmel_port->tasklet, atmel_tasklet_func, | ||
1220 | (unsigned long)port); | ||
1221 | |||
1222 | memset(&atmel_port->rx_ring, 0, sizeof(atmel_port->rx_ring)); | ||
1223 | |||
725 | if (data->regs) | 1224 | if (data->regs) |
726 | /* Already mapped by setup code */ | 1225 | /* Already mapped by setup code */ |
727 | port->membase = data->regs; | 1226 | port->membase = data->regs; |
@@ -730,11 +1229,17 @@ static void __devinit atmel_init_port(struct atmel_uart_port *atmel_port, struct | |||
730 | port->membase = NULL; | 1229 | port->membase = NULL; |
731 | } | 1230 | } |
732 | 1231 | ||
733 | if (!atmel_port->clk) { /* for console, the clock could already be configured */ | 1232 | /* for console, the clock could already be configured */ |
1233 | if (!atmel_port->clk) { | ||
734 | atmel_port->clk = clk_get(&pdev->dev, "usart"); | 1234 | atmel_port->clk = clk_get(&pdev->dev, "usart"); |
735 | clk_enable(atmel_port->clk); | 1235 | clk_enable(atmel_port->clk); |
736 | port->uartclk = clk_get_rate(atmel_port->clk); | 1236 | port->uartclk = clk_get_rate(atmel_port->clk); |
737 | } | 1237 | } |
1238 | |||
1239 | atmel_port->use_dma_rx = data->use_dma_rx; | ||
1240 | atmel_port->use_dma_tx = data->use_dma_tx; | ||
1241 | if (atmel_use_dma_tx(port)) | ||
1242 | port->fifosize = PDC_BUFFER_SIZE; | ||
738 | } | 1243 | } |
739 | 1244 | ||
740 | /* | 1245 | /* |
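tasklet_init() above hands the port to atmel_tasklet_func(), whose body is not part of this hunk. A minimal skeleton of the shape such a handler takes; the work listed in the comment is an assumption about what the real function does:

static void atmel_tasklet_func(unsigned long data)
{
        struct uart_port *port = (struct uart_port *)data;

        spin_lock(&port->lock);
        /* typically: push buffered RX characters to the tty layer,
         * kick the next TX transfer, report modem-status changes */
        spin_unlock(&port->lock);
}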
@@ -754,12 +1259,11 @@ void __init atmel_register_uart_fns(struct atmel_port_fns *fns) | |||
754 | atmel_pops.set_wake = fns->set_wake; | 1259 | atmel_pops.set_wake = fns->set_wake; |
755 | } | 1260 | } |
756 | 1261 | ||
757 | |||
758 | #ifdef CONFIG_SERIAL_ATMEL_CONSOLE | 1262 | #ifdef CONFIG_SERIAL_ATMEL_CONSOLE |
759 | static void atmel_console_putchar(struct uart_port *port, int ch) | 1263 | static void atmel_console_putchar(struct uart_port *port, int ch) |
760 | { | 1264 | { |
761 | while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY)) | 1265 | while (!(UART_GET_CSR(port) & ATMEL_US_TXRDY)) |
762 | barrier(); | 1266 | cpu_relax(); |
763 | UART_PUT_CHAR(port, ch); | 1267 | UART_PUT_CHAR(port, ch); |
764 | } | 1268 | } |
765 | 1269 | ||
@@ -772,38 +1276,40 @@ static void atmel_console_write(struct console *co, const char *s, u_int count) | |||
772 | unsigned int status, imr; | 1276 | unsigned int status, imr; |
773 | 1277 | ||
774 | /* | 1278 | /* |
775 | * First, save IMR and then disable interrupts | 1279 | * First, save IMR and then disable interrupts |
776 | */ | 1280 | */ |
777 | imr = UART_GET_IMR(port); /* get interrupt mask */ | 1281 | imr = UART_GET_IMR(port); |
778 | UART_PUT_IDR(port, ATMEL_US_RXRDY | ATMEL_US_TXRDY); | 1282 | UART_PUT_IDR(port, ATMEL_US_RXRDY | ATMEL_US_TXRDY); |
779 | 1283 | ||
780 | uart_console_write(port, s, count, atmel_console_putchar); | 1284 | uart_console_write(port, s, count, atmel_console_putchar); |
781 | 1285 | ||
782 | /* | 1286 | /* |
783 | * Finally, wait for transmitter to become empty | 1287 | * Finally, wait for transmitter to become empty |
784 | * and restore IMR | 1288 | * and restore IMR |
785 | */ | 1289 | */ |
786 | do { | 1290 | do { |
787 | status = UART_GET_CSR(port); | 1291 | status = UART_GET_CSR(port); |
788 | } while (!(status & ATMEL_US_TXRDY)); | 1292 | } while (!(status & ATMEL_US_TXRDY)); |
789 | UART_PUT_IER(port, imr); /* set interrupts back the way they were */ | 1293 | /* set interrupts back the way they were */ |
1294 | UART_PUT_IER(port, imr); | ||
790 | } | 1295 | } |
791 | 1296 | ||
792 | /* | 1297 | /* |
793 | * If the port was already initialised (eg, by a boot loader), try to determine | 1298 | * If the port was already initialised (eg, by a boot loader), |
794 | * the current setup. | 1299 | * try to determine the current setup. |
795 | */ | 1300 | */ |
796 | static void __init atmel_console_get_options(struct uart_port *port, int *baud, int *parity, int *bits) | 1301 | static void __init atmel_console_get_options(struct uart_port *port, int *baud, |
1302 | int *parity, int *bits) | ||
797 | { | 1303 | { |
798 | unsigned int mr, quot; | 1304 | unsigned int mr, quot; |
799 | 1305 | ||
800 | // TODO: CR is a write-only register | 1306 | /* |
801 | // unsigned int cr; | 1307 | * If the baud rate generator isn't running, the port wasn't |
802 | // | 1308 | * initialized by the boot loader. |
803 | // cr = UART_GET_CR(port) & (ATMEL_US_RXEN | ATMEL_US_TXEN); | 1309 | */ |
804 | // if (cr == (ATMEL_US_RXEN | ATMEL_US_TXEN)) { | 1310 | quot = UART_GET_BRGR(port); |
805 | // /* ok, the port was enabled */ | 1311 | if (!quot) |
806 | // } | 1312 | return; |
807 | 1313 | ||
808 | mr = UART_GET_MR(port) & ATMEL_US_CHRL; | 1314 | mr = UART_GET_MR(port) & ATMEL_US_CHRL; |
809 | if (mr == ATMEL_US_CHRL_8) | 1315 | if (mr == ATMEL_US_CHRL_8) |
@@ -823,7 +1329,6 @@ static void __init atmel_console_get_options(struct uart_port *port, int *baud, | |||
823 | * lower than one of those, as it would make us fall through | 1329 | * lower than one of those, as it would make us fall through |
824 | * to a much lower baud rate than we really want. | 1330 | * to a much lower baud rate than we really want. |
825 | */ | 1331 | */ |
826 | quot = UART_GET_BRGR(port); | ||
827 | *baud = port->uartclk / (16 * (quot - 1)); | 1332 | *baud = port->uartclk / (16 * (quot - 1)); |
828 | } | 1333 | } |
829 | 1334 | ||
@@ -835,10 +1340,12 @@ static int __init atmel_console_setup(struct console *co, char *options) | |||
835 | int parity = 'n'; | 1340 | int parity = 'n'; |
836 | int flow = 'n'; | 1341 | int flow = 'n'; |
837 | 1342 | ||
838 | if (port->membase == 0) /* Port not initialized yet - delay setup */ | 1343 | if (port->membase == NULL) { |
1344 | /* Port not initialized yet - delay setup */ | ||
839 | return -ENODEV; | 1345 | return -ENODEV; |
1346 | } | ||
840 | 1347 | ||
841 | UART_PUT_IDR(port, -1); /* disable interrupts */ | 1348 | UART_PUT_IDR(port, -1); |
842 | UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); | 1349 | UART_PUT_CR(port, ATMEL_US_RSTSTA | ATMEL_US_RSTRX); |
843 | UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN); | 1350 | UART_PUT_CR(port, ATMEL_US_TXEN | ATMEL_US_RXEN); |
844 | 1351 | ||
@@ -870,13 +1377,16 @@ static struct console atmel_console = { | |||
870 | static int __init atmel_console_init(void) | 1377 | static int __init atmel_console_init(void) |
871 | { | 1378 | { |
872 | if (atmel_default_console_device) { | 1379 | if (atmel_default_console_device) { |
873 | add_preferred_console(ATMEL_DEVICENAME, atmel_default_console_device->id, NULL); | 1380 | add_preferred_console(ATMEL_DEVICENAME, |
874 | atmel_init_port(&(atmel_ports[atmel_default_console_device->id]), atmel_default_console_device); | 1381 | atmel_default_console_device->id, NULL); |
1382 | atmel_init_port(&atmel_ports[atmel_default_console_device->id], | ||
1383 | atmel_default_console_device); | ||
875 | register_console(&atmel_console); | 1384 | register_console(&atmel_console); |
876 | } | 1385 | } |
877 | 1386 | ||
878 | return 0; | 1387 | return 0; |
879 | } | 1388 | } |
1389 | |||
880 | console_initcall(atmel_console_init); | 1390 | console_initcall(atmel_console_init); |
881 | 1391 | ||
882 | /* | 1392 | /* |
@@ -884,34 +1394,48 @@ console_initcall(atmel_console_init); | |||
884 | */ | 1394 | */ |
885 | static int __init atmel_late_console_init(void) | 1395 | static int __init atmel_late_console_init(void) |
886 | { | 1396 | { |
887 | if (atmel_default_console_device && !(atmel_console.flags & CON_ENABLED)) | 1397 | if (atmel_default_console_device |
1398 | && !(atmel_console.flags & CON_ENABLED)) | ||
888 | register_console(&atmel_console); | 1399 | register_console(&atmel_console); |
889 | 1400 | ||
890 | return 0; | 1401 | return 0; |
891 | } | 1402 | } |
1403 | |||
892 | core_initcall(atmel_late_console_init); | 1404 | core_initcall(atmel_late_console_init); |
893 | 1405 | ||
1406 | static inline bool atmel_is_console_port(struct uart_port *port) | ||
1407 | { | ||
1408 | return port->cons && port->cons->index == port->line; | ||
1409 | } | ||
1410 | |||
894 | #else | 1411 | #else |
895 | #define ATMEL_CONSOLE_DEVICE NULL | 1412 | #define ATMEL_CONSOLE_DEVICE NULL |
1413 | |||
1414 | static inline bool atmel_is_console_port(struct uart_port *port) | ||
1415 | { | ||
1416 | return false; | ||
1417 | } | ||
896 | #endif | 1418 | #endif |
897 | 1419 | ||
898 | static struct uart_driver atmel_uart = { | 1420 | static struct uart_driver atmel_uart = { |
899 | .owner = THIS_MODULE, | 1421 | .owner = THIS_MODULE, |
900 | .driver_name = "atmel_serial", | 1422 | .driver_name = "atmel_serial", |
901 | .dev_name = ATMEL_DEVICENAME, | 1423 | .dev_name = ATMEL_DEVICENAME, |
902 | .major = SERIAL_ATMEL_MAJOR, | 1424 | .major = SERIAL_ATMEL_MAJOR, |
903 | .minor = MINOR_START, | 1425 | .minor = MINOR_START, |
904 | .nr = ATMEL_MAX_UART, | 1426 | .nr = ATMEL_MAX_UART, |
905 | .cons = ATMEL_CONSOLE_DEVICE, | 1427 | .cons = ATMEL_CONSOLE_DEVICE, |
906 | }; | 1428 | }; |
907 | 1429 | ||
908 | #ifdef CONFIG_PM | 1430 | #ifdef CONFIG_PM |
909 | static int atmel_serial_suspend(struct platform_device *pdev, pm_message_t state) | 1431 | static int atmel_serial_suspend(struct platform_device *pdev, |
1432 | pm_message_t state) | ||
910 | { | 1433 | { |
911 | struct uart_port *port = platform_get_drvdata(pdev); | 1434 | struct uart_port *port = platform_get_drvdata(pdev); |
912 | struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port; | 1435 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); |
913 | 1436 | ||
914 | if (device_may_wakeup(&pdev->dev) && !at91_suspend_entering_slow_clock()) | 1437 | if (device_may_wakeup(&pdev->dev) |
1438 | && !at91_suspend_entering_slow_clock()) | ||
915 | enable_irq_wake(port->irq); | 1439 | enable_irq_wake(port->irq); |
916 | else { | 1440 | else { |
917 | uart_suspend_port(&atmel_uart, port); | 1441 | uart_suspend_port(&atmel_uart, port); |
@@ -924,13 +1448,12 @@ static int atmel_serial_suspend(struct platform_device *pdev, pm_message_t state | |||
924 | static int atmel_serial_resume(struct platform_device *pdev) | 1448 | static int atmel_serial_resume(struct platform_device *pdev) |
925 | { | 1449 | { |
926 | struct uart_port *port = platform_get_drvdata(pdev); | 1450 | struct uart_port *port = platform_get_drvdata(pdev); |
927 | struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port; | 1451 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); |
928 | 1452 | ||
929 | if (atmel_port->suspended) { | 1453 | if (atmel_port->suspended) { |
930 | uart_resume_port(&atmel_uart, port); | 1454 | uart_resume_port(&atmel_uart, port); |
931 | atmel_port->suspended = 0; | 1455 | atmel_port->suspended = 0; |
932 | } | 1456 | } else |
933 | else | ||
934 | disable_irq_wake(port->irq); | 1457 | disable_irq_wake(port->irq); |
935 | 1458 | ||
936 | return 0; | 1459 | return 0; |
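The suspend/resume pair above shows the usual wakeup-IRQ pairing: every enable_irq_wake() taken at suspend is matched by a disable_irq_wake() on resume, gated by the same condition. A stripped-down sketch with placeholder names (it omits the at91_suspend_entering_slow_clock() special case):

static int example_suspend(struct device *dev, int irq)
{
        if (device_may_wakeup(dev))
                enable_irq_wake(irq);
        return 0;
}

static int example_resume(struct device *dev, int irq)
{
        if (device_may_wakeup(dev))
                disable_irq_wake(irq);
        return 0;
}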
@@ -943,15 +1466,40 @@ static int atmel_serial_resume(struct platform_device *pdev) | |||
943 | static int __devinit atmel_serial_probe(struct platform_device *pdev) | 1466 | static int __devinit atmel_serial_probe(struct platform_device *pdev) |
944 | { | 1467 | { |
945 | struct atmel_uart_port *port; | 1468 | struct atmel_uart_port *port; |
1469 | void *data; | ||
946 | int ret; | 1470 | int ret; |
947 | 1471 | ||
1472 | BUILD_BUG_ON(!is_power_of_2(ATMEL_SERIAL_RINGSIZE)); | ||
1473 | |||
948 | port = &atmel_ports[pdev->id]; | 1474 | port = &atmel_ports[pdev->id]; |
949 | atmel_init_port(port, pdev); | 1475 | atmel_init_port(port, pdev); |
950 | 1476 | ||
1477 | if (!atmel_use_dma_rx(&port->uart)) { | ||
1478 | ret = -ENOMEM; | ||
1479 | data = kmalloc(sizeof(struct atmel_uart_char) | ||
1480 | * ATMEL_SERIAL_RINGSIZE, GFP_KERNEL); | ||
1481 | if (!data) | ||
1482 | goto err_alloc_ring; | ||
1483 | port->rx_ring.buf = data; | ||
1484 | } | ||
1485 | |||
951 | ret = uart_add_one_port(&atmel_uart, &port->uart); | 1486 | ret = uart_add_one_port(&atmel_uart, &port->uart); |
952 | if (!ret) { | 1487 | if (ret) |
953 | device_init_wakeup(&pdev->dev, 1); | 1488 | goto err_add_port; |
954 | platform_set_drvdata(pdev, port); | 1489 | |
1490 | device_init_wakeup(&pdev->dev, 1); | ||
1491 | platform_set_drvdata(pdev, port); | ||
1492 | |||
1493 | return 0; | ||
1494 | |||
1495 | err_add_port: | ||
1496 | kfree(port->rx_ring.buf); | ||
1497 | port->rx_ring.buf = NULL; | ||
1498 | err_alloc_ring: | ||
1499 | if (!atmel_is_console_port(&port->uart)) { | ||
1500 | clk_disable(port->clk); | ||
1501 | clk_put(port->clk); | ||
1502 | port->clk = NULL; | ||
955 | } | 1503 | } |
956 | 1504 | ||
957 | return ret; | 1505 | return ret; |
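The probe error path above follows the standard goto-unwind pattern: resources are released in reverse order of acquisition, with one label per acquisition point. In generic form (all names here are placeholders, not from the patch):

static int example_probe(struct platform_device *pdev)
{
        void *buf;
        int ret;

        buf = kmalloc(1024, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        ret = example_register(pdev, buf);      /* hypothetical helper */
        if (ret)
                goto err_free;

        platform_set_drvdata(pdev, buf);
        return 0;

err_free:
        kfree(buf);
        return ret;
}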
@@ -960,19 +1508,21 @@ static int __devinit atmel_serial_probe(struct platform_device *pdev) | |||
960 | static int __devexit atmel_serial_remove(struct platform_device *pdev) | 1508 | static int __devexit atmel_serial_remove(struct platform_device *pdev) |
961 | { | 1509 | { |
962 | struct uart_port *port = platform_get_drvdata(pdev); | 1510 | struct uart_port *port = platform_get_drvdata(pdev); |
963 | struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port; | 1511 | struct atmel_uart_port *atmel_port = to_atmel_uart_port(port); |
964 | int ret = 0; | 1512 | int ret = 0; |
965 | 1513 | ||
966 | clk_disable(atmel_port->clk); | ||
967 | clk_put(atmel_port->clk); | ||
968 | |||
969 | device_init_wakeup(&pdev->dev, 0); | 1514 | device_init_wakeup(&pdev->dev, 0); |
970 | platform_set_drvdata(pdev, NULL); | 1515 | platform_set_drvdata(pdev, NULL); |
971 | 1516 | ||
972 | if (port) { | 1517 | ret = uart_remove_one_port(&atmel_uart, port); |
973 | ret = uart_remove_one_port(&atmel_uart, port); | 1518 | |
974 | kfree(port); | 1519 | tasklet_kill(&atmel_port->tasklet); |
975 | } | 1520 | kfree(atmel_port->rx_ring.buf); |
1521 | |||
1522 | /* "port" is allocated statically, so we shouldn't free it */ | ||
1523 | |||
1524 | clk_disable(atmel_port->clk); | ||
1525 | clk_put(atmel_port->clk); | ||
976 | 1526 | ||
977 | return ret; | 1527 | return ret; |
978 | } | 1528 | } |
diff --git a/drivers/serial/serial_core.c b/drivers/serial/serial_core.c index 276da148c57e..0f5a17987cca 100644 --- a/drivers/serial/serial_core.c +++ b/drivers/serial/serial_core.c | |||
@@ -58,7 +58,8 @@ static struct lock_class_key port_lock_key; | |||
58 | #define uart_console(port) (0) | 58 | #define uart_console(port) (0) |
59 | #endif | 59 | #endif |
60 | 60 | ||
61 | static void uart_change_speed(struct uart_state *state, struct ktermios *old_termios); | 61 | static void uart_change_speed(struct uart_state *state, |
62 | struct ktermios *old_termios); | ||
62 | static void uart_wait_until_sent(struct tty_struct *tty, int timeout); | 63 | static void uart_wait_until_sent(struct tty_struct *tty, int timeout); |
63 | static void uart_change_pm(struct uart_state *state, int pm_state); | 64 | static void uart_change_pm(struct uart_state *state, int pm_state); |
64 | 65 | ||
@@ -129,8 +130,8 @@ uart_update_mctrl(struct uart_port *port, unsigned int set, unsigned int clear) | |||
129 | spin_unlock_irqrestore(&port->lock, flags); | 130 | spin_unlock_irqrestore(&port->lock, flags); |
130 | } | 131 | } |
131 | 132 | ||
132 | #define uart_set_mctrl(port,set) uart_update_mctrl(port,set,0) | 133 | #define uart_set_mctrl(port, set) uart_update_mctrl(port, set, 0) |
133 | #define uart_clear_mctrl(port,clear) uart_update_mctrl(port,0,clear) | 134 | #define uart_clear_mctrl(port, clear) uart_update_mctrl(port, 0, clear) |
134 | 135 | ||
135 | /* | 136 | /* |
136 | * Startup the port. This will be called once per open. All calls | 137 | * Startup the port. This will be called once per open. All calls |
@@ -290,7 +291,7 @@ uart_update_timeout(struct uart_port *port, unsigned int cflag, | |||
290 | break; | 291 | break; |
291 | default: | 292 | default: |
292 | bits = 10; | 293 | bits = 10; |
293 | break; // CS8 | 294 | break; /* CS8 */ |
294 | } | 295 | } |
295 | 296 | ||
296 | if (cflag & CSTOPB) | 297 | if (cflag & CSTOPB) |
@@ -622,7 +623,7 @@ static int uart_get_info(struct uart_state *state, | |||
622 | tmp.close_delay = state->close_delay / 10; | 623 | tmp.close_delay = state->close_delay / 10; |
623 | tmp.closing_wait = state->closing_wait == USF_CLOSING_WAIT_NONE ? | 624 | tmp.closing_wait = state->closing_wait == USF_CLOSING_WAIT_NONE ? |
624 | ASYNC_CLOSING_WAIT_NONE : | 625 | ASYNC_CLOSING_WAIT_NONE : |
625 | state->closing_wait / 10; | 626 | state->closing_wait / 10; |
626 | tmp.custom_divisor = port->custom_divisor; | 627 | tmp.custom_divisor = port->custom_divisor; |
627 | tmp.hub6 = port->hub6; | 628 | tmp.hub6 = port->hub6; |
628 | tmp.io_type = port->iotype; | 629 | tmp.io_type = port->iotype; |
@@ -788,7 +789,8 @@ static int uart_set_info(struct uart_state *state, | |||
788 | * We failed anyway. | 789 | * We failed anyway. |
789 | */ | 790 | */ |
790 | retval = -EBUSY; | 791 | retval = -EBUSY; |
791 | goto exit; // Added to return the correct error -Ram Gupta | 792 | /* Added to return the correct error -Ram Gupta */ |
793 | goto exit; | ||
792 | } | 794 | } |
793 | } | 795 | } |
794 | 796 | ||
@@ -858,7 +860,7 @@ static int uart_get_lsr_info(struct uart_state *state, | |||
858 | ((uart_circ_chars_pending(&state->info->xmit) > 0) && | 860 | ((uart_circ_chars_pending(&state->info->xmit) > 0) && |
859 | !state->info->tty->stopped && !state->info->tty->hw_stopped)) | 861 | !state->info->tty->stopped && !state->info->tty->hw_stopped)) |
860 | result &= ~TIOCSER_TEMT; | 862 | result &= ~TIOCSER_TEMT; |
861 | 863 | ||
862 | return put_user(result, value); | 864 | return put_user(result, value); |
863 | } | 865 | } |
864 | 866 | ||
@@ -996,8 +998,8 @@ uart_wait_modem_status(struct uart_state *state, unsigned long arg) | |||
996 | ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || | 998 | ((arg & TIOCM_DSR) && (cnow.dsr != cprev.dsr)) || |
997 | ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || | 999 | ((arg & TIOCM_CD) && (cnow.dcd != cprev.dcd)) || |
998 | ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { | 1000 | ((arg & TIOCM_CTS) && (cnow.cts != cprev.cts))) { |
999 | ret = 0; | 1001 | ret = 0; |
1000 | break; | 1002 | break; |
1001 | } | 1003 | } |
1002 | 1004 | ||
1003 | schedule(); | 1005 | schedule(); |
@@ -1137,7 +1139,8 @@ uart_ioctl(struct tty_struct *tty, struct file *filp, unsigned int cmd, | |||
1137 | return ret; | 1139 | return ret; |
1138 | } | 1140 | } |
1139 | 1141 | ||
1140 | static void uart_set_termios(struct tty_struct *tty, struct ktermios *old_termios) | 1142 | static void uart_set_termios(struct tty_struct *tty, |
1143 | struct ktermios *old_termios) | ||
1141 | { | 1144 | { |
1142 | struct uart_state *state = tty->driver_data; | 1145 | struct uart_state *state = tty->driver_data; |
1143 | unsigned long flags; | 1146 | unsigned long flags; |
@@ -1213,7 +1216,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp) | |||
1213 | { | 1216 | { |
1214 | struct uart_state *state = tty->driver_data; | 1217 | struct uart_state *state = tty->driver_data; |
1215 | struct uart_port *port; | 1218 | struct uart_port *port; |
1216 | 1219 | ||
1217 | BUG_ON(!kernel_locked()); | 1220 | BUG_ON(!kernel_locked()); |
1218 | 1221 | ||
1219 | if (!state || !state->port) | 1222 | if (!state || !state->port) |
@@ -1278,8 +1281,8 @@ static void uart_close(struct tty_struct *tty, struct file *filp) | |||
1278 | uart_shutdown(state); | 1281 | uart_shutdown(state); |
1279 | uart_flush_buffer(tty); | 1282 | uart_flush_buffer(tty); |
1280 | 1283 | ||
1281 | tty_ldisc_flush(tty); | 1284 | tty_ldisc_flush(tty); |
1282 | 1285 | ||
1283 | tty->closing = 0; | 1286 | tty->closing = 0; |
1284 | state->info->tty = NULL; | 1287 | state->info->tty = NULL; |
1285 | 1288 | ||
@@ -1341,7 +1344,7 @@ static void uart_wait_until_sent(struct tty_struct *tty, int timeout) | |||
1341 | expire = jiffies + timeout; | 1344 | expire = jiffies + timeout; |
1342 | 1345 | ||
1343 | pr_debug("uart_wait_until_sent(%d), jiffies=%lu, expire=%lu...\n", | 1346 | pr_debug("uart_wait_until_sent(%d), jiffies=%lu, expire=%lu...\n", |
1344 | port->line, jiffies, expire); | 1347 | port->line, jiffies, expire); |
1345 | 1348 | ||
1346 | /* | 1349 | /* |
1347 | * Check whether the transmitter is empty every 'char_time'. | 1350 | * Check whether the transmitter is empty every 'char_time'. |
@@ -1460,10 +1463,9 @@ uart_block_til_ready(struct file *filp, struct uart_state *state) | |||
1460 | * have set TTY_IO_ERROR for a non-existant port. | 1463 | * have set TTY_IO_ERROR for a non-existant port. |
1461 | */ | 1464 | */ |
1462 | if ((filp->f_flags & O_NONBLOCK) || | 1465 | if ((filp->f_flags & O_NONBLOCK) || |
1463 | (info->tty->termios->c_cflag & CLOCAL) || | 1466 | (info->tty->termios->c_cflag & CLOCAL) || |
1464 | (info->tty->flags & (1 << TTY_IO_ERROR))) { | 1467 | (info->tty->flags & (1 << TTY_IO_ERROR))) |
1465 | break; | 1468 | break; |
1466 | } | ||
1467 | 1469 | ||
1468 | /* | 1470 | /* |
1469 | * Set DTR to allow modem to know we're waiting. Do | 1471 | * Set DTR to allow modem to know we're waiting. Do |
@@ -1551,8 +1553,8 @@ static struct uart_state *uart_get(struct uart_driver *drv, int line) | |||
1551 | } | 1553 | } |
1552 | 1554 | ||
1553 | /* | 1555 | /* |
1554 | * In 2.4.5, calls to uart_open are serialised by the BKL in | 1556 | * calls to uart_open are serialised by the BKL in |
1555 | * linux/fs/devices.c:chrdev_open() | 1557 | * fs/char_dev.c:chrdev_open() |
1556 | * Note that if this fails, then uart_close() _will_ be called. | 1558 | * Note that if this fails, then uart_close() _will_ be called. |
1557 | * | 1559 | * |
1558 | * In time, we want to scrap the "opening nonpresent ports" | 1560 | * In time, we want to scrap the "opening nonpresent ports" |
@@ -1674,7 +1676,7 @@ static int uart_line_info(char *buf, struct uart_driver *drv, int i) | |||
1674 | port->line, uart_type(port), | 1676 | port->line, uart_type(port), |
1675 | mmio ? "mmio:0x" : "port:", | 1677 | mmio ? "mmio:0x" : "port:", |
1676 | mmio ? (unsigned long long)port->mapbase | 1678 | mmio ? (unsigned long long)port->mapbase |
1677 | : (unsigned long long) port->iobase, | 1679 | : (unsigned long long) port->iobase, |
1678 | port->irq); | 1680 | port->irq); |
1679 | 1681 | ||
1680 | if (port->type == PORT_UNKNOWN) { | 1682 | if (port->type == PORT_UNKNOWN) { |
@@ -1682,8 +1684,7 @@ static int uart_line_info(char *buf, struct uart_driver *drv, int i) | |||
1682 | return ret + 1; | 1684 | return ret + 1; |
1683 | } | 1685 | } |
1684 | 1686 | ||
1685 | if(capable(CAP_SYS_ADMIN)) | 1687 | if (capable(CAP_SYS_ADMIN)) { |
1686 | { | ||
1687 | mutex_lock(&state->mutex); | 1688 | mutex_lock(&state->mutex); |
1688 | pm_state = state->pm_state; | 1689 | pm_state = state->pm_state; |
1689 | if (pm_state) | 1690 | if (pm_state) |
@@ -1709,12 +1710,12 @@ static int uart_line_info(char *buf, struct uart_driver *drv, int i) | |||
1709 | if (port->icount.overrun) | 1710 | if (port->icount.overrun) |
1710 | ret += sprintf(buf + ret, " oe:%d", | 1711 | ret += sprintf(buf + ret, " oe:%d", |
1711 | port->icount.overrun); | 1712 | port->icount.overrun); |
1712 | 1713 | ||
1713 | #define INFOBIT(bit,str) \ | 1714 | #define INFOBIT(bit, str) \ |
1714 | if (port->mctrl & (bit)) \ | 1715 | if (port->mctrl & (bit)) \ |
1715 | strncat(stat_buf, (str), sizeof(stat_buf) - \ | 1716 | strncat(stat_buf, (str), sizeof(stat_buf) - \ |
1716 | strlen(stat_buf) - 2) | 1717 | strlen(stat_buf) - 2) |
1717 | #define STATBIT(bit,str) \ | 1718 | #define STATBIT(bit, str) \ |
1718 | if (status & (bit)) \ | 1719 | if (status & (bit)) \ |
1719 | strncat(stat_buf, (str), sizeof(stat_buf) - \ | 1720 | strncat(stat_buf, (str), sizeof(stat_buf) - \ |
1720 | strlen(stat_buf) - 2) | 1721 | strlen(stat_buf) - 2) |
@@ -1730,7 +1731,7 @@ static int uart_line_info(char *buf, struct uart_driver *drv, int i) | |||
1730 | if (stat_buf[0]) | 1731 | if (stat_buf[0]) |
1731 | stat_buf[0] = ' '; | 1732 | stat_buf[0] = ' '; |
1732 | strcat(stat_buf, "\n"); | 1733 | strcat(stat_buf, "\n"); |
1733 | 1734 | ||
1734 | ret += sprintf(buf + ret, stat_buf); | 1735 | ret += sprintf(buf + ret, stat_buf); |
1735 | } else { | 1736 | } else { |
1736 | strcat(buf, "\n"); | 1737 | strcat(buf, "\n"); |
@@ -1992,11 +1993,11 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *port) | |||
1992 | /* | 1993 | /* |
1993 | * Wait for the transmitter to empty. | 1994 | * Wait for the transmitter to empty. |
1994 | */ | 1995 | */ |
1995 | for (tries = 3; !ops->tx_empty(port) && tries; tries--) { | 1996 | for (tries = 3; !ops->tx_empty(port) && tries; tries--) |
1996 | msleep(10); | 1997 | msleep(10); |
1997 | } | ||
1998 | if (!tries) | 1998 | if (!tries) |
1999 | printk(KERN_ERR "%s%s%s%d: Unable to drain transmitter\n", | 1999 | printk(KERN_ERR "%s%s%s%d: Unable to drain " |
2000 | "transmitter\n", | ||
2000 | port->dev ? port->dev->bus_id : "", | 2001 | port->dev ? port->dev->bus_id : "", |
2001 | port->dev ? ": " : "", | 2002 | port->dev ? ": " : "", |
2002 | drv->dev_name, port->line); | 2003 | drv->dev_name, port->line); |
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig index 9b3f61200000..69f19f224875 100644 --- a/drivers/thermal/Kconfig +++ b/drivers/thermal/Kconfig | |||
@@ -9,7 +9,7 @@ menuconfig THERMAL | |||
9 | Generic Thermal Sysfs driver offers a generic mechanism for | 9 | Generic Thermal Sysfs driver offers a generic mechanism for |
10 | thermal management. Usually it's made up of one or more thermal | 10 | thermal management. Usually it's made up of one or more thermal |
11 | zone and cooling device. | 11 | zone and cooling device. |
12 | each thermal zone contains its own temperature, trip points, | 12 | Each thermal zone contains its own temperature, trip points, |
13 | cooling devices. | 13 | cooling devices. |
14 | All platforms with ACPI thermal support can use this driver. | 14 | All platforms with ACPI thermal support can use this driver. |
15 | If you want this support, you should say Y here | 15 | If you want this support, you should say Y here. |
diff --git a/drivers/thermal/thermal.c b/drivers/thermal/thermal.c index 3273e348fd14..e782b3e7fcdb 100644 --- a/drivers/thermal/thermal.c +++ b/drivers/thermal/thermal.c | |||
@@ -267,7 +267,7 @@ thermal_cooling_device_cur_state_store(struct device *dev, | |||
267 | } | 267 | } |
268 | 268 | ||
269 | static struct device_attribute dev_attr_cdev_type = | 269 | static struct device_attribute dev_attr_cdev_type = |
270 | __ATTR(type, 0444, thermal_cooling_device_type_show, NULL); | 270 | __ATTR(type, 0444, thermal_cooling_device_type_show, NULL); |
271 | static DEVICE_ATTR(max_state, 0444, | 271 | static DEVICE_ATTR(max_state, 0444, |
272 | thermal_cooling_device_max_state_show, NULL); | 272 | thermal_cooling_device_max_state_show, NULL); |
273 | static DEVICE_ATTR(cur_state, 0644, | 273 | static DEVICE_ATTR(cur_state, 0644, |
@@ -276,7 +276,7 @@ static DEVICE_ATTR(cur_state, 0644, | |||
276 | 276 | ||
277 | static ssize_t | 277 | static ssize_t |
278 | thermal_cooling_device_trip_point_show(struct device *dev, | 278 | thermal_cooling_device_trip_point_show(struct device *dev, |
279 | struct device_attribute *attr, char *buf) | 279 | struct device_attribute *attr, char *buf) |
280 | { | 280 | { |
281 | struct thermal_cooling_device_instance *instance; | 281 | struct thermal_cooling_device_instance *instance; |
282 | 282 | ||
@@ -293,11 +293,12 @@ thermal_cooling_device_trip_point_show(struct device *dev, | |||
293 | 293 | ||
294 | /** | 294 | /** |
295 | * thermal_zone_bind_cooling_device - bind a cooling device to a thermal zone | 295 | * thermal_zone_bind_cooling_device - bind a cooling device to a thermal zone |
296 | * this function is usually called in the thermal zone device .bind callback. | ||
297 | * @tz: thermal zone device | 296 | * @tz: thermal zone device |
298 | * @trip: indicates which trip point the cooling devices is | 297 | * @trip: indicates which trip point the cooling devices is |
299 | * associated with in this thermal zone. | 298 | * associated with in this thermal zone. |
300 | * @cdev: thermal cooling device | 299 | * @cdev: thermal cooling device |
300 | * | ||
301 | * This function is usually called in the thermal zone device .bind callback. | ||
301 | */ | 302 | */ |
302 | int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, | 303 | int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, |
303 | int trip, | 304 | int trip, |
@@ -307,8 +308,7 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, | |||
307 | struct thermal_cooling_device_instance *pos; | 308 | struct thermal_cooling_device_instance *pos; |
308 | int result; | 309 | int result; |
309 | 310 | ||
310 | if (trip >= tz->trips || | 311 | if (trip >= tz->trips || (trip < 0 && trip != THERMAL_TRIPS_NONE)) |
311 | (trip < 0 && trip != THERMAL_TRIPS_NONE)) | ||
312 | return -EINVAL; | 312 | return -EINVAL; |
313 | 313 | ||
314 | if (!tz || !cdev) | 314 | if (!tz || !cdev) |
@@ -361,15 +361,17 @@ int thermal_zone_bind_cooling_device(struct thermal_zone_device *tz, | |||
361 | kfree(dev); | 361 | kfree(dev); |
362 | return result; | 362 | return result; |
363 | } | 363 | } |
364 | |||
364 | EXPORT_SYMBOL(thermal_zone_bind_cooling_device); | 365 | EXPORT_SYMBOL(thermal_zone_bind_cooling_device); |
365 | 366 | ||
366 | /** | 367 | /** |
367 | * thermal_zone_unbind_cooling_device - unbind a cooling device from a thermal zone | 368 | * thermal_zone_unbind_cooling_device - unbind a cooling device from a thermal zone |
368 | * this function is usually called in the thermal zone device .unbind callback. | ||
369 | * @tz: thermal zone device | 369 | * @tz: thermal zone device |
370 | * @trip: indicates which trip point the cooling devices is | 370 | * @trip: indicates which trip point the cooling devices is |
371 | * associated with in this thermal zone. | 371 | * associated with in this thermal zone. |
372 | * @cdev: thermal cooling device | 372 | * @cdev: thermal cooling device |
373 | * | ||
374 | * This function is usually called in the thermal zone device .unbind callback. | ||
373 | */ | 375 | */ |
374 | int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, | 376 | int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, |
375 | int trip, | 377 | int trip, |
@@ -379,8 +381,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, | |||
379 | 381 | ||
380 | mutex_lock(&tz->lock); | 382 | mutex_lock(&tz->lock); |
381 | list_for_each_entry_safe(pos, next, &tz->cooling_devices, node) { | 383 | list_for_each_entry_safe(pos, next, &tz->cooling_devices, node) { |
382 | if (pos->tz == tz && pos->trip == trip | 384 | if (pos->tz == tz && pos->trip == trip && pos->cdev == cdev) { |
383 | && pos->cdev == cdev) { | ||
384 | list_del(&pos->node); | 385 | list_del(&pos->node); |
385 | mutex_unlock(&tz->lock); | 386 | mutex_unlock(&tz->lock); |
386 | goto unbind; | 387 | goto unbind; |
@@ -397,6 +398,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz, | |||
397 | kfree(pos); | 398 | kfree(pos); |
398 | return 0; | 399 | return 0; |
399 | } | 400 | } |
401 | |||
400 | EXPORT_SYMBOL(thermal_zone_unbind_cooling_device); | 402 | EXPORT_SYMBOL(thermal_zone_unbind_cooling_device); |
401 | 403 | ||
402 | static void thermal_release(struct device *dev) | 404 | static void thermal_release(struct device *dev) |
@@ -425,7 +427,10 @@ static struct class thermal_class = { | |||
425 | * @ops: standard thermal cooling devices callbacks. | 427 | * @ops: standard thermal cooling devices callbacks. |
426 | */ | 428 | */ |
427 | struct thermal_cooling_device *thermal_cooling_device_register(char *type, | 429 | struct thermal_cooling_device *thermal_cooling_device_register(char *type, |
428 | void *devdata, struct thermal_cooling_device_ops *ops) | 430 | void *devdata, |
431 | struct | ||
432 | thermal_cooling_device_ops | ||
433 | *ops) | ||
429 | { | 434 | { |
430 | struct thermal_cooling_device *cdev; | 435 | struct thermal_cooling_device *cdev; |
431 | struct thermal_zone_device *pos; | 436 | struct thermal_zone_device *pos; |
@@ -435,7 +440,7 @@ struct thermal_cooling_device *thermal_cooling_device_register(char *type, | |||
435 | return NULL; | 440 | return NULL; |
436 | 441 | ||
437 | if (!ops || !ops->get_max_state || !ops->get_cur_state || | 442 | if (!ops || !ops->get_max_state || !ops->get_cur_state || |
438 | !ops->set_cur_state) | 443 | !ops->set_cur_state) |
439 | return NULL; | 444 | return NULL; |
440 | 445 | ||
441 | cdev = kzalloc(sizeof(struct thermal_cooling_device), GFP_KERNEL); | 446 | cdev = kzalloc(sizeof(struct thermal_cooling_device), GFP_KERNEL); |
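For reference, a hedged sketch of how a driver uses the registration interface above; "Fan", my_fan and my_fan_cooling_ops are placeholders, and the ops table must supply get_max_state, get_cur_state and set_cur_state or the check above rejects the registration:

        struct thermal_cooling_device *cdev;

        cdev = thermal_cooling_device_register("Fan", my_fan,
                                               &my_fan_cooling_ops);
        if (!cdev)
                return -ENODEV;         /* registration returns NULL on error */

        /* ... later, on driver removal ... */
        thermal_cooling_device_unregister(cdev);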
@@ -462,8 +467,7 @@ struct thermal_cooling_device *thermal_cooling_device_register(char *type, | |||
462 | 467 | ||
463 | /* sys I/F */ | 468 | /* sys I/F */ |
464 | if (type) { | 469 | if (type) { |
465 | result = device_create_file(&cdev->device, | 470 | result = device_create_file(&cdev->device, &dev_attr_cdev_type); |
466 | &dev_attr_cdev_type); | ||
467 | if (result) | 471 | if (result) |
468 | goto unregister; | 472 | goto unregister; |
469 | } | 473 | } |
@@ -496,11 +500,11 @@ struct thermal_cooling_device *thermal_cooling_device_register(char *type, | |||
496 | device_unregister(&cdev->device); | 500 | device_unregister(&cdev->device); |
497 | return NULL; | 501 | return NULL; |
498 | } | 502 | } |
503 | |||
499 | EXPORT_SYMBOL(thermal_cooling_device_register); | 504 | EXPORT_SYMBOL(thermal_cooling_device_register); |
500 | 505 | ||
501 | /** | 506 | /** |
502 | * thermal_cooling_device_unregister - removes the registered thermal cooling device | 507 | * thermal_cooling_device_unregister - removes the registered thermal cooling device |
503 | * | ||
504 | * @cdev: the thermal cooling device to remove. | 508 | * @cdev: the thermal cooling device to remove. |
505 | * | 509 | * |
506 | * thermal_cooling_device_unregister() must be called when the device is no | 510 | * thermal_cooling_device_unregister() must be called when the device is no |
@@ -533,8 +537,7 @@ void thermal_cooling_device_unregister(struct | |||
533 | } | 537 | } |
534 | mutex_unlock(&thermal_list_lock); | 538 | mutex_unlock(&thermal_list_lock); |
535 | if (cdev->type[0]) | 539 | if (cdev->type[0]) |
536 | device_remove_file(&cdev->device, | 540 | device_remove_file(&cdev->device, &dev_attr_cdev_type); |
537 | &dev_attr_cdev_type); | ||
538 | device_remove_file(&cdev->device, &dev_attr_max_state); | 541 | device_remove_file(&cdev->device, &dev_attr_max_state); |
539 | device_remove_file(&cdev->device, &dev_attr_cur_state); | 542 | device_remove_file(&cdev->device, &dev_attr_cur_state); |
540 | 543 | ||
@@ -542,6 +545,7 @@ void thermal_cooling_device_unregister(struct | |||
542 | device_unregister(&cdev->device); | 545 | device_unregister(&cdev->device); |
543 | return; | 546 | return; |
544 | } | 547 | } |
548 | |||
545 | EXPORT_SYMBOL(thermal_cooling_device_unregister); | 549 | EXPORT_SYMBOL(thermal_cooling_device_unregister); |
546 | 550 | ||
547 | /** | 551 | /** |
@@ -555,8 +559,10 @@ EXPORT_SYMBOL(thermal_cooling_device_unregister); | |||
555 | * longer needed. | 559 | * longer needed. |
556 | */ | 560 | */ |
557 | struct thermal_zone_device *thermal_zone_device_register(char *type, | 561 | struct thermal_zone_device *thermal_zone_device_register(char *type, |
558 | int trips, void *devdata, | 562 | int trips, |
559 | struct thermal_zone_device_ops *ops) | 563 | void *devdata, struct |
564 | thermal_zone_device_ops | ||
565 | *ops) | ||
560 | { | 566 | { |
561 | struct thermal_zone_device *tz; | 567 | struct thermal_zone_device *tz; |
562 | struct thermal_cooling_device *pos; | 568 | struct thermal_cooling_device *pos; |
@@ -625,9 +631,9 @@ struct thermal_zone_device *thermal_zone_device_register(char *type, | |||
625 | list_add_tail(&tz->node, &thermal_tz_list); | 631 | list_add_tail(&tz->node, &thermal_tz_list); |
626 | if (ops->bind) | 632 | if (ops->bind) |
627 | list_for_each_entry(pos, &thermal_cdev_list, node) { | 633 | list_for_each_entry(pos, &thermal_cdev_list, node) { |
628 | result = ops->bind(tz, pos); | 634 | result = ops->bind(tz, pos); |
629 | if (result) | 635 | if (result) |
630 | break; | 636 | break; |
631 | } | 637 | } |
632 | mutex_unlock(&thermal_list_lock); | 638 | mutex_unlock(&thermal_list_lock); |
633 | 639 | ||
@@ -639,11 +645,11 @@ struct thermal_zone_device *thermal_zone_device_register(char *type, | |||
639 | device_unregister(&tz->device); | 645 | device_unregister(&tz->device); |
640 | return NULL; | 646 | return NULL; |
641 | } | 647 | } |
648 | |||
642 | EXPORT_SYMBOL(thermal_zone_device_register); | 649 | EXPORT_SYMBOL(thermal_zone_device_register); |
643 | 650 | ||
644 | /** | 651 | /** |
645 | * thermal_device_unregister - removes the registered thermal zone device | 652 | * thermal_device_unregister - removes the registered thermal zone device |
646 | * | ||
647 | * @tz: the thermal zone device to remove | 653 | * @tz: the thermal zone device to remove |
648 | */ | 654 | */ |
649 | void thermal_zone_device_unregister(struct thermal_zone_device *tz) | 655 | void thermal_zone_device_unregister(struct thermal_zone_device *tz) |
@@ -685,6 +691,7 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz) | |||
685 | device_unregister(&tz->device); | 691 | device_unregister(&tz->device); |
686 | return; | 692 | return; |
687 | } | 693 | } |
694 | |||
688 | EXPORT_SYMBOL(thermal_zone_device_unregister); | 695 | EXPORT_SYMBOL(thermal_zone_device_unregister); |
689 | 696 | ||
690 | static int __init thermal_init(void) | 697 | static int __init thermal_init(void) |
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c index 33469cf5aec3..e01862300169 100644 --- a/drivers/usb/gadget/net2280.c +++ b/drivers/usb/gadget/net2280.c | |||
@@ -1418,8 +1418,8 @@ show_function (struct device *_dev, struct device_attribute *attr, char *buf) | |||
1418 | } | 1418 | } |
1419 | static DEVICE_ATTR (function, S_IRUGO, show_function, NULL); | 1419 | static DEVICE_ATTR (function, S_IRUGO, show_function, NULL); |
1420 | 1420 | ||
1421 | static ssize_t | 1421 | static ssize_t net2280_show_registers(struct device *_dev, |
1422 | show_registers (struct device *_dev, struct device_attribute *attr, char *buf) | 1422 | struct device_attribute *attr, char *buf) |
1423 | { | 1423 | { |
1424 | struct net2280 *dev; | 1424 | struct net2280 *dev; |
1425 | char *next; | 1425 | char *next; |
@@ -1571,7 +1571,7 @@ show_registers (struct device *_dev, struct device_attribute *attr, char *buf) | |||
1571 | 1571 | ||
1572 | return PAGE_SIZE - size; | 1572 | return PAGE_SIZE - size; |
1573 | } | 1573 | } |
1574 | static DEVICE_ATTR (registers, S_IRUGO, show_registers, NULL); | 1574 | static DEVICE_ATTR(registers, S_IRUGO, net2280_show_registers, NULL); |
1575 | 1575 | ||
1576 | static ssize_t | 1576 | static ssize_t |
1577 | show_queues (struct device *_dev, struct device_attribute *attr, char *buf) | 1577 | show_queues (struct device *_dev, struct device_attribute *attr, char *buf) |
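The rename above (show_registers to net2280_show_registers) reflects the general sysfs pattern: DEVICE_ATTR() fixes only the attribute name, so the show routine can carry a driver prefix and avoid clashing with other global show_* symbols. A generic sketch with placeholder names:

static ssize_t mydrv_show_value(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", 42);        /* placeholder value */
}
static DEVICE_ATTR(value, S_IRUGO, mydrv_show_value, NULL);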
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 2b53d1f56281..06f87b04f207 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig | |||
@@ -6,7 +6,7 @@ menu "Console display driver support" | |||
6 | 6 | ||
7 | config VGA_CONSOLE | 7 | config VGA_CONSOLE |
8 | bool "VGA text console" if EMBEDDED || !X86 | 8 | bool "VGA text console" if EMBEDDED || !X86 |
9 | depends on !ARCH_ACORN && !ARCH_EBSA110 && !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !ARCH_VERSATILE && !SUPERH && !BLACKFIN && !AVR32 | 9 | depends on !ARCH_ACORN && !ARCH_EBSA110 && !4xx && !8xx && !SPARC && !M68K && !PARISC && !FRV && !ARCH_VERSATILE && !SUPERH && !BLACKFIN && !AVR32 && !MN10300 |
10 | default y | 10 | default y |
11 | help | 11 | help |
12 | Saying Y here will allow you to use Linux in text mode through a | 12 | Saying Y here will allow you to use Linux in text mode through a |